text,target "def interp_like(self, other, method='linear', assume_sorted=False, kwargs={}): """"""Interpolate this object onto the coordinates of another object, filling the out of range values with NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. method: string, optional. {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. 'linear' is used by default. assume_sorted: boolean, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs: dictionary, optional Additional keyword passed to scipy's interpolator. Returns ------- interpolated: xr.Dataset Another dataset by interpolating this dataset's data along the coordinates of the other object. Notes ----- scipy is required. If the dataset has object-type coordinates, reindex is used for these coordinates instead of the interpolation. See Also -------- Dataset.interp Dataset.reindex_like """""" coords = alignment.reindex_like_indexers(self, other) numeric_coords = OrderedDict() object_coords = OrderedDict() for k, v in coords.items(): if v.dtype.kind in 'uifcMm': numeric_coords[k] = v else: object_coords[k] = v ds = self if object_coords: # We do not support interpolation along object coordinate. # reindex instead. ds = self.reindex(object_coords) return ds.interp(numeric_coords, method, assume_sorted, kwargs)","Interpolate this object onto the coordinates of another object, filling the out of range values with NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. method: string, optional. {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. 'linear' is used by default. assume_sorted: boolean, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs: dictionary, optional Additional keyword passed to scipy's interpolator. Returns ------- interpolated: xr.Dataset Another dataset by interpolating this dataset's data along the coordinates of the other object. Notes ----- scipy is required. If the dataset has object-type coordinates, reindex is used for these coordinates instead of the interpolation. 
See Also -------- Dataset.interp Dataset.reindex_like" "def root_frame(self, trim_stem=True): ''' Parses the internal frame records and returns a tree of Frame objects ''' root_frame = None frame_stack = [] for frame_tuple in self.frame_records: identifier_stack = frame_tuple[0] time = frame_tuple[1] # now we must create a stack of frame objects and assign this time to the leaf for stack_depth, frame_identifier in enumerate(identifier_stack): if stack_depth < len(frame_stack): if frame_identifier != frame_stack[stack_depth].identifier: # trim any frames after and including this one del frame_stack[stack_depth:] if stack_depth >= len(frame_stack): frame = Frame(frame_identifier) frame_stack.append(frame) if stack_depth == 0: # There should only be one root frame, as far as I know assert root_frame is None, ASSERTION_MESSAGE root_frame = frame else: parent = frame_stack[stack_depth-1] parent.add_child(frame) # trim any extra frames del frame_stack[stack_depth+1:] # pylint: disable=W0631 # assign the time to the final frame frame_stack[-1].add_child(SelfTimeFrame(self_time=time)) if root_frame is None: return None if trim_stem: root_frame = self._trim_stem(root_frame) return root_frame",Parses the internal frame records and returns a tree of Frame objects "def matrix_chain_mult(M): """"""Matrix chain multiplication :param M: list of matrices :returns: M[0] * ... * M[-1], computed in time optimal order :complexity: whatever is needed by the multiplications """""" opt, arg = matrix_mult_opt_order(M) return _apply_order(M, arg, 0, len(M)-1)","Matrix chain multiplication :param M: list of matrices :returns: M[0] * ... * M[-1], computed in time optimal order :complexity: whatever is needed by the multiplications" "def variants(self, case_id, skip=0, count=1000, filters=None): """"""Return all variants in the VCF. This function will apply the given filter and return the 'count' first variants. If skip the first 'skip' variants will not be regarded. Args: case_id (str): Path to a vcf file (for this adapter) skip (int): Skip first variants count (int): The number of variants to return filters (dict): A dictionary with filters. 
Currently this will look like: { gene_list: [] (list of hgnc ids), frequency: None (float), cadd: None (float), sv_len: None (float), consequence: [] (list of consequences), is_lof: None (Bool), genetic_models [] (list of genetic models) sv_type: List (list of sv types), } Returns: puzzle.constants.Results : Named tuple with variants and nr_of_variants """""" filters = filters or {} case_obj = self.case(case_id=case_id) limit = count + skip genes = set() if filters.get('gene_ids'): genes = set([gene_id.strip() for gene_id in filters['gene_ids']]) frequency = None if filters.get('frequency'): frequency = float(filters['frequency']) cadd = None if filters.get('cadd'): cadd = float(filters['cadd']) genetic_models = None if filters.get('genetic_models'): genetic_models = set(filters['genetic_models']) sv_len = None if filters.get('sv_len'): sv_len = float(filters['sv_len']) impact_severities = None if filters.get('impact_severities'): impact_severities = set(filters['impact_severities']) vcf_file_path = case_obj.variant_source self.head = get_header(vcf_file_path) self.vep_header = self.head.vep_columns self.snpeff_header = self.head.snpeff_columns variants = self._get_filtered_variants(vcf_file_path, filters) result = [] skip_index = 0 for index, variant in enumerate(variants): index += 1 if skip_index >= skip: variant_obj = self._format_variants( variant=variant, index=index, case_obj=case_obj, ) if genes and variant_obj: if not set(variant_obj['gene_symbols']).intersection(genes): variant_obj = None if impact_severities and variant_obj: if not variant_obj['impact_severity'] in impact_severities: variant_obj = None if frequency and variant_obj: if variant_obj.max_freq > frequency: variant_obj = None if cadd and variant_obj: if variant_obj['cadd_score'] < cadd: variant_obj = None if genetic_models and variant_obj: models = set(variant_obj.genetic_models) if not models.intersection(genetic_models): variant_obj = None if sv_len and variant_obj: if variant_obj.sv_len < sv_len: variant_obj = None if variant_obj: skip_index += 1 if skip_index <= limit: result.append(variant_obj) else: break else: skip_index += 1 return Results(result, len(result))","Return all variants in the VCF. This function will apply the given filter and return the 'count' first variants. If skip the first 'skip' variants will not be regarded. Args: case_id (str): Path to a vcf file (for this adapter) skip (int): Skip first variants count (int): The number of variants to return filters (dict): A dictionary with filters. Currently this will look like: { gene_list: [] (list of hgnc ids), frequency: None (float), cadd: None (float), sv_len: None (float), consequence: [] (list of consequences), is_lof: None (Bool), genetic_models [] (list of genetic models) sv_type: List (list of sv types), } Returns: puzzle.constants.Results : Named tuple with variants and nr_of_variants" "def execute_pool_txns(self, three_pc_batch) -> List: """""" Execute a transaction that involves consensus pool management, like adding a node, client or a steward. :param ppTime: PrePrepare request time :param reqs_keys: requests keys to be committed """""" committed_txns = self.default_executer(three_pc_batch) for txn in committed_txns: self.poolManager.onPoolMembershipChange(txn) return committed_txns","Execute a transaction that involves consensus pool management, like adding a node, client or a steward. 
:param ppTime: PrePrepare request time :param reqs_keys: requests keys to be committed" "def setup(cls, app): # pragma: no cover """"""Called by Sphinx to setup an extension."""""" if cls.directive_name is None: raise NotImplementedError('directive_name must be set by ' 'subclasses of BaseDirective') if not app.registry.has_domain('http'): setup_httpdomain(app) app.add_config_value('{}_harness'.format(cls.directive_name), None, 'env') app.add_directive(cls.directive_name, cls) app.connect('builder-inited', cls.run_setup) app.connect('build-finished', cls.run_teardown) app.connect('env-get-outdated', cls.get_outdated_docs) app.connect('env-purge-doc', cls.purge_docs)",Called by Sphinx to setup an extension. "def get_ordering_fields_lookups(self): """""" Getting real model fields to order by """""" ordering_field = [] for field_name in self.get_ordering_fields(): ordering_field.append(self._get_ordering_field_lookup(field_name)) return ordering_field",Getting real model fields to order by "def _legacySpecialCases(form, patterns, parameter): """""" Create a view object for the given parameter. This function implements the remaining view construction logic which has not yet been converted to the C{viewFactory}-style expressed in L{_LiveFormMixin.form}. @type form: L{_LiveFormMixin} @param form: The form fragment which contains the given parameter. @type patterns: L{PatternDictionary} @type parameter: L{Parameter}, L{ChoiceParameter}, or L{ListParameter}. """""" p = patterns[parameter.type + '-input-container'] if parameter.type == TEXTAREA_INPUT: p = dictFillSlots(p, dict(label=parameter.label, name=parameter.name, value=parameter.default or '')) elif parameter.type == MULTI_TEXT_INPUT: subInputs = list() for i in xrange(parameter.count): subInputs.append(dictFillSlots(patterns['input'], dict(name=parameter.name + '_' + str(i), type='text', value=parameter.defaults[i]))) p = dictFillSlots(p, dict(label=parameter.label or parameter.name, inputs=subInputs)) else: if parameter.default is not None: value = parameter.default else: value = '' if parameter.type == CHECKBOX_INPUT and parameter.default: inputPattern = 'checked-checkbox-input' else: inputPattern = 'input' p = dictFillSlots( p, dict(label=parameter.label or parameter.name, input=dictFillSlots(patterns[inputPattern], dict(name=parameter.name, type=parameter.type, value=value)))) p(**{'class' : 'liveform_'+parameter.name}) if parameter.description: description = patterns['description'].fillSlots( 'description', parameter.description) else: description = '' return dictFillSlots( patterns['parameter-input'], dict(input=p, description=description))","Create a view object for the given parameter. This function implements the remaining view construction logic which has not yet been converted to the C{viewFactory}-style expressed in L{_LiveFormMixin.form}. @type form: L{_LiveFormMixin} @param form: The form fragment which contains the given parameter. @type patterns: L{PatternDictionary} @type parameter: L{Parameter}, L{ChoiceParameter}, or L{ListParameter}." "def dump_hash_prefix_values(self): """"""Export all hash prefix values. Returns a list of known hash prefix values """""" q = '''SELECT distinct value from hash_prefix''' output = [] with self.get_cursor() as dbc: dbc.execute(q) output = [bytes(r[0]) for r in dbc.fetchall()] return output","Export all hash prefix values. 
Returns a list of known hash prefix values" "def get_modis_tile_list(ds): """"""Helper function to identify MODIS tiles that intersect input geometry modis_gird.py contains dictionary of tile boundaries (tile name and WKT polygon ring from bbox) See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html """""" from demcoreg import modis_grid modis_dict = {} for key in modis_grid.modis_dict: modis_dict[key] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[key]) geom = geolib.ds_geom(ds) geom_dup = geolib.geom_dup(geom) ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(), geolib.wgs_srs) geom_dup.Transform(ct) tile_list = [] for key, val in list(modis_dict.items()): if geom_dup.Intersects(val): tile_list.append(key) return tile_list","Helper function to identify MODIS tiles that intersect input geometry modis_gird.py contains dictionary of tile boundaries (tile name and WKT polygon ring from bbox) See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html" "def file_open(self, fn): """"""Yields the opening text of a file section in multipart HTTP. Parameters ---------- fn : str Filename for the file being opened and added to the HTTP body """""" yield b'--' yield self.boundary.encode() yield CRLF headers = content_disposition(fn) headers.update(content_type(fn)) for c in self._write_headers(headers): yield c","Yields the opening text of a file section in multipart HTTP. Parameters ---------- fn : str Filename for the file being opened and added to the HTTP body" "def gather_details(): """"""Get details about the host that is executing habu."""""" try: data = { 'kernel': platform.uname(), 'distribution': platform.linux_distribution(), 'libc': platform.libc_ver(), 'arch': platform.machine(), 'python_version': platform.python_version(), 'os_name': platform.system(), 'static_hostname': platform.node(), 'cpu': platform.processor(), 'fqdn': socket.getfqdn(), } except AttributeError: return {} return data",Get details about the host that is executing habu. "def save_ipv6(self, ip6, id_equip, descricao, id_net): """""" Save an IP6 and associate with equipment :param ip6: An IP6 available to save in format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx. :param id_equip: Equipment identifier. Integer value and greater than zero. :param descricao: IPv6 description. :param id_net: Network identifier. Integer value and greater than zero. :return: Dictionary with the following structure: :: {'ipv6': {'id': < id >, 'block1': , 'block2': , 'block3': , 'block4': , 'block5': , 'block6': , 'block7': , 'block8': , 'descricao': < description >, 'equipamento': [ { all name equipamentos related } ], }} """""" if not is_valid_int_param(id_net): raise InvalidParameterError( u'Network identifier is invalid or was not informed.') if not is_valid_int_param(id_equip): raise InvalidParameterError( u'Equipment identifier is invalid or was not informed.') if ip6 is None or ip6 == """": raise InvalidParameterError( u'IPv6 is invalid or was not informed.') ip_map = dict() ip_map['id_net'] = id_net ip_map['descricao'] = descricao ip_map['ip6'] = ip6 ip_map['id_equip'] = id_equip url = ""ipv6/save/"" code, xml = self.submit({'ip_map': ip_map}, 'POST', url) return self.response(code, xml)","Save an IP6 and associate with equipment :param ip6: An IP6 available to save in format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx. :param id_equip: Equipment identifier. Integer value and greater than zero. :param descricao: IPv6 description. :param id_net: Network identifier. Integer value and greater than zero. 
:return: Dictionary with the following structure: :: {'ipv6': {'id': < id >, 'block1': , 'block2': , 'block3': , 'block4': , 'block5': , 'block6': , 'block7': , 'block8': , 'descricao': < description >, 'equipamento': [ { all name equipamentos related } ], }}" "def is_tag_matched(self, tag, **attribute_filter): r"""""" Return true if the attributes matches in attribute filter. An attribute filter is a dictionary containing: {attribute_name: value}. This function will return True if and only if all attributes have the same value. This function allows to set the dictionary via kwargs, thus you can filter like this: example:: a.is_tag_matched(tag, name=""foobar"", other=""barfoo"") This function uses a fallback for attribute searching. It will by default use the namespace variant but fall back to the non-namespace variant. Thus specifiying :code:`{""name"": ""foobar""}` will match on :code:`` as well as on :code:``. :param lxml.etree.Element tag: specify the tag element :param attribute_filter: specify the attribute filter as dictionary """""" if len(attribute_filter) <= 0: return True for attr, value in attribute_filter.items(): _value = self.get_value_from_tag(tag, attr) if _value != value: return False return True","r"""""" Return true if the attributes matches in attribute filter. An attribute filter is a dictionary containing: {attribute_name: value}. This function will return True if and only if all attributes have the same value. This function allows to set the dictionary via kwargs, thus you can filter like this: example:: a.is_tag_matched(tag, name=""foobar"", other=""barfoo"") This function uses a fallback for attribute searching. It will by default use the namespace variant but fall back to the non-namespace variant. Thus specifiying :code:`{""name"": ""foobar""}` will match on :code:`` as well as on :code:``. :param lxml.etree.Element tag: specify the tag element :param attribute_filter: specify the attribute filter as dictionary" "def uncomment(name, regex, char='#', backup='.bak'): ''' Uncomment specified commented lines in a file name The full path to the file to be edited regex A regular expression used to find the lines that are to be uncommented. This regex should not include the comment character. A leading ``^`` character will be stripped for convenience (for easily switching between comment() and uncomment()). The regex will be searched for from the beginning of the line, ignoring leading spaces (we prepend '^[ \\t]*') char : ``#`` The character to remove in order to uncomment a line backup : ``.bak`` The file will be backed up before edit with this file extension; .. warning:: This backup will be overwritten each time ``sed`` / ``comment`` / ``uncomment`` is called. Meaning the backup will only be useful after the first invocation. Set to False/None to not keep a backup. Usage: .. code-block:: yaml /etc/adduser.conf: file.uncomment: - regex: EXTRA_GROUPS .. 
versionadded:: 0.9.5 ''' name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if not name: return _error(ret, 'Must provide name to file.uncomment') check_res, check_msg = _check_file(name) if not check_res: return _error(ret, check_msg) # Make sure the pattern appears in the file if __salt__['file.search']( name, '{0}[ \t]*{1}'.format(char, regex.lstrip('^')), multiline=True): # Line exists and is commented pass elif __salt__['file.search']( name, '^[ \t]*{0}'.format(regex.lstrip('^')), multiline=True): ret['comment'] = 'Pattern already uncommented' ret['result'] = True return ret else: return _error(ret, '{0}: Pattern not found'.format(regex)) if __opts__['test']: ret['changes'][name] = 'updated' ret['comment'] = 'File {0} is set to be updated'.format(name) ret['result'] = None return ret with salt.utils.files.fopen(name, 'rb') as fp_: slines = salt.utils.data.decode(fp_.readlines()) # Perform the edit __salt__['file.comment_line'](name, regex, char, False, backup) with salt.utils.files.fopen(name, 'rb') as fp_: nlines = salt.utils.data.decode(fp_.readlines()) # Check the result ret['result'] = __salt__['file.search']( name, '^[ \t]*{0}'.format(regex.lstrip('^')), multiline=True ) if slines != nlines: if not __utils__['files.is_text'](name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them ret['changes']['diff'] = ( ''.join(difflib.unified_diff(slines, nlines)) ) if ret['result']: ret['comment'] = 'Uncommented lines successfully' else: ret['comment'] = 'Expected uncommented lines not found' return ret","Uncomment specified commented lines in a file name The full path to the file to be edited regex A regular expression used to find the lines that are to be uncommented. This regex should not include the comment character. A leading ``^`` character will be stripped for convenience (for easily switching between comment() and uncomment()). The regex will be searched for from the beginning of the line, ignoring leading spaces (we prepend '^[ \\t]*') char : ``#`` The character to remove in order to uncomment a line backup : ``.bak`` The file will be backed up before edit with this file extension; .. warning:: This backup will be overwritten each time ``sed`` / ``comment`` / ``uncomment`` is called. Meaning the backup will only be useful after the first invocation. Set to False/None to not keep a backup. Usage: .. code-block:: yaml /etc/adduser.conf: file.uncomment: - regex: EXTRA_GROUPS .. versionadded:: 0.9.5" "def unique_slug_required(form, slug): """"""Enforce a unique slug accross all pages and websistes."""""" if hasattr(form, 'instance') and form.instance.id: if Content.objects.exclude(page=form.instance).filter( body=slug, type=""slug"").count(): raise forms.ValidationError(error_dict['another_page_error']) elif Content.objects.filter(body=slug, type=""slug"").count(): raise forms.ValidationError(error_dict['another_page_error']) return slug",Enforce a unique slug accross all pages and websistes. 
"def query_put_bounders(query, partition_column, start, end): """""" Put bounders in the query Args: query: SQL query string partition_column: partition_column name start: lower_bound end: upper_bound Returns: Query with bounders """""" where = "" WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}"".format( partition_column, start, end ) query_with_bounders = ""SELECT * FROM ({0}) AS TMP_TABLE {1}"".format(query, where) return query_with_bounders","Put bounders in the query Args: query: SQL query string partition_column: partition_column name start: lower_bound end: upper_bound Returns: Query with bounders" "def get_default_org(self): """""" retrieve the name and configuration of the default org """""" for org in self.list_orgs(): org_config = self.get_org(org) if org_config.default: return org, org_config return None, None",retrieve the name and configuration of the default org "def getRegistered(self, context = """"): """""" Returns a dictionary with the currently registered SNMP objects. Returned is a dictionary objects for the specified ""context"", which defaults to the default context. """""" myobjs = {} try: # Python 2.x objs_iterator = self._objs[context].iteritems() except AttributeError: # Python 3.x objs_iterator = self._objs[context].items() for oidstr, snmpobj in objs_iterator: myobjs[oidstr] = { ""type"": type(snmpobj).__name__, ""value"": snmpobj.value() } return dict(myobjs)","Returns a dictionary with the currently registered SNMP objects. Returned is a dictionary objects for the specified ""context"", which defaults to the default context." "def to_dict(self): """"""Convert this object into a dictionary. Returns: dict: A dict with the same information as this object. """""" out_dict = {} out_dict['commands'] = self.commands out_dict['configs'] = self.configs out_dict['short_name'] = self.name out_dict['versions'] = { 'module': self.module_version, 'api': self.api_version } return out_dict","Convert this object into a dictionary. Returns: dict: A dict with the same information as this object." "def describe_api_key(apiKey, region=None, key=None, keyid=None, profile=None): ''' Gets info about the given api key CLI Example: .. code-block:: bash salt myminion boto_apigateway.describe_api_key apigw_api_key ''' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) response = conn.get_api_key(apiKey=apiKey) return {'apiKey': _convert_datetime_str(response)} except ClientError as e: return {'error': __utils__['boto3.get_error'](e)}","Gets info about the given api key CLI Example: .. 
code-block:: bash salt myminion boto_apigateway.describe_api_key apigw_api_key" "def filter_cookies(self, request_url: URL=URL()) -> 'BaseCookie[str]': """"""Returns this jar's cookies filtered by their attributes."""""" self._do_expiration() request_url = URL(request_url) filtered = SimpleCookie() hostname = request_url.raw_host or """" is_not_secure = request_url.scheme not in (""https"", ""wss"") for cookie in self: name = cookie.key domain = cookie[""domain""] # Send shared cookies if not domain: filtered[name] = cookie.value continue if not self._unsafe and is_ip_address(hostname): continue if (domain, name) in self._host_only_cookies: if domain != hostname: continue elif not self._is_domain_match(domain, hostname): continue if not self._is_path_match(request_url.path, cookie[""path""]): continue if is_not_secure and cookie[""secure""]: continue # It's critical we use the Morsel so the coded_value # (based on cookie version) is preserved mrsl_val = cast('Morsel[str]', cookie.get(cookie.key, Morsel())) mrsl_val.set(cookie.key, cookie.value, cookie.coded_value) filtered[name] = mrsl_val return filtered",Returns this jar's cookies filtered by their attributes. "def auto_param_specs(self): """""" Parameter pecs in the sub-study class that are not explicitly provided in the name map """""" for spec in self.study_class.parameter_specs(): if spec.name not in self._name_map: yield spec","Parameter pecs in the sub-study class that are not explicitly provided in the name map" "def cli(source_f, raster_f, output, verbose): """""" Converts 2D geometries to 3D using GEOS sample through fiona. \b Example: drape point.shp elevation.tif -o point_z.shp """""" with fiona.open(source_f, 'r') as source: source_driver = source.driver source_crs = source.crs sink_schema = source.schema.copy() source_geom = source.schema['geometry'] if source_geom == 'Point': sink_schema['geometry'] = '3D Point' elif source_geom == 'LineString': sink_schema['geometry'] = '3D LineString' elif source_geom == '3D Point' or source_geom == '3D LineString': pass else: click.BadParameter(""Source geometry type {} not implemented"".format(source_geom)) with rasterio.open(raster_f) as raster: if source_crs != raster.crs: click.BadParameter(""Features and raster have different CRS."") if raster.count > 1: warnings.warn(""Found {0} bands in {1}, expected a single band raster"".format(raster.bands, raster_f)) supported = ['int16', 'int32', 'float32', 'float64'] if raster.dtypes[0] not in supported: warnings.warn(""Found {0} type in {1}, expected one of {2}"".format(raster.dtypes[0]), raster_f, supported) with fiona.open( output, 'w', driver=source_driver, crs=source_crs, schema=sink_schema) as sink: for feature in source: try: feature_z = drapery.drape(raster, feature) sink.write({ 'geometry': mapping(feature_z), 'properties': feature['properties'], }) except Exception: logging.exception(""Error processing feature %s:"", feature['id'])","Converts 2D geometries to 3D using GEOS sample through fiona. \b Example: drape point.shp elevation.tif -o point_z.shp" "def set_threshold_override(self, limit_name, warn_percent=None, warn_count=None, crit_percent=None, crit_count=None): """""" Override the default warning and critical thresholds used to evaluate the specified limit's usage. Theresholds can be specified as a percentage of the limit, or as a usage count, or both. 
:param warn_percent: new warning threshold, percentage used :type warn_percent: int :param warn_count: new warning threshold, actual count/number :type warn_count: int :param crit_percent: new critical threshold, percentage used :type crit_percent: int :param crit_count: new critical threshold, actual count/number :type crit_count: int """""" try: self.limits[limit_name].set_threshold_override( warn_percent=warn_percent, warn_count=warn_count, crit_percent=crit_percent, crit_count=crit_count ) except KeyError: raise ValueError(""{s} service has no '{l}' limit"".format( s=self.service_name, l=limit_name))","Override the default warning and critical thresholds used to evaluate the specified limit's usage. Theresholds can be specified as a percentage of the limit, or as a usage count, or both. :param warn_percent: new warning threshold, percentage used :type warn_percent: int :param warn_count: new warning threshold, actual count/number :type warn_count: int :param crit_percent: new critical threshold, percentage used :type crit_percent: int :param crit_count: new critical threshold, actual count/number :type crit_count: int" "def block(self, signals=None, isBlocked=True): """""" Sets the block on any provided signals, or to all signals :param signals: defaults to all signals. Accepts either a single string or a list of strings :param isBlocked: the state to set the signal to """""" if signals: try: if isinstance(signals, basestring): signals = [signals] except NameError: if isinstance(signals, str): signals = [signals] signals = signals or self.keys() for signal in signals: if signal not in self: raise RuntimeError(""Could not find signal matching %s"" % signal) self[signal].block(isBlocked)","Sets the block on any provided signals, or to all signals :param signals: defaults to all signals. Accepts either a single string or a list of strings :param isBlocked: the state to set the signal to" "def parse_config(lines, module=None): """"""Parse a config file. Names referenced within the config file are found within the calling scope. For example:: >>> from potpy.configparser import parse_config >>> class foo: ... @staticmethod ... def bar(): ... pass ... >>> config = ''' ... /foo: ... foo.bar ... ''' >>> router = parse_config(config.splitlines()) would find the ``bar`` method of the ``foo`` class, because ``foo`` is in the same scope as the call to parse_config. :param lines: An iterable of configuration lines (an open file object will do). :param module: Optional. If provided and not None, look for referenced names within this object instead of the calling module. """""" if module is None: module = _calling_scope(2) lines = IndentChecker(lines) path_router = PathRouter() for depth, line in lines: if depth > 0: raise SyntaxError('unexpected indent') name, path, types = parse_path_spec(line) if types: template_arg = (path, dict( (k, find_object(module, v)) for k, v in types.iteritems() )) else: template_arg = path handler = read_handler_block(lines, module) path_router.add(name, template_arg, handler) return path_router","Parse a config file. Names referenced within the config file are found within the calling scope. For example:: >>> from potpy.configparser import parse_config >>> class foo: ... @staticmethod ... def bar(): ... pass ... >>> config = ''' ... /foo: ... foo.bar ... ''' >>> router = parse_config(config.splitlines()) would find the ``bar`` method of the ``foo`` class, because ``foo`` is in the same scope as the call to parse_config. 
:param lines: An iterable of configuration lines (an open file object will do). :param module: Optional. If provided and not None, look for referenced names within this object instead of the calling module." "def bottom_sections(self): """""" The number of cells that touch the bottom side. Returns ------- sections : int The number of sections on the top """""" bottom_line = self.text.split('\n')[-1] sections = len(bottom_line.split('+')) - 2 return sections","The number of cells that touch the bottom side. Returns ------- sections : int The number of sections on the top" "def _clone_node(self, node): """"""Return new node based on an existing one. This is normally for when a node dies, this will copy the spec of the existing node and create a new one with a new id. The new node will have been setup so it will start calling the ""worker_*"" hooks and do work soon. """""" spec = node.gateway.spec spec.id = None self.nodemanager.group.allocate_id(spec) node = self.nodemanager.setup_node(spec, self.queue.put) self._active_nodes.add(node) return node","Return new node based on an existing one. This is normally for when a node dies, this will copy the spec of the existing node and create a new one with a new id. The new node will have been setup so it will start calling the ""worker_*"" hooks and do work soon." "def _add_markers(self, markers, tar, files_in_layers, added_symlinks): """""" This method is responsible for adding back all markers that were not added to the squashed layer AND files they refer to can be found in layers we do not squash. """""" if markers: self.log.debug(""Marker files to add: %s"" % [o.name for o in markers.keys()]) else: # No marker files to add return # https://github.com/goldmann/docker-squash/issues/108 # Some tar archives do have the filenames prefixed with './' # which does not have any effect when we unpack the tar achive, # but when processing tar content - we see this. tar_files = [self._normalize_path(x) for x in tar.getnames()] for marker, marker_file in six.iteritems(markers): actual_file = marker.name.replace('.wh.', '') normalized_file = self._normalize_path(actual_file) should_be_added_back = False if self._file_should_be_skipped(normalized_file, added_symlinks): self.log.debug( ""Skipping '%s' marker file, this file is on a symlink path"" % normalized_file) continue if normalized_file in tar_files: self.log.debug( ""Skipping '%s' marker file, this file was added earlier for some reason..."" % normalized_file) continue if files_in_layers: for files in files_in_layers.values(): if normalized_file in files: should_be_added_back = True break else: # There are no previous layers, so we need to add it back # In fact this shouldn't happen since having a marker file # where there is no previous layer does not make sense. should_be_added_back = True if should_be_added_back: self.log.debug( ""Adding '%s' marker file back..."" % marker.name) # Marker files on AUFS are hardlinks, we need to create # regular files, therefore we need to recreate the tarinfo # object tar.addfile(tarfile.TarInfo(name=marker.name), marker_file) # Add the file name to the list too to avoid re-reading all files # in tar archive tar_files.append(normalized_file) else: self.log.debug( ""Skipping '%s' marker file..."" % marker.name)","This method is responsible for adding back all markers that were not added to the squashed layer AND files they refer to can be found in layers we do not squash." 
"def _cast_field(self, cast_to, value): """""" Convert field type from raw bytes to native python type :param cast_to: native python type to cast to :type cast_to: a type object (one of bytes, int, unicode (str for py3k)) :param value: raw value from the database :type value: bytes :return: converted value :rtype: value of native python type (one of bytes, int, unicode (str for py3k)) """""" if cast_to in (int, long, str): return cast_to(value) elif cast_to == unicode: try: value = value.decode(self.charset, self.errors) except UnicodeEncodeError, e: raise InvalidData(""Error encoding unicode value '%s': %s"" % (repr(value), e)) return value elif cast_to in (any, bytes): return value else: raise TypeError(""Invalid field type %s"" % (cast_to))","Convert field type from raw bytes to native python type :param cast_to: native python type to cast to :type cast_to: a type object (one of bytes, int, unicode (str for py3k)) :param value: raw value from the database :type value: bytes :return: converted value :rtype: value of native python type (one of bytes, int, unicode (str for py3k))" "def normalize_version(version): """"""\ Canonicalizes the provided `version`. If the `version` is ``None``, this function returns ``None``. Otherwise this function checks if `version` is an integer or a Micro QR Code version. In case the string represents a Micro QR Code version, an uppercased string identifier is returned. If the `version` does not represent a valid version identifier (aside of ``None``, a VersionError is raised. :param version: An integer, a string or ``None``. :raises: VersionError: In case the version is not ``None`` and does not represent a valid (Micro) QR Code version. :rtype: int, str or ``None`` """""" if version is None: return None error = False try: version = int(version) # Don't want Micro QR Code constants as input error = version < 1 except (ValueError, TypeError): try: version = consts.MICRO_VERSION_MAPPING[version.upper()] except (KeyError, AttributeError): error = True if error or not 0 < version < 41 and version not in consts.MICRO_VERSIONS: raise VersionError('Unsupported version ""{0}"". ' 'Supported: {1} and 1 .. 40' .format(version, ', '.join(sorted(consts.MICRO_VERSION_MAPPING.keys())))) return version","\ Canonicalizes the provided `version`. If the `version` is ``None``, this function returns ``None``. Otherwise this function checks if `version` is an integer or a Micro QR Code version. In case the string represents a Micro QR Code version, an uppercased string identifier is returned. If the `version` does not represent a valid version identifier (aside of ``None``, a VersionError is raised. :param version: An integer, a string or ``None``. :raises: VersionError: In case the version is not ``None`` and does not represent a valid (Micro) QR Code version. :rtype: int, str or ``None``" "def get_url_array(self): """""" Get all url-objects in an array :return sites (array): The sites from the JSON-file """""" urlarray = [] for urlobjects in self.__json_object[""base_urls""]: urlarray.append(urlobjects[""url""]) return urlarray","Get all url-objects in an array :return sites (array): The sites from the JSON-file" "def set_param(self, param, value, header='Content-Type', requote=True, charset=None, language=''): """"""Set a parameter in the Content-Type header. If the parameter already exists in the header, its value will be replaced with the new value. 
If header is Content-Type and has not yet been defined for this message, it will be set to ""text/plain"" and the new parameter and value will be appended as per RFC 2045. An alternate header can specified in the header argument, and all parameters will be quoted as necessary unless requote is False. If charset is specified, the parameter will be encoded according to RFC 2231. Optional language specifies the RFC 2231 language, defaulting to the empty string. Both charset and language should be strings. """""" if not isinstance(value, tuple) and charset: value = (charset, language, value) if header not in self and header.lower() == 'content-type': ctype = 'text/plain' else: ctype = self.get(header) if not self.get_param(param, header=header): if not ctype: ctype = _formatparam(param, value, requote) else: ctype = SEMISPACE.join( [ctype, _formatparam(param, value, requote)]) else: ctype = '' for old_param, old_value in self.get_params(header=header, unquote=requote): append_param = '' if old_param.lower() == param.lower(): append_param = _formatparam(param, value, requote) else: append_param = _formatparam(old_param, old_value, requote) if not ctype: ctype = append_param else: ctype = SEMISPACE.join([ctype, append_param]) if ctype != self.get(header): del self[header] self[header] = ctype","Set a parameter in the Content-Type header. If the parameter already exists in the header, its value will be replaced with the new value. If header is Content-Type and has not yet been defined for this message, it will be set to ""text/plain"" and the new parameter and value will be appended as per RFC 2045. An alternate header can specified in the header argument, and all parameters will be quoted as necessary unless requote is False. If charset is specified, the parameter will be encoded according to RFC 2231. Optional language specifies the RFC 2231 language, defaulting to the empty string. Both charset and language should be strings." "def get_fixed_param_names(self) -> List[str]: """""" Get the fixed params of the network. :return: List of strings, names of the layers """""" args = set(self.args.keys()) | set(self.auxs.keys()) return list(args & set(self.sym.list_arguments()))","Get the fixed params of the network. :return: List of strings, names of the layers" "def build_project(self): """""" Build IAR project """""" # > IarBuild [project_path] -build [project_name] proj_path = join(getcwd(), self.workspace['files']['ewp']) if proj_path.split('.')[-1] != 'ewp': proj_path += '.ewp' if not os.path.exists(proj_path): logger.debug(""The file: %s does not exists, exported prior building?"" % proj_path) return -1 logger.debug(""Building IAR project: %s"" % proj_path) args = [join(self.env_settings.get_env_settings('iar'), 'IarBuild.exe'), proj_path, '-build', os.path.splitext(os.path.basename(self.workspace['files']['ewp']))[0]] logger.debug(args) try: p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE) output, err = p.communicate() except: logger.error(""Project: %s build failed. 
Please check IARBUILD path in the user_settings.py file."" % self.workspace['files']['ewp']) return -1 else: build_log_path = os.path.join(os.path.dirname(proj_path),'build_log.txt') with open(build_log_path, 'w') as f: f.write(output) num_errors = self._parse_subprocess_output(output) if num_errors == 0: logger.info(""Project: %s build completed."" % self.workspace['files']['ewp']) return 0 else: logger.error(""Project: %s build failed with %d errors"" % (self.workspace['files']['ewp'], num_errors)) return -1",Build IAR project "def getUsage(api_key=None, api_version=None): ''' Show current usages statistics :param api_key: The Random.org api key. :param api_version: The Random.org api version. :return: The current usage statistics. CLI Example: .. code-block:: bash salt '*' random_org.getUsage salt '*' random_org.getUsage api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 api_version=1 ''' ret = {'res': True} if not api_key or not api_version: try: options = __salt__['config.option']('random_org') if not api_key: api_key = options.get('api_key') if not api_version: api_version = options.get('api_version') except (NameError, KeyError, AttributeError): log.error('No Random.org api key found.') ret['message'] = 'No Random.org api key or api version found.' ret['res'] = False return ret if isinstance(api_version, int): api_version = six.text_type(api_version) _function = RANDOM_ORG_FUNCTIONS.get(api_version).get('getUsage').get('method') data = {} data['id'] = 1911220 data['jsonrpc'] = '2.0' data['method'] = _function data['params'] = {'apiKey': api_key} result = _query(api_version=api_version, data=data) if result: ret['bitsLeft'] = result.get('bitsLeft') ret['requestsLeft'] = result.get('requestsLeft') ret['totalBits'] = result.get('totalBits') ret['totalRequests'] = result.get('totalRequests') else: ret['res'] = False ret['message'] = result['message'] return ret","Show current usages statistics :param api_key: The Random.org api key. :param api_version: The Random.org api version. :return: The current usage statistics. CLI Example: .. code-block:: bash salt '*' random_org.getUsage salt '*' random_org.getUsage api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 api_version=1" "def _assemble_and_send_validation_request(self): """""" Fires off the Fedex shipment validation request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_validation_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED. """""" # Fire off the query. return self.client.service.validateShipment( WebAuthenticationDetail=self.WebAuthenticationDetail, ClientDetail=self.ClientDetail, TransactionDetail=self.TransactionDetail, Version=self.VersionId, RequestedShipment=self.RequestedShipment)","Fires off the Fedex shipment validation request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_validation_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED." "def assert_false(expr, msg_fmt=""{msg}""): """"""Fail the test unless the expression is falsy. >>> assert_false("""") >>> assert_false(""Hello World!"") Traceback (most recent call last): ... AssertionError: 'Hello World!' is not falsy The following msg_fmt arguments are supported: * msg - the default error message * expr - tested expression """""" if expr: msg = ""{!r} is not falsy"".format(expr) fail(msg_fmt.format(msg=msg, expr=expr))","Fail the test unless the expression is falsy. >>> assert_false("""") >>> assert_false(""Hello World!"") Traceback (most recent call last): ... AssertionError: 'Hello World!' 
is not falsy The following msg_fmt arguments are supported: * msg - the default error message * expr - tested expression" "def _create_content_body(self, body): """"""Split body based on the maximum frame size. This function is based on code from Rabbitpy. https://github.com/gmr/rabbitpy :param bytes|str|unicode body: Message payload :rtype: collections.Iterable """""" frames = int(math.ceil(len(body) / float(self._max_frame_size))) for offset in compatibility.RANGE(0, frames): start_frame = self._max_frame_size * offset end_frame = start_frame + self._max_frame_size body_len = len(body) if end_frame > body_len: end_frame = body_len yield pamqp_body.ContentBody(body[start_frame:end_frame])","Split body based on the maximum frame size. This function is based on code from Rabbitpy. https://github.com/gmr/rabbitpy :param bytes|str|unicode body: Message payload :rtype: collections.Iterable" "def get_video_transcript_data(video_id, language_code): """""" Get video transcript data Arguments: video_id(unicode): An id identifying the Video. language_code(unicode): it will be the language code of the requested transcript. Returns: A dict containing transcript file name and its content. """""" video_transcript = VideoTranscript.get_or_none(video_id, language_code) if video_transcript: try: return dict(file_name=video_transcript.filename, content=video_transcript.transcript.file.read()) except Exception: logger.exception( '[edx-val] Error while retrieving transcript for video=%s -- language_code=%s', video_id, language_code ) raise","Get video transcript data Arguments: video_id(unicode): An id identifying the Video. language_code(unicode): it will be the language code of the requested transcript. Returns: A dict containing transcript file name and its content." "def on_response(self, ch, method_frame, props, body): """""" setup response is correlation id is the good one """""" LOGGER.debug(""rabbitmq.Requester.on_response"") if self.corr_id == props.correlation_id: self.response = {'props': props, 'body': body} else: LOGGER.warn(""rabbitmq.Requester.on_response - discarded response : "" + str(props.correlation_id)) LOGGER.debug(""natsd.Requester.on_response - discarded response : "" + str({ 'properties': props, 'body': body }))",setup response is correlation id is the good one "def _is_axis_allowed(self, axis): """"""Check if axis are allowed. In case the calculation is requested over CA items dimension, it is not valid. It's valid in all other cases. """""" if axis is None: # If table direction was requested, we must ensure that each slice # doesn't have the CA items dimension (thus the [-2:] part). It's # OK for the 0th dimension to be items, since no calculation is # performed over it. if DT.CA_SUBVAR in self.dim_types[-2:]: return False return True if isinstance(axis, int): if self.ndim == 1 and axis == 1: # Special allowed case of a 1D cube, where ""row"" # directions is requested. return True axis = [axis] # ---axis is a tuple--- for dim_idx in axis: if self.dim_types[dim_idx] == DT.CA_SUBVAR: # If any of the directions explicitly asked for directly # corresponds to the CA items dimension, the requested # calculation is not valid. return False return True","Check if axis are allowed. In case the calculation is requested over CA items dimension, it is not valid. It's valid in all other cases." "def _add_slide_number(self, slide_no): """"""Add the slide number to the output if enabled."""""" if self.builder.config.slide_numbers: self.body.append( '\n
<div class="slide-no">%s</div>
\n' % (slide_no,), )",Add the slide number to the output if enabled. "def get_language_stemmer(language): """"""Retrieves the SnowballStemmer for a particular language. Args: language (str): ISO-639-1 code of the language. """""" from lunr.languages import SUPPORTED_LANGUAGES from nltk.stem.snowball import SnowballStemmer return SnowballStemmer(SUPPORTED_LANGUAGES[language])","Retrieves the SnowballStemmer for a particular language. Args: language (str): ISO-639-1 code of the language." "def _run_default_moderator(comment, content_object, request): """""" Run the default moderator """""" # The default moderator will likely not check things like ""auto close"". # It can still provide akismet and bad word checking. if not default_moderator.allow(comment, content_object, request): # Comment will be disallowed outright (HTTP 403 response) return False if default_moderator.moderate(comment, content_object, request): comment.is_public = False",Run the default moderator "def _expressions_from_rules(self, rule_syntax, custom_rules): """"""Return the rules for parsing the grammar definition syntax. Return a 2-tuple: a dict of rule names pointing to their expressions, and then the top-level expression for the first rule. """""" # Hard-code enough of the rules to parse the grammar that describes the # grammar description language, to bootstrap: comment = Regex(r'#[^\r\n]*', name='comment') meaninglessness = OneOf(Regex(r'\s+'), comment, name='meaninglessness') _ = ZeroOrMore(meaninglessness, name='_') equals = Sequence(Literal('='), _, name='equals') label = Sequence(Regex(r'[a-zA-Z_][a-zA-Z_0-9]*'), _, name='label') reference = Sequence(label, Not(equals), name='reference') quantifier = Sequence(Regex(r'[*+?]'), _, name='quantifier') # This pattern supports empty literals. TODO: A problem? spaceless_literal = Regex(r'u?r?""[^""\\]*(?:\\.[^""\\]*)*""', ignore_case=True, dot_all=True, name='spaceless_literal') literal = Sequence(spaceless_literal, _, name='literal') regex = Sequence(Literal('~'), literal, Regex('[ilmsuxa]*', ignore_case=True), _, name='regex') atom = OneOf(reference, literal, regex, name='atom') quantified = Sequence(atom, quantifier, name='quantified') term = OneOf(quantified, atom, name='term') not_term = Sequence(Literal('!'), term, _, name='not_term') term.members = (not_term,) + term.members sequence = Sequence(term, OneOrMore(term), name='sequence') or_term = Sequence(Literal('/'), _, term, name='or_term') ored = Sequence(term, OneOrMore(or_term), name='ored') expression = OneOf(ored, sequence, term, name='expression') rule = Sequence(label, equals, expression, name='rule') rules = Sequence(_, OneOrMore(rule), name='rules') # Use those hard-coded rules to parse the (more extensive) rule syntax. # (For example, unless I start using parentheses in the rule language # definition itself, I should never have to hard-code expressions for # those above.) rule_tree = rules.parse(rule_syntax) # Turn the parse tree into a map of expressions: return RuleVisitor().visit(rule_tree)","Return the rules for parsing the grammar definition syntax. Return a 2-tuple: a dict of rule names pointing to their expressions, and then the top-level expression for the first rule." "def copytree_hardlink(source, dest): """""" Recursively copy a directory ala shutils.copytree, but hardlink files instead of copying. Available on UNIX systems only. 
"""""" copy2 = shutil.copy2 try: shutil.copy2 = os.link shutil.copytree(source, dest) finally: shutil.copy2 = copy2","Recursively copy a directory ala shutils.copytree, but hardlink files instead of copying. Available on UNIX systems only." "def create_new_attachment_by_content_id(self, content_id, attachments, callback=None): """""" Add one or more attachments to a Confluence Content entity, with optional comments. Comments are optional, but if included there must be as many comments as there are files, and the comments must be in the same order as the files. :param content_id (string): A string containing the id of the attachments content container. :param attachments (list of dicts or dict): This is a list of dictionaries or a dictionary. Each dictionary must have the key ""file"" with a value that is I/O like (file, StringIO, etc.), and may also have a key ""comment"" with a string for file comments. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/child/attachment endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """""" if isinstance(attachments, list): assert all(isinstance(at, dict) and ""file"" in list(at.keys()) for at in attachments) elif isinstance(attachments, dict): assert ""file"" in list(attachments.keys()) else: assert False return self._service_post_request(""rest/api/content/{id}/child/attachment"".format(id=content_id), headers={""X-Atlassian-Token"": ""nocheck""}, files=attachments, callback=callback)","Add one or more attachments to a Confluence Content entity, with optional comments. Comments are optional, but if included there must be as many comments as there are files, and the comments must be in the same order as the files. :param content_id (string): A string containing the id of the attachments content container. :param attachments (list of dicts or dict): This is a list of dictionaries or a dictionary. Each dictionary must have the key ""file"" with a value that is I/O like (file, StringIO, etc.), and may also have a key ""comment"" with a string for file comments. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/child/attachment endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially." "def modify(self, **kwargs): """"""Modify settings for a check. The provided settings will overwrite previous values. Settings not provided will stay the same as before the update. To clear an existing value, provide an empty value. Please note that you cannot change the type of a check once it has been created. 
General parameters: * name -- Check name Type: String * host - Target host Type: String * paused -- Check should be paused Type: Boolean * resolution -- Check resolution time (in minutes) Type: Integer [1, 5, 15, 30, 60] * contactids -- Comma separated list of contact IDs Type: String * sendtoemail -- Send alerts as email Type: Boolean * sendtosms -- Send alerts as SMS Type: Boolean * sendtotwitter -- Send alerts through Twitter Type: Boolean * sendtoiphone -- Send alerts to iPhone Type: Boolean * sendtoandroid -- Send alerts to Android Type: Boolean * sendnotificationwhendown -- Send notification when check is down the given number of times Type: Integer * notifyagainevery -- Set how many results to wait for in between notices Type: Integer * notifywhenbackup -- Notify when back up again Type: Boolean * use_legacy_notifications -- Use old notifications instead of BeepManager Type: Boolean * probe_filters -- Can be one of region: NA, region: EU, region: APAC Type: String HTTP check options: * url -- Target path on server Type: String * encryption -- Use SSL/TLS Type: Boolean * port -- Target server port Type: Integer * auth -- Username and password for HTTP authentication Example: user:password Type: String * shouldcontain -- Target site should contain this string. Cannot be combined with 'shouldnotcontain' Type: String * shouldnotcontain -- Target site should not contain this string. Cannot be combined with 'shouldcontain' Type: String * postdata -- Data that should be posted to the web page, for example submission data for a sign-up or login form. The data needs to be formatted in the same way as a web browser would send it to the web server Type: String * requestheader -- Custom HTTP header, replace with desired header name. Header in form: Header:Value Type: String HTTPCustom check options: * url -- Target path on server Type: String * encryption -- Use SSL/TLS Type: Boolean * port -- Target server port Type: Integer * auth -- Username and password for HTTP authentication Example: user:password Type: String * additionalurls -- Colon-separated list of additonal URLS with hostname included Type: String TCP check options: * port -- Target server port Type: Integer * stringtosend -- String to send Type: String * stringtoexpect -- String to expect in response Type: String DNS check options: * expectedip -- Expected IP Type: String * nameserver -- Nameserver to check Type: String UDP check options: * port -- Target server port Type: Integer * stringtosend -- String to send Type: String * stringtoexpect -- String to expect in response Type: String SMTP check options: * port -- Target server port Type: Integer * auth -- Username and password for target SMTP authentication. 
Example: user:password Type: String * stringtoexpect -- String to expect in response Type: String * encryption -- Use connection encryption Type: Boolean POP3 check options: * port -- Target server port Type: Integer * stringtoexpect -- String to expect in response Type: String * encryption -- Use connection encryption Type: Boolean IMAP check options: * port -- Target server port Type: Integer * stringtoexpect -- String to expect in response Type: String * encryption -- Use connection encryption Type: Boolean """""" # Warn user about unhandled parameters for key in kwargs: if key not in ['paused', 'resolution', 'contactids', 'sendtoemail', 'sendtosms', 'sendtotwitter', 'sendtoiphone', 'sendnotificationwhendown', 'notifyagainevery', 'notifywhenbackup', 'created', 'type', 'hostname', 'status', 'lasterrortime', 'lasttesttime', 'url', 'encryption', 'port', 'auth', 'shouldcontain', 'shouldnotcontain', 'postdata', 'additionalurls', 'stringtosend', 'stringtoexpect', 'expectedip', 'nameserver', 'use_legacy_notifications', 'host', 'alert_policy', 'autoresolve', 'probe_filters']: sys.stderr.write(""'%s'"" % key + ' is not a valid argument of' + '.modify()\n') # If one of the legacy parameters is used, it is required to set the legacy flag. # https://github.com/KennethWilke/PingdomLib/issues/12 if any([k for k in kwargs if k in legacy_notification_parameters]): if ""use_legacy_notifications"" in kwargs and kwargs[""use_legacy_notifications""] != True: raise Exception(""Cannot set legacy parameter when use_legacy_notifications is not True"") kwargs[""use_legacy_notifications""] = True response = self.pingdom.request(""PUT"", 'checks/%s' % self.id, kwargs) return response.json()['message']","Modify settings for a check. The provided settings will overwrite previous values. Settings not provided will stay the same as before the update. To clear an existing value, provide an empty value. Please note that you cannot change the type of a check once it has been created. General parameters: * name -- Check name Type: String * host - Target host Type: String * paused -- Check should be paused Type: Boolean * resolution -- Check resolution time (in minutes) Type: Integer [1, 5, 15, 30, 60] * contactids -- Comma separated list of contact IDs Type: String * sendtoemail -- Send alerts as email Type: Boolean * sendtosms -- Send alerts as SMS Type: Boolean * sendtotwitter -- Send alerts through Twitter Type: Boolean * sendtoiphone -- Send alerts to iPhone Type: Boolean * sendtoandroid -- Send alerts to Android Type: Boolean * sendnotificationwhendown -- Send notification when check is down the given number of times Type: Integer * notifyagainevery -- Set how many results to wait for in between notices Type: Integer * notifywhenbackup -- Notify when back up again Type: Boolean * use_legacy_notifications -- Use old notifications instead of BeepManager Type: Boolean * probe_filters -- Can be one of region: NA, region: EU, region: APAC Type: String HTTP check options: * url -- Target path on server Type: String * encryption -- Use SSL/TLS Type: Boolean * port -- Target server port Type: Integer * auth -- Username and password for HTTP authentication Example: user:password Type: String * shouldcontain -- Target site should contain this string. Cannot be combined with 'shouldnotcontain' Type: String * shouldnotcontain -- Target site should not contain this string. 
Cannot be combined with 'shouldcontain' Type: String * postdata -- Data that should be posted to the web page, for example submission data for a sign-up or login form. The data needs to be formatted in the same way as a web browser would send it to the web server Type: String * requestheader -- Custom HTTP header, replace with desired header name. Header in form: Header:Value Type: String HTTPCustom check options: * url -- Target path on server Type: String * encryption -- Use SSL/TLS Type: Boolean * port -- Target server port Type: Integer * auth -- Username and password for HTTP authentication Example: user:password Type: String * additionalurls -- Colon-separated list of additional URLs with hostname included Type: String TCP check options: * port -- Target server port Type: Integer * stringtosend -- String to send Type: String * stringtoexpect -- String to expect in response Type: String DNS check options: * expectedip -- Expected IP Type: String * nameserver -- Nameserver to check Type: String UDP check options: * port -- Target server port Type: Integer * stringtosend -- String to send Type: String * stringtoexpect -- String to expect in response Type: String SMTP check options: * port -- Target server port Type: Integer * auth -- Username and password for target SMTP authentication. Example: user:password Type: String * stringtoexpect -- String to expect in response Type: String * encryption -- Use connection encryption Type: Boolean POP3 check options: * port -- Target server port Type: Integer * stringtoexpect -- String to expect in response Type: String * encryption -- Use connection encryption Type: Boolean IMAP check options: * port -- Target server port Type: Integer * stringtoexpect -- String to expect in response Type: String * encryption -- Use connection encryption Type: Boolean" "def recreate_grams(self): """"""Re-create grams for database. In normal situations, you never need to call this method. But after migrating the DB, this method is useful. :param session: DB session :type session: :class:`sqlalchemy.orm.Session` """""" session = self.Session() for document in session.query(Document).all(): logger.info(document.text) grams = self._get_grams(session, document.text, make=True) document.grams = list(grams) broken_links = session.query(Gram) \ .filter(~Gram.documents.any()).all() for gram in broken_links: session.delete(gram) session.commit()","Re-create grams for database. In normal situations, you never need to call this method. But after migrating the DB, this method is useful. :param session: DB session :type session: :class:`sqlalchemy.orm.Session`" "def create_toolbox(self, filename): """""" Creates a new Python toolbox where each task name is a GPTool in the toolbox. :param filename: the filename of the generated toolbox :param service_name: The name of the ESE service containing the tasks. Only tasks from one service may be used. :param tasks: The list of tasks from the service to build as GPTools.
"""""" filename = os.path.splitext(filename)[0] label = os.path.basename(filename) # Get task information first so we can build the tool list tool_list = [] for task in self.tasks: tool_list.append(task.name) file_descriptor = os.open(filename + '.pyt', os.O_WRONLY | os.O_CREAT | os.O_EXCL) with os.fdopen(file_descriptor, 'w') as self.toolbox_file: self.toolbox_file.write(self._imports_template.substitute({})) toolbox_class = self._toolbox_class_template.substitute( {'label': label, 'alias': self.alias, 'toolList': param_builder.convert_list(tool_list) } ) self.toolbox_file.write(toolbox_class) for task in self.tasks: gp_tool = self.create_tool(task) self.toolbox_file.write(gp_tool) toolbox_help_filename = '.'.join((filename, task.name, 'pyt', 'xml')) help_builder.create(toolbox_help_filename, task, self.alias) return filename","Creates a new Python toolbox where each task name is a GPTool in the toolbox. :param filename: the filename of the generated toolbox :param service_name: The name of the ESE service containing the tasks. Only tasks from one service may be used. :param tasks: The list of tasks from the service to build as GPTools." "def addVote(self, prepare: Prepare, voter: str) -> None: """""" Add the specified PREPARE to this replica's list of received PREPAREs. :param prepare: the PREPARE to add to the list :param voter: the name of the node who sent the PREPARE """""" self._add_msg(prepare, voter)","Add the specified PREPARE to this replica's list of received PREPAREs. :param prepare: the PREPARE to add to the list :param voter: the name of the node who sent the PREPARE" "def keyReleaseEvent(self, event): """"""Reimplement Qt method. Handle ""most recent used"" tab behavior, When ctrl is released and tab_switcher is visible, tab will be changed. """""" if self.isVisible(): qsc = get_shortcut(context='Editor', name='Go to next file') for key in qsc.split('+'): key = key.lower() if ((key == 'ctrl' and event.key() == Qt.Key_Control) or (key == 'alt' and event.key() == Qt.Key_Alt)): self.item_selected() event.accept()","Reimplement Qt method. Handle ""most recent used"" tab behavior, When ctrl is released and tab_switcher is visible, tab will be changed." "def _set_windows(self, ticks, bars): """""" be aware of default windows """""" self.tick_window = ticks self.bar_window = bars",be aware of default windows "def read(self, table, columns, keyset, index="""", limit=0, partition=None): """"""Perform a ``StreamingRead`` API request for rows in a table. :type table: str :param table: name of the table from which to fetch data :type columns: list of str :param columns: names of columns to be retrieved :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet` :param keyset: keys / ranges identifying rows to be retrieved :type index: str :param index: (Optional) name of index to use, rather than the table's primary key :type limit: int :param limit: (Optional) maximum number of rows to return. Incompatible with ``partition``. :type partition: bytes :param partition: (Optional) one of the partition tokens returned from :meth:`partition_read`. Incompatible with ``limit``. :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. :raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots. 
"""""" if self._read_request_count > 0: if not self._multi_use: raise ValueError(""Cannot re-use single-use snapshot."") if self._transaction_id is None: raise ValueError(""Transaction ID pending."") database = self._session._database api = database.spanner_api metadata = _metadata_with_prefix(database.name) transaction = self._make_txn_selector() restart = functools.partial( api.streaming_read, self._session.name, table, columns, keyset._to_pb(), transaction=transaction, index=index, limit=limit, partition_token=partition, metadata=metadata, ) iterator = _restart_on_unavailable(restart) self._read_request_count += 1 if self._multi_use: return StreamedResultSet(iterator, source=self) else: return StreamedResultSet(iterator)","Perform a ``StreamingRead`` API request for rows in a table. :type table: str :param table: name of the table from which to fetch data :type columns: list of str :param columns: names of columns to be retrieved :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet` :param keyset: keys / ranges identifying rows to be retrieved :type index: str :param index: (Optional) name of index to use, rather than the table's primary key :type limit: int :param limit: (Optional) maximum number of rows to return. Incompatible with ``partition``. :type partition: bytes :param partition: (Optional) one of the partition tokens returned from :meth:`partition_read`. Incompatible with ``limit``. :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. :raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots." "def _replay_index(replay_dir): """"""Output information for a directory of replays."""""" run_config = run_configs.get() replay_dir = run_config.abs_replay_path(replay_dir) print(""Checking: "", replay_dir) with run_config.start(want_rgb=False) as controller: print(""-"" * 60) print("","".join(( ""filename"", ""build"", ""map_name"", ""game_duration_loops"", ""players"", ""P1-outcome"", ""P1-race"", ""P1-apm"", ""P2-race"", ""P2-apm"", ))) try: bad_replays = [] for file_path in run_config.replay_paths(replay_dir): file_name = os.path.basename(file_path) try: info = controller.replay_info(run_config.replay_data(file_path)) except remote_controller.RequestError as e: bad_replays.append(""%s: %s"" % (file_name, e)) continue if info.HasField(""error""): print(""failed:"", file_name, info.error, info.error_details) bad_replays.append(file_name) else: out = [ file_name, info.base_build, info.map_name, info.game_duration_loops, len(info.player_info), sc_pb.Result.Name(info.player_info[0].player_result.result), sc_common.Race.Name(info.player_info[0].player_info.race_actual), info.player_info[0].player_apm, ] if len(info.player_info) >= 2: out += [ sc_common.Race.Name( info.player_info[1].player_info.race_actual), info.player_info[1].player_apm, ] print(u"","".join(str(s) for s in out)) except KeyboardInterrupt: pass finally: if bad_replays: print(""\n"") print(""Replays with errors:"") print(""\n"".join(bad_replays))",Output information for a directory of replays. "def _check(self, args): """"""Exit in case of multiple exclusive arguments."""""" if sum(bool(args[arg]) for arg in self._mapping) > 1: raise DocoptExit(_('These options are mutually exclusive: {0}', ', '.join(self._mapping)))",Exit in case of multiple exclusive arguments. 
"def is_parent_of_objective(self, id_=None, objective_id=None): """"""Tests if an Id is a direct parent of an objective. arg: id (osid.id.Id): an Id arg: objective_id (osid.id.Id): the Id of an objective return: (boolean) - true if this id is a parent of objective_id, false otherwise raise: NotFound - objective_id is not found raise: NullArgument - id or objective_id is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method must be implemented. implementation notes: If id not found return false. """""" if id_ is None or objective_id is None: raise NullArgument() return id_ in list(self.get_parent_objective_ids(objective_id))","Tests if an Id is a direct parent of an objective. arg: id (osid.id.Id): an Id arg: objective_id (osid.id.Id): the Id of an objective return: (boolean) - true if this id is a parent of objective_id, false otherwise raise: NotFound - objective_id is not found raise: NullArgument - id or objective_id is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method must be implemented. implementation notes: If id not found return false." "def context(self, name, ctx): """""" Execute the block with the given context applied. This manager ensures that the context is removed even if an exception is raised within the context. """""" self.enter_context(name, ctx) try: yield finally: self.exit_context(name)","Execute the block with the given context applied. This manager ensures that the context is removed even if an exception is raised within the context." "def run_command(self, command, message): """""" Use subprocess; feed the message to our command over stdin """""" proc = subprocess.Popen([ 'echo \'%s\' | %s' % (fedmsg.encoding.dumps(message), command) ], shell=True, executable='/bin/bash') return proc.wait()",Use subprocess; feed the message to our command over stdin "def _http_get(self, url): """""" Make an HTTP GET request to the specified URL and return the response. Retries ------- The constructor of this class takes an argument specifying the number of times to retry a GET. The statuses which are retried on are: 408, 500, 502, 503, and 504. Returns ------- An HTTP response, containing response headers and content. Exceptions ---------- * PythonKCMeetupsBadResponse * PythonKCMeetupsMeetupDown * PythonKCMeetupsRateLimitExceeded """""" for try_number in range(self._http_retries + 1): response = requests.get(url, timeout=self._http_timeout) if response.status_code == 200: return response if (try_number >= self._http_retries or response.status_code not in (408, 500, 502, 503, 504)): if response.status_code >= 500: raise PythonKCMeetupsMeetupDown(response, response.content) if response.status_code == 400: try: data = json.loads(response.content) if data.get('code', None) == 'limit': raise PythonKCMeetupsRateLimitExceeded except: # Don't lose original error when JSON is bad pass raise PythonKCMeetupsBadResponse(response, response.content)","Make an HTTP GET request to the specified URL and return the response. Retries ------- The constructor of this class takes an argument specifying the number of times to retry a GET. The statuses which are retried on are: 408, 500, 502, 503, and 504. Returns ------- An HTTP response, containing response headers and content. 
Exceptions ---------- * PythonKCMeetupsBadResponse * PythonKCMeetupsMeetupDown * PythonKCMeetupsRateLimitExceeded" "def print_event_count(): """"""Print the number of events grouped by source."""""" for source in archive.list_event_sources(): event_count = 0 for group in archive.list_event_histogram(source): for rec in group.records: event_count += rec.count print(' {: <40} {: >20}'.format(source, event_count))",Print the number of events grouped by source. "def write_packed(self, outfile, rows): """""" Write PNG file to `outfile`. The pixel data comes from `rows` which should be in boxed row packed format. Each row should be a sequence of packed bytes. Technically, this method does work for interlaced images but it is best avoided. For interlaced images, the rows should be presented in the order that they appear in the file. This method should not be used when the source image bit depth is not one naturally supported by PNG; the bit depth should be 1, 2, 4, 8, or 16. """""" if self.rescale: raise Error(""write_packed method not suitable for bit depth %d"" % self.rescale[0]) return self.write_passes(outfile, rows, packed=True)","Write PNG file to `outfile`. The pixel data comes from `rows` which should be in boxed row packed format. Each row should be a sequence of packed bytes. Technically, this method does work for interlaced images but it is best avoided. For interlaced images, the rows should be presented in the order that they appear in the file. This method should not be used when the source image bit depth is not one naturally supported by PNG; the bit depth should be 1, 2, 4, 8, or 16." "def save_form(self, request, form, change): """""" Set the object's owner as the logged in user. """""" obj = form.save(commit=False) if obj.user_id is None: obj.user = request.user return super(OwnableAdmin, self).save_form(request, form, change)",Set the object's owner as the logged in user. 
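The `_http_get` entry above retries GET requests on transient HTTP statuses. A minimal, self-contained sketch of that retry pattern with `requests` (the retry count, timeout, and function name are assumptions for illustration):

import requests

RETRYABLE_STATUSES = (408, 500, 502, 503, 504)

def get_with_retries(url, retries=3, timeout=10):
    # Re-issue the GET for transient statuses; otherwise raise for any non-200 response.
    for attempt in range(retries + 1):
        response = requests.get(url, timeout=timeout)
        if response.status_code == 200:
            return response
        if attempt == retries or response.status_code not in RETRYABLE_STATUSES:
            response.raise_for_status()
            return response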
"def file_list(self, tgt_env): ''' Get file list for the target environment using pygit2 ''' def _traverse(tree, blobs, prefix): ''' Traverse through a pygit2 Tree object recursively, accumulating all the file paths and symlink info in the ""blobs"" dict ''' for entry in iter(tree): if entry.oid not in self.repo: # Entry is a submodule, skip it continue obj = self.repo[entry.oid] if isinstance(obj, pygit2.Blob): repo_path = salt.utils.path.join( prefix, entry.name, use_posixpath=True) blobs.setdefault('files', []).append(repo_path) if stat.S_ISLNK(tree[entry.name].filemode): link_tgt = self.repo[tree[entry.name].oid].data blobs.setdefault('symlinks', {})[repo_path] = link_tgt elif isinstance(obj, pygit2.Tree): _traverse( obj, blobs, salt.utils.path.join( prefix, entry.name, use_posixpath=True) ) files = set() symlinks = {} tree = self.get_tree(tgt_env) if not tree: # Not found, return empty objects return files, symlinks if self.root(tgt_env): try: # This might need to be changed to account for a root that # spans more than one directory oid = tree[self.root(tgt_env)].oid tree = self.repo[oid] except KeyError: return files, symlinks if not isinstance(tree, pygit2.Tree): return files, symlinks relpath = lambda path: os.path.relpath(path, self.root(tgt_env)) else: relpath = lambda path: path blobs = {} if tree: _traverse(tree, blobs, self.root(tgt_env)) add_mountpoint = lambda path: salt.utils.path.join( self.mountpoint(tgt_env), path, use_posixpath=True) for repo_path in blobs.get('files', []): files.add(add_mountpoint(relpath(repo_path))) for repo_path, link_tgt in six.iteritems(blobs.get('symlinks', {})): symlinks[add_mountpoint(relpath(repo_path))] = link_tgt return files, symlinks",Get file list for the target environment using pygit2 "def cyber_observable_check(original_function): """"""Decorator for functions that require cyber observable data. """""" def new_function(*args, **kwargs): if not has_cyber_observable_data(args[0]): return func = original_function(*args, **kwargs) if isinstance(func, Iterable): for x in original_function(*args, **kwargs): yield x new_function.__name__ = original_function.__name__ return new_function",Decorator for functions that require cyber observable data. 
"def list(self, identity_id, per_page=20, page=1): """""" Get a list of tokens :param identity_id: The ID of the identity to retrieve tokens for :param per_page: The number of results per page returned :param page: The page number of the results :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """""" params = {'per_page': per_page, 'page': page} return self.request.get(str(identity_id) + '/token', params)","Get a list of tokens :param identity_id: The ID of the identity to retrieve tokens for :param per_page: The number of results per page returned :param page: The page number of the results :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`" "def extraSelections(self): """""" In normal mode - QTextEdit.ExtraSelection which highlightes the cursor """""" if not isinstance(self._mode, Normal): return [] selection = QTextEdit.ExtraSelection() selection.format.setBackground(QColor('#ffcc22')) selection.format.setForeground(QColor('#000000')) selection.cursor = self._qpart.textCursor() selection.cursor.movePosition(QTextCursor.NextCharacter, QTextCursor.KeepAnchor) return [selection]",In normal mode - QTextEdit.ExtraSelection which highlightes the cursor "def _normalize_server_url(self, server): """""" Check if the server URL starts with a HTTP or HTTPS scheme, fall back to http if not present """""" server = server if server.startswith((""http://"", ""https://"")) else ""http://{}"".format(server) return server","Check if the server URL starts with a HTTP or HTTPS scheme, fall back to http if not present" "def open(cls, name=None, mode=""r"", fileobj=None, bufsize=RECORDSIZE, **kwargs): """"""Open a tar archive for reading, writing or appending. Return an appropriate TarFile class. mode: 'r' or 'r:*' open for reading with transparent compression 'r:' open for reading exclusively uncompressed 'r:gz' open for reading with gzip compression 'r:bz2' open for reading with bzip2 compression 'a' or 'a:' open for appending, creating the file if necessary 'w' or 'w:' open for writing without compression 'w:gz' open for writing with gzip compression 'w:bz2' open for writing with bzip2 compression 'r|*' open a stream of tar blocks with transparent compression 'r|' open an uncompressed stream of tar blocks for reading 'r|gz' open a gzip compressed stream of tar blocks 'r|bz2' open a bzip2 compressed stream of tar blocks 'w|' open an uncompressed stream for writing 'w|gz' open a gzip compressed stream for writing 'w|bz2' open a bzip2 compressed stream for writing """""" if not name and not fileobj: raise ValueError(""nothing to open"") if mode in (""r"", ""r:*""): # Find out which *open() is appropriate for opening the file. for comptype in cls.OPEN_METH: func = getattr(cls, cls.OPEN_METH[comptype]) if fileobj is not None: saved_pos = fileobj.tell() try: return func(name, ""r"", fileobj, **kwargs) except (ReadError, CompressionError) as e: if fileobj is not None: fileobj.seek(saved_pos) continue raise ReadError(""file could not be opened successfully"") elif "":"" in mode: filemode, comptype = mode.split("":"", 1) filemode = filemode or ""r"" comptype = comptype or ""tar"" # Select the *open() function according to # given compression. 
if comptype in cls.OPEN_METH: func = getattr(cls, cls.OPEN_METH[comptype]) else: raise CompressionError(""unknown compression type %r"" % comptype) return func(name, filemode, fileobj, **kwargs) elif ""|"" in mode: filemode, comptype = mode.split(""|"", 1) filemode = filemode or ""r"" comptype = comptype or ""tar"" if filemode not in ""rw"": raise ValueError(""mode must be 'r' or 'w'"") stream = _Stream(name, filemode, comptype, fileobj, bufsize) try: t = cls(name, filemode, stream, **kwargs) except: stream.close() raise t._extfileobj = False return t elif mode in ""aw"": return cls.taropen(name, mode, fileobj, **kwargs) raise ValueError(""undiscernible mode"")","Open a tar archive for reading, writing or appending. Return an appropriate TarFile class. mode: 'r' or 'r:*' open for reading with transparent compression 'r:' open for reading exclusively uncompressed 'r:gz' open for reading with gzip compression 'r:bz2' open for reading with bzip2 compression 'a' or 'a:' open for appending, creating the file if necessary 'w' or 'w:' open for writing without compression 'w:gz' open for writing with gzip compression 'w:bz2' open for writing with bzip2 compression 'r|*' open a stream of tar blocks with transparent compression 'r|' open an uncompressed stream of tar blocks for reading 'r|gz' open a gzip compressed stream of tar blocks 'r|bz2' open a bzip2 compressed stream of tar blocks 'w|' open an uncompressed stream for writing 'w|gz' open a gzip compressed stream for writing 'w|bz2' open a bzip2 compressed stream for writing" "def filter_properties_target(namespaces_iter, resource_types, properties_target): """"""Filter metadata namespaces. Filtering is done based on given resource types and a properties target. :param namespaces_iter: Metadata namespaces iterable. :param resource_types: List of resource type names. :param properties_target: Name of the properties target. """""" def filter_namespace(namespace): for asn in namespace.get('resource_type_associations'): if (asn.get('name') in resource_types and asn.get('properties_target') == properties_target): return True return False return filter(filter_namespace, namespaces_iter)","Filter metadata namespaces. Filtering is done based on given resource types and a properties target. :param namespaces_iter: Metadata namespaces iterable. :param resource_types: List of resource type names. :param properties_target: Name of the properties target." "def GetArtifactKnowledgeBase(client_obj, allow_uninitialized=False): """"""This generates an artifact knowledge base from a GRR client. Args: client_obj: A GRRClient object which is opened for reading. allow_uninitialized: If True we accept an uninitialized knowledge_base. Returns: A KnowledgeBase semantic value. Raises: ArtifactProcessingError: If called when the knowledge base has not been initialized. KnowledgeBaseUninitializedError: If we failed to initialize the knowledge base. This is needed so that the artifact library has a standardized interface to the data that is actually stored in the GRRClient object in the GRR datastore. We expect that the client KNOWLEDGE_BASE is already filled out through the, KnowledgeBaseInitialization flow, but attempt to make some intelligent guesses if things failed.
"""""" client_schema = client_obj.Schema kb = client_obj.Get(client_schema.KNOWLEDGE_BASE) if not allow_uninitialized: if not kb: raise artifact_utils.KnowledgeBaseUninitializedError( ""KnowledgeBase empty for %s."" % client_obj.urn) if not kb.os: raise artifact_utils.KnowledgeBaseAttributesMissingError( ""KnowledgeBase missing OS for %s. Knowledgebase content: %s"" % (client_obj.urn, kb)) if not kb: kb = client_schema.KNOWLEDGE_BASE() SetCoreGRRKnowledgeBaseValues(kb, client_obj) if kb.os == ""Windows"": # Add fallback values. if not kb.environ_allusersappdata and kb.environ_allusersprofile: # Guess if we don't have it already. if kb.os_major_version >= 6: kb.environ_allusersappdata = u""c:\\programdata"" kb.environ_allusersprofile = u""c:\\programdata"" else: kb.environ_allusersappdata = (u""c:\\documents and settings\\All Users\\"" ""Application Data"") kb.environ_allusersprofile = u""c:\\documents and settings\\All Users"" return kb","This generates an artifact knowledge base from a GRR client. Args: client_obj: A GRRClient object which is opened for reading. allow_uninitialized: If True we accept an uninitialized knowledge_base. Returns: A KnowledgeBase semantic value. Raises: ArtifactProcessingError: If called when the knowledge base has not been initialized. KnowledgeBaseUninitializedError: If we failed to initialize the knowledge base. This is needed so that the artifact library has a standardized interface to the data that is actually stored in the GRRClient object in the GRR datastore. We expect that the client KNOWLEDGE_BASE is already filled out through the, KnowledgeBaseInitialization flow, but attempt to make some intelligent guesses if things failed." "def _send_and_wait(self, **kwargs): """""" Send a frame to either the local ZigBee or a remote device and wait for a pre-defined amount of time for its response. """""" frame_id = self.next_frame_id kwargs.update(dict(frame_id=frame_id)) self._send(**kwargs) timeout = datetime.now() + const.RX_TIMEOUT while datetime.now() < timeout: try: frame = self._rx_frames.pop(frame_id) raise_if_error(frame) return frame except KeyError: sleep(0.1) continue _LOGGER.exception( ""Did not receive response within configured timeout period."") raise exceptions.ZigBeeResponseTimeout()","Send a frame to either the local ZigBee or a remote device and wait for a pre-defined amount of time for its response." "def middleware(self, *args, **kwargs): """""" Create a blueprint middleware from a decorated function. :param args: Positional arguments to be used while invoking the middleware :param kwargs: optional keyword args that can be used with the middleware. """""" def register_middleware(_middleware): future_middleware = FutureMiddleware(_middleware, args, kwargs) self.middlewares.append(future_middleware) return _middleware # Detect which way this was called, @middleware or @middleware('AT') if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): middleware = args[0] args = [] return register_middleware(middleware) else: if kwargs.get(""bp_group"") and callable(args[0]): middleware = args[0] args = args[1:] kwargs.pop(""bp_group"") return register_middleware(middleware) else: return register_middleware","Create a blueprint middleware from a decorated function. :param args: Positional arguments to be used while invoking the middleware :param kwargs: optional keyword args that can be used with the middleware." "def pong(self, message=None): '''Write a pong ``frame``. 
''' return self.write(self.parser.pong(message), encode=False)",Write a pong ``frame``. "def _validate_field(param, fields): """""" Ensure the field exists on the model """""" if '/' not in param.field and param.field not in fields: raise InvalidQueryParams(**{ 'detail': 'The filter query param of ""%s"" is not possible. The ' 'resource requested does not have a ""%s"" field. Please ' 'modify your request & retry.' % (param, param.field), 'links': LINK, 'parameter': PARAM, })",Ensure the field exists on the model "def _PreparedData(self, order_by=()): """"""Prepares the data for enumeration - sorting it by order_by. Args: order_by: Optional. Specifies the name of the column(s) to sort by, and (optionally) which direction to sort in. Default sort direction is asc. Following formats are accepted: ""string_col_name"" -- For a single key in default (asc) order. (""string_col_name"", ""asc|desc"") -- For a single key. [(""col_1"",""asc|desc""), (""col_2"",""asc|desc"")] -- For more than one column, an array of tuples of (col_name, ""asc|desc""). Returns: The data sorted by the keys given. Raises: DataTableException: Sort direction not in 'asc' or 'desc' """""" if not order_by: return self.__data sorted_data = self.__data[:] if isinstance(order_by, six.string_types) or ( isinstance(order_by, tuple) and len(order_by) == 2 and order_by[1].lower() in [""asc"", ""desc""]): order_by = (order_by,) for key in reversed(order_by): if isinstance(key, six.string_types): sorted_data.sort(key=lambda x: x[0].get(key)) elif (isinstance(key, (list, tuple)) and len(key) == 2 and key[1].lower() in (""asc"", ""desc"")): key_func = lambda x: x[0].get(key[0]) sorted_data.sort(key=key_func, reverse=key[1].lower() != ""asc"") else: raise DataTableException(""Expected tuple with second value: "" ""'asc' or 'desc'"") return sorted_data","Prepares the data for enumeration - sorting it by order_by. Args: order_by: Optional. Specifies the name of the column(s) to sort by, and (optionally) which direction to sort in. Default sort direction is asc. Following formats are accepted: ""string_col_name"" -- For a single key in default (asc) order. (""string_col_name"", ""asc|desc"") -- For a single key. [(""col_1"",""asc|desc""), (""col_2"",""asc|desc"")] -- For more than one column, an array of tuples of (col_name, ""asc|desc""). Returns: The data sorted by the keys given. Raises: DataTableException: Sort direction not in 'asc' or 'desc'" "def approx_eq(val: Any, other: Any, *, atol: Union[int, float] = 1e-8) -> bool: """"""Approximately compares two objects. If `val` implements SupportsApproxEquality protocol then it is invoked and takes precedence over all other checks: - For primitive numeric types `int` and `float` approximate equality is delegated to math.isclose(). - For complex primitive type the real and imaginary parts are treated independently and compared using math.isclose(). - For `val` and `other` both iterable of the same length, consecutive elements are compared recursively. Types of `val` and `other` does not necessarily needs to match each other. They just need to be iterable and have the same structure. Args: val: Source object for approximate comparison. other: Target object for approximate comparison. atol: The minimum absolute tolerance. See np.isclose() documentation for details. Defaults to 1e-8 which matches np.isclose() default absolute tolerance. Returns: True if objects are approximately equal, False otherwise. """""" # Check if val defines approximate equality via _approx_eq_. 
This takes # precedence over all other overloads. approx_eq_getter = getattr(val, '_approx_eq_', None) if approx_eq_getter is not None: result = approx_eq_getter(other, atol) if result is not NotImplemented: return result # The same for other to make approx_eq symmetric. other_approx_eq_getter = getattr(other, '_approx_eq_', None) if other_approx_eq_getter is not None: result = other_approx_eq_getter(val, atol) if result is not NotImplemented: return result # Compare primitive types directly. if isinstance(val, (int, float)): if not isinstance(other, (int, float)): return False return _isclose(val, other, atol=atol) if isinstance(val, complex): if not isinstance(other, complex): return False return _isclose(val, other, atol=atol) # Try to compare source and target recursively, assuming they're iterable. result = _approx_eq_iterables(val, other, atol=atol) # Fallback to __eq__() when anything else fails. if result is NotImplemented: return val == other return result","Approximately compares two objects. If `val` implements SupportsApproxEquality protocol then it is invoked and takes precedence over all other checks: - For primitive numeric types `int` and `float` approximate equality is delegated to math.isclose(). - For complex primitive type the real and imaginary parts are treated independently and compared using math.isclose(). - For `val` and `other` both iterable of the same length, consecutive elements are compared recursively. Types of `val` and `other` does not necessarily needs to match each other. They just need to be iterable and have the same structure. Args: val: Source object for approximate comparison. other: Target object for approximate comparison. atol: The minimum absolute tolerance. See np.isclose() documentation for details. Defaults to 1e-8 which matches np.isclose() default absolute tolerance. Returns: True if objects are approximately equal, False otherwise." "def _repr_mimebundle_(self, *args, **kwargs): """"""Display in a notebook or a server"""""" try: if self.logo: p = pn.Row( self.logo_panel, self.panel, margin=0) return p._repr_mimebundle_(*args, **kwargs) else: return self.panel._repr_mimebundle_(*args, **kwargs) except: raise RuntimeError(""Panel does not seem to be set up properly"")",Display in a notebook or a server "def datetime_parser(s): """""" Parse timestamp s in local time. First the arrow parser is used, if it fails, the parsedatetime parser is used. :param s: :return: """""" try: ts = arrow.get(s) # Convert UTC to local, result of get is UTC unless it specifies timezone, bonfire assumes # all time to be machine local if ts.tzinfo == arrow.get().tzinfo: ts = ts.replace(tzinfo='local') except: c = pdt.Calendar() result, what = c.parse(s) ts = None if what in (1, 2, 3): ts = datetime.datetime(*result[:6]) ts = arrow.get(ts) ts = ts.replace(tzinfo='local') return ts if ts is None: raise ValueError(""Cannot parse timestamp '"" + s + ""'"") return ts","Parse timestamp s in local time. First the arrow parser is used, if it fails, the parsedatetime parser is used. 
:param s: :return:" "def get_fields_with_visibility(self, visibility=""edit"", mode=""add""): """"""Return the fields with visibility """""" fields = self.get_sorted_fields() out = [] for field in fields: v = field.widget.isVisible( self.context, mode, default='invisible', field=field) if self.is_field_visible(field) is False: v = ""hidden"" visibility_guard = True # visibility_guard is a widget field defined in the schema in order # to know the visibility of the widget when the field is related to # a dynamically changing content such as workflows. For instance # those fields related to the workflow will be displayed only if # the workflow is enabled, otherwise they should not be shown. if 'visibility_guard' in dir(field.widget): visibility_guard = eval(field.widget.visibility_guard) if v == visibility and visibility_guard: out.append(field) return out",Return the fields with visibility "def GetTSKFileByPathSpec(self, path_spec): """"""Retrieves the SleuthKit file object for a path specification. Args: path_spec (PathSpec): path specification. Returns: pytsk3.File: TSK file. Raises: PathSpecError: if the path specification is missing inode and location. """""" # Opening a file by inode number is faster than opening a file # by location. inode = getattr(path_spec, 'inode', None) location = getattr(path_spec, 'location', None) if inode is not None: tsk_file = self._tsk_file_system.open_meta(inode=inode) elif location is not None: tsk_file = self._tsk_file_system.open(location) else: raise errors.PathSpecError( 'Path specification missing inode and location.') return tsk_file","Retrieves the SleuthKit file object for a path specification. Args: path_spec (PathSpec): path specification. Returns: pytsk3.File: TSK file. Raises: PathSpecError: if the path specification is missing inode and location." "def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed `, :py:mod:`pkg.removed `, and :py:mod:`pkg.purged ` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. 
versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret","Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed `, :py:mod:`pkg.removed `, and :py:mod:`pkg.purged ` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0" "def setup(app): """""" Install the plugin. Arguments: app (sphinx.application.Sphinx): the Sphinx application context """""" app.add_config_value(""cache_path"", ""_cache"", """") try: os.makedirs(app.config.cache_path) except OSError as error: if error.errno != errno.EEXIST: raise path = os.path.join(app.config.cache_path, ""spec.html"") spec = fetch_or_load(path) app.add_role(""validator"", docutils_sucks(spec))","Install the plugin. Arguments: app (sphinx.application.Sphinx): the Sphinx application context" "def rename_file(self, fmfile, newname): """"""Rename file in transfer. :param fmfile: file data from filemail containing fileid :param newname: new file name :type fmfile: ``dict`` :type newname: ``str`` or ``unicode`` :rtype: ``bool`` """""" if not isinstance(fmfile, dict): raise FMBaseError('fmfile must be a ') method, url = get_URL('file_rename') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'fileid': fmfile.get('fileid'), 'filename': newname } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: self._complete = True return True hellraiser(res)","Rename file in transfer. :param fmfile: file data from filemail containing fileid :param newname: new file name :type fmfile: ``dict`` :type newname: ``str`` or ``unicode`` :rtype: ``bool``" "def parseWarnings(self, result): """""" Transform result in string to a dict object. @param result: a list of warnings in string @return: a dict of warnings """""" warnings = {} currentModule = None warningsCurrentModule = [] for line in result.splitlines(): if line.startswith(self.prefixModuleName): # Save results for previous module if currentModule: warnings[currentModule] = set(warningsCurrentModule) # Initial results for current module moduleName = line.replace(self.prefixModuleName, """") currentModule = moduleName warningsCurrentModule = [] elif re.search(self.regexLineStart, line): warningsCurrentModule.append(line) else: if warningsCurrentModule: warningsCurrentModule[-1] += ""\n"" + line # Save warnings for last module if currentModule: warnings[currentModule] = set(warningsCurrentModule) return warnings","Transform result in string to a dict object. @param result: a list of warnings in string @return: a dict of warnings" "def copy_resume(src_uri, dest_base_uri, config_path=None, progressbar=None): """"""Resume copying a dataset to another location. Items that have been copied to the destination and have the same size as in the source dataset are skipped. All other items are copied across and the dataset is frozen.
:param src_uri: URI of dataset to be copied :param dest_base_uri: base of URI for copy target :param config_path: path to dtool configuration file :returns: URI of new dataset """""" dataset = DataSet.from_uri(src_uri) # Generate the URI of the destination proto dataset. dest_uri = _generate_uri(dataset._admin_metadata, dest_base_uri) proto_dataset = ProtoDataSet.from_uri(dest_uri) _copy_content(dataset, proto_dataset, progressbar) proto_dataset.freeze(progressbar=progressbar) return proto_dataset.uri","Resume copying a dataset to another location. Items that have been copied to the destination and have the same size as in the source dataset are skipped. All other items are copied across and the dataset is frozen. :param src_uri: URI of dataset to be copied :param dest_base_uri: base of URI for copy target :param config_path: path to dtool configuration file :returns: URI of new dataset" "def check_config(self, contents): """"""Process config contents with cdrouter-cli -check-config. :param contents: Config contents as string. :return: :class:`configs.CheckConfig ` object :rtype: configs.CheckConfig """""" schema = CheckConfigSchema() resp = self.service.post(self.base, params={'process': 'check'}, json={'contents': contents}) return self.service.decode(schema, resp)","Process config contents with cdrouter-cli -check-config. :param contents: Config contents as string. :return: :class:`configs.CheckConfig ` object :rtype: configs.CheckConfig" "def transfer(self, name, local, remote, **kwargs): """""" Transfers the file with the given name from the local to the remote storage backend. :param name: The name of the file to transfer :param local: The local storage backend instance :param remote: The remote storage backend instance :returns: `True` when the transfer succeeded, `False` if not. Retries the task when returning `False` :rtype: bool """""" try: remote.save(name, local.open(name)) return True except Exception as e: logger.error(""Unable to save '%s' to remote storage. "" ""About to retry."" % name) logger.exception(e) return False","Transfers the file with the given name from the local to the remote storage backend. :param name: The name of the file to transfer :param local: The local storage backend instance :param remote: The remote storage backend instance :returns: `True` when the transfer succeeded, `False` if not. Retries the task when returning `False` :rtype: bool" "def inserir(self, id_user, id_group): """"""Create a relationship between User and Group. :param id_user: Identifier of the User. Integer value and greater than zero. :param id_group: Identifier of the Group. Integer value and greater than zero. :return: Dictionary with the following structure: :: {'user_group': {'id': < id_user_group >}} :raise InvalidParameterError: The identifier of User or Group is null and invalid. :raise GrupoUsuarioNaoExisteError: UserGroup not registered. :raise UsuarioNaoExisteError: User not registered. :raise UsuarioGrupoError: User already registered in the group. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
"""""" if not is_valid_int_param(id_user): raise InvalidParameterError( u'The identifier of User is invalid or was not informed.') if not is_valid_int_param(id_group): raise InvalidParameterError( u'The identifier of Group is invalid or was not informed.') url = 'usergroup/user/' + \ str(id_user) + '/ugroup/' + str(id_group) + '/associate/' code, xml = self.submit(None, 'PUT', url)","Create a relationship between User and Group. :param id_user: Identifier of the User. Integer value and greater than zero. :param id_group: Identifier of the Group. Integer value and greater than zero. :return: Dictionary with the following structure: :: {'user_group': {'id': < id_user_group >}} :raise InvalidParameterError: The identifier of User or Group is null and invalid. :raise GrupoUsuarioNaoExisteError: UserGroup not registered. :raise UsuarioNaoExisteError: User not registered. :raise UsuarioGrupoError: User already registered in the group. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response." "def get_params(self, failobj=None, header='content-type', unquote=True): """"""Return the message's Content-Type parameters, as a list. The elements of the returned list are 2-tuples of key/value pairs, as split on the `=' sign. The left hand side of the `=' is the key, while the right hand side is the value. If there is no `=' sign in the parameter the value is the empty string. The value is as described in the get_param() method. Optional failobj is the object to return if there is no Content-Type header. Optional header is the header to search instead of Content-Type. If unquote is True, the value is unquoted. """""" missing = object() params = self._get_params_preserve(missing, header) if params is missing: return failobj if unquote: return [(k, _unquotevalue(v)) for k, v in params] else: return params","Return the message's Content-Type parameters, as a list. The elements of the returned list are 2-tuples of key/value pairs, as split on the `=' sign. The left hand side of the `=' is the key, while the right hand side is the value. If there is no `=' sign in the parameter the value is the empty string. The value is as described in the get_param() method. Optional failobj is the object to return if there is no Content-Type header. Optional header is the header to search instead of Content-Type. If unquote is True, the value is unquoted." "def html(header_rows): """""" Convert a list of tuples describing a table into a HTML string """""" name = 'table%d' % next(tablecounter) return HtmlTable([map(str, row) for row in header_rows], name).render()",Convert a list of tuples describing a table into a HTML string "def create_employers(self): '''Generate employer bees. This should be called directly after the ABC is initialized. 
''' self.__verify_ready(True) employers = [] for i in range(self._num_employers): employer = EmployerBee(self.__gen_random_values()) if self._processes <= 1: employer.error = self._fitness_fxn( employer.values, **self._args ) employer.score = employer.get_score() if np.isnan(employer.score): self._logger.log('warn', 'NaN bee score: {}, {}'.format( employer.id, employer.score )) self._logger.log('debug', 'Bee number {} created'.format( i + 1 )) self.__update(employer.score, employer.values, employer.error) else: employer.error = self._pool.apply_async( self._fitness_fxn, [employer.values], self._args ) employers.append(employer) self._employers.append(employer) for idx, employer in enumerate(employers): try: employer.error = employer.error.get() employer.score = employer.get_score() if np.isnan(employer.score): self._logger.log('warn', 'NaN bee score: {}, {}'.format( employer.id, employer.score )) self._logger.log('debug', 'Bee number {} created'.format( i + 1 )) self.__update(employer.score, employer.values, employer.error) except Exception as e: raise e self._logger.log('debug', 'Employer creation complete')","Generate employer bees. This should be called directly after the ABC is initialized." "def _rgb_to_hex(rgbs): """"""Convert rgb to hex triplet"""""" rgbs, n_dim = _check_color_dim(rgbs) return np.array(['#%02x%02x%02x' % tuple((255*rgb[:3]).astype(np.uint8)) for rgb in rgbs], '|U7')",Convert rgb to hex triplet "def certify_tuple(value, certifier=None, min_len=None, max_len=None, required=True, schema=None): """""" Validates a tuple, checking it against an optional schema. The schema should be a list of expected values replaced by functions which will be called to with the corresponding value in the input. A simple example: >>> certifier = certify_tuple(schema=( ... certify_key(kind='Model'), ... certify_int(min=0), ... )) >>> certifier((self.key, self.count)) :param tuple value: The value to be certified. :param func certifier: A function to be called on each value in the iterable to check that it is valid. :param int min_len: The minimum acceptable length for the iterable. If None, the minimum length is not checked. :param int max_len: The maximum acceptable length for the iterable. If None, the maximum length is not checked. :param bool required: Whether the value can't be `None`. Defaults to True. :param tuple schema: The schema against which the value should be checked. For single-item tuple make sure to add comma at the end of schema tuple, that is, for example: schema=(certify_int(),) :return: The certified tuple. :rtype: tuple :raises CertifierTypeError: The type is invalid :raises CertifierValueError: The valid is invalid """""" certify_iterable( value=value, types=tuple([tuple]), certifier=certifier, min_len=min_len, max_len=max_len, schema=schema, required=required, )","Validates a tuple, checking it against an optional schema. The schema should be a list of expected values replaced by functions which will be called to with the corresponding value in the input. A simple example: >>> certifier = certify_tuple(schema=( ... certify_key(kind='Model'), ... certify_int(min=0), ... )) >>> certifier((self.key, self.count)) :param tuple value: The value to be certified. :param func certifier: A function to be called on each value in the iterable to check that it is valid. :param int min_len: The minimum acceptable length for the iterable. If None, the minimum length is not checked. :param int max_len: The maximum acceptable length for the iterable. 
If None, the maximum length is not checked. :param bool required: Whether the value can't be `None`. Defaults to True. :param tuple schema: The schema against which the value should be checked. For single-item tuple make sure to add comma at the end of schema tuple, that is, for example: schema=(certify_int(),) :return: The certified tuple. :rtype: tuple :raises CertifierTypeError: The type is invalid :raises CertifierValueError: The valid is invalid" "def get_answers(self): """"""Gets the answers. return: (osid.assessment.AnswerList) - the answers raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.* """""" # Implemented from template for osid.repository.Asset.get_asset_contents_template return AnswerList( self._my_map['answers'], runtime=self._runtime, proxy=self._proxy)","Gets the answers. return: (osid.assessment.AnswerList) - the answers raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*" "def cmdline(argv=sys.argv[1:]): """""" Script for merging different collections of stop words. """""" parser = ArgumentParser( description='Create and merge collections of stop words') parser.add_argument( 'language', help='The language used in the collection') parser.add_argument('sources', metavar='FILE', nargs='+', help='Source files to parse') options = parser.parse_args(argv) factory = StopWordFactory() language = options.language stop_words = factory.get_stop_words(language, fail_safe=True) for filename in options.sources: stop_words += StopWord(language, factory.read_collection(filename)) filename = factory.get_collection_filename(stop_words.language) factory.write_collection(filename, stop_words.collection)",Script for merging different collections of stop words. "def _are_scopes_sufficient(authorized_scopes, sufficient_scopes): """"""Check if a list of authorized scopes satisfies any set of sufficient scopes. Args: authorized_scopes: a list of strings, return value from oauth.get_authorized_scopes sufficient_scopes: a set of sets of strings, return value from _process_scopes """""" for sufficient_scope_set in sufficient_scopes: if sufficient_scope_set.issubset(authorized_scopes): return True return False","Check if a list of authorized scopes satisfies any set of sufficient scopes. Args: authorized_scopes: a list of strings, return value from oauth.get_authorized_scopes sufficient_scopes: a set of sets of strings, return value from _process_scopes" "def CorrectWrongEmails(self, askInput=True): '''Corrects Emails in wrong_emails''' for email in self.wrong_emails: corrected_email = self.CorrectEmail(email) self.emails[self.emails.index(email)] = corrected_email self.wrong_emails = []",Corrects Emails in wrong_emails "def CreateAttachmentAndUploadMedia(self, document_link, readable_stream, options=None): """"""Creates an attachment and upload media. :param str document_link: The link to the document. :param (file-like stream object) readable_stream: :param dict options: The request options for the request. :return: The created Attachment. :rtype: dict """""" if options is None: options = {} document_id, initial_headers, path = self._GetItemIdWithPathForAttachmentMedia(document_link, options) return self.Create(readable_stream, path, 'attachments', document_id, initial_headers, options)","Creates an attachment and upload media. :param str document_link: The link to the document. 
:param (file-like stream object) readable_stream: :param dict options: The request options for the request. :return: The created Attachment. :rtype: dict" "def query_sequence(self): """""" Overrides align. corrects orientation with reverse complement if on negative strand .. warning:: this returns the full query sequence, not just the aligned portion, but i also does not include hard clipped portions (only soft clipped) """""" if not self.entries.seq: return None if self.check_flag(0x10): return rc(self.entries.seq) return self.entries.seq","Overrides align. corrects orientation with reverse complement if on negative strand .. warning:: this returns the full query sequence, not just the aligned portion, but i also does not include hard clipped portions (only soft clipped)" "def order(self, order=None): """""" If order is given, modify the URL correspondingly, return the current order otherwise. """""" if order is None: return int(self.url.order) self.url.order = str(order)","If order is given, modify the URL correspondingly, return the current order otherwise." "def load_children(self): """"""If the Shard doesn't have any children, tries to find some from DescribeStream. If the Shard is open this won't find any children, so an empty response doesn't mean the Shard will **never** have children. """""" # Child count is fixed the first time any of the following happen: # 0 :: stream closed or throughput decreased # 1 :: shard was open for ~4 hours # 2 :: throughput increased if self.children: return self.children # ParentShardId -> [Shard, ...] by_parent = collections.defaultdict(list) # ShardId -> Shard by_id = {} for shard in self.session.describe_stream( stream_arn=self.stream_arn, first_shard=self.shard_id)[""Shards""]: parent_list = by_parent[shard.get(""ParentShardId"")] shard = Shard( stream_arn=self.stream_arn, shard_id=shard[""ShardId""], parent=shard.get(""ParentShardId""), session=self.session) parent_list.append(shard) by_id[shard.shard_id] = shard # Find this shard when looking up shards by ParentShardId by_id[self.shard_id] = self # Insert this shard's children, then handle its child's descendants etc. to_insert = collections.deque(by_parent[self.shard_id]) while to_insert: shard = to_insert.popleft() # ParentShardId -> Shard shard.parent = by_id[shard.parent] shard.parent.children.append(shard) # Continue for any shards that have this shard as their parent to_insert.extend(by_parent[shard.shard_id]) return self.children","If the Shard doesn't have any children, tries to find some from DescribeStream. If the Shard is open this won't find any children, so an empty response doesn't mean the Shard will **never** have children." "def _translate_port(port): ''' Look into services and return the port value using the service name as lookup value. ''' services = _get_services_mapping() if port in services and services[port]['port']: return services[port]['port'][0] return port","Look into services and return the port value using the service name as lookup value." "def get_variable_for_feature(self, feature_key, variable_key): """""" Get the variable with the given variable key for the given feature. Args: feature_key: The key of the feature for which we are getting the variable. variable_key: The key of the variable we are getting. Returns: Variable with the given key in the given variation. """""" feature = self.feature_key_map.get(feature_key) if not feature: self.logger.error('Feature with key ""%s"" not found in the datafile.' 
% feature_key) return None if variable_key not in feature.variables: self.logger.error('Variable with key ""%s"" not found in the datafile.' % variable_key) return None return feature.variables.get(variable_key)","Get the variable with the given variable key for the given feature. Args: feature_key: The key of the feature for which we are getting the variable. variable_key: The key of the variable we are getting. Returns: Variable with the given key in the given variation." "def findall(self): """"""Find all files under the base and set ``allfiles`` to the absolute pathnames of files found. """""" from stat import S_ISREG, S_ISDIR, S_ISLNK self.allfiles = allfiles = [] root = self.base stack = [root] pop = stack.pop push = stack.append while stack: root = pop() names = os.listdir(root) for name in names: fullname = os.path.join(root, name) # Avoid excess stat calls -- just one will do, thank you! stat = os.stat(fullname) mode = stat.st_mode if S_ISREG(mode): allfiles.append(fsdecode(fullname)) elif S_ISDIR(mode) and not S_ISLNK(mode): push(fullname)","Find all files under the base and set ``allfiles`` to the absolute pathnames of files found." "def chem_shifts_by_residue(self, amino_acids=None, atoms=None, amino_acids_and_atoms=None, nmrstar_version=""3""): """"""Organize chemical shifts by amino acid residue. :param list amino_acids: List of amino acids three-letter codes. :param list atoms: List of BMRB atom type codes. :param dict amino_acids_and_atoms: Amino acid and its atoms key-value pairs. :param str nmrstar_version: Version of NMR-STAR format to use for look up chemical shifts loop. :return: List of OrderedDict per each chain :rtype: :py:class:`list` of :py:class:`collections.OrderedDict` """""" if (amino_acids_and_atoms and amino_acids) or (amino_acids_and_atoms and atoms): raise ValueError('""amino_acids_and_atoms"" parameter cannot be used simultaneously with ' '""amino_acids"" and ""atoms"" parameters, one or another must be provided.') chemshifts_loop = NMRSTAR_CONSTANTS[nmrstar_version][""chemshifts_loop""] aminoacid_seq_id = NMRSTAR_CONSTANTS[nmrstar_version][""aminoacid_seq_id""] aminoacid_code = NMRSTAR_CONSTANTS[nmrstar_version][""aminoacid_code""] atom_code = NMRSTAR_CONSTANTS[nmrstar_version][""atom_code""] chemshift_value = NMRSTAR_CONSTANTS[nmrstar_version][""chemshift_value""] chains = [] for saveframe in self: if saveframe == u""data"" or saveframe.startswith(u""comment""): continue else: for ind in self[saveframe].keys(): if ind.startswith(u""loop_""): if list(self[saveframe][ind][0]) == chemshifts_loop: chem_shifts_dict = OrderedDict() for entry in self[saveframe][ind][1]: residue_id = entry[aminoacid_seq_id] chem_shifts_dict.setdefault(residue_id, OrderedDict()) chem_shifts_dict[residue_id][u""AA3Code""] = entry[aminoacid_code] chem_shifts_dict[residue_id][u""Seq_ID""] = residue_id chem_shifts_dict[residue_id][entry[atom_code]] = entry[chemshift_value] chains.append(chem_shifts_dict) if amino_acids_and_atoms: for chem_shifts_dict in chains: for aa_dict in list(chem_shifts_dict.values()): if aa_dict[u""AA3Code""].upper() not in list(amino_acids_and_atoms.keys()): chem_shifts_dict.pop(aa_dict[u""Seq_ID""]) else: for resonance in list(aa_dict.keys()): if resonance in (u""AA3Code"", u""Seq_ID"") or resonance.upper() in amino_acids_and_atoms[aa_dict[u""AA3Code""]]: continue else: aa_dict.pop(resonance) else: if amino_acids: for chem_shifts_dict in chains: for aa_dict in list(chem_shifts_dict.values()): if aa_dict[u""AA3Code""].upper() not in amino_acids: 
chem_shifts_dict.pop(aa_dict[u""Seq_ID""]) if atoms: for chem_shifts_dict in chains: for aa_dict in chem_shifts_dict.values(): for resonance in list(aa_dict.keys()): if resonance in (u""AA3Code"", u""Seq_ID"") or resonance.upper() in atoms: continue else: aa_dict.pop(resonance) return chains","Organize chemical shifts by amino acid residue. :param list amino_acids: List of amino acids three-letter codes. :param list atoms: List of BMRB atom type codes. :param dict amino_acids_and_atoms: Amino acid and its atoms key-value pairs. :param str nmrstar_version: Version of NMR-STAR format to use for look up chemical shifts loop. :return: List of OrderedDict per each chain :rtype: :py:class:`list` of :py:class:`collections.OrderedDict`" "def accept_format(*, version: str = ""v3"", media: Optional[str] = None, json: bool = True) -> str: """"""Construct the specification of the format that a request should return. The version argument defaults to v3 of the GitHub API and is applicable to all requests. The media argument along with 'json' specifies what format the request should return, e.g. requesting the rendered HTML of a comment. Do note that not all of GitHub's API supports alternative formats. The default arguments of this function will always return the latest stable version of the GitHub API in the default format that this library is designed to support. """""" # https://developer.github.com/v3/media/ # https://developer.github.com/v3/#current-version accept = f""application/vnd.github.{version}"" if media is not None: accept += f"".{media}"" if json: accept += ""+json"" return accept","Construct the specification of the format that a request should return. The version argument defaults to v3 of the GitHub API and is applicable to all requests. The media argument along with 'json' specifies what format the request should return, e.g. requesting the rendered HTML of a comment. Do note that not all of GitHub's API supports alternative formats. The default arguments of this function will always return the latest stable version of the GitHub API in the default format that this library is designed to support." "def update(dst, src): """""" Recursively update values in dst from src. Unlike the builtin dict.update() function, this method will decend into nested dicts, updating all nested values. Arguments: dst (dict): Destination dict. src (dict): Source dict. Returns: dict: dst updated with entries from src. """""" for k, v in src.items(): if isinstance(v, Mapping): r = update(dst.get(k, {}), v) dst[k] = r else: dst[k] = src[k] return dst","Recursively update values in dst from src. Unlike the builtin dict.update() function, this method will decend into nested dicts, updating all nested values. Arguments: dst (dict): Destination dict. src (dict): Source dict. Returns: dict: dst updated with entries from src." "def cut(args): """""" %prog cut unitigfile fragID Cut the unitig at a given fragment. Run `%prog trace unitigfile` first to see which fragment breaks the unitig. 
"""""" from jcvi.formats.base import SetFile p = OptionParser(cut.__doc__) p.add_option(""-s"", dest=""shredafter"", default=False, action=""store_true"", help=""Shred fragments after the given fragID [default: %default]"") p.add_option(""--notest"", default=False, action=""store_true"", help=""Do not test the unitigfile after edits [default: %default]"") p.add_option(""--blacklist"", help=""File that contains blacklisted fragments to be popped "" ""[default: %default]"") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) s, fragID = args u = UnitigLayout(s) blacklist = opts.blacklist black = SetFile(blacklist) if blacklist else None if opts.shredafter: u.shredafter(fragID) elif black: assert fragID == ""0"", ""Must set fragID to 0 when --blacklist is on"" u.pop(black) else: u.cut(fragID) u.print_to_file(inplace=True) if not opts.notest: test([s])","%prog cut unitigfile fragID Cut the unitig at a given fragment. Run `%prog trace unitigfile` first to see which fragment breaks the unitig." "def find_ribosomal(rps, scaffolds, s2rp, min_hits, max_hits_rp, max_errors): """""" determine which hits represent real ribosomal proteins, identify each in syntenic block max_hits_rp = maximum number of hits to consider per ribosomal protein per scaffold """""" for scaffold, proteins in list(s2rp.items()): # for each scaffold, get best hits for each rp hits = {p: [i for i in sorted(hits, key = itemgetter(10))][0:max_hits_rp] for p, hits in list(proteins.items()) if len(hits) > 0} # skip if fewer than min_hits RPs are identified if len(hits) < min_hits: continue best = sorted([hit[0] + [p] for p, hit in list(hits.items())], key = itemgetter(10))[0] block = find_block(rps, scaffolds[scaffold], hits, best, max_errors) if (len(block) - 1) >= min_hits: yield scaffold, block","determine which hits represent real ribosomal proteins, identify each in syntenic block max_hits_rp = maximum number of hits to consider per ribosomal protein per scaffold" "def publish(self, message): """""" Publishes the message to all subscribers of this topic :param message: (object), the message to be published. """""" message_data = self._to_data(message) self._encode_invoke(topic_publish_codec, message=message_data)","Publishes the message to all subscribers of this topic :param message: (object), the message to be published." "def timedelta_div(first: datetime.timedelta, second: datetime.timedelta) -> Optional[float]: """"""Implement divison for timedelta instances. :param first: First timedelta instance. :param second: Second timedelta instance. """""" first_seconds = timedelta_seconds(first) second_seconds = timedelta_seconds(second) if not second_seconds: return None return first_seconds / second_seconds","Implement divison for timedelta instances. :param first: First timedelta instance. :param second: Second timedelta instance." 
"def cmd_legend(self, args): '''setup legend for graphs''' if len(args) == 0: for leg in self.legend.keys(): print(""%s -> %s"" % (leg, self.legend[leg])) elif len(args) == 1: leg = args[0] if leg in self.legend: print(""Removing legend %s"" % leg) self.legend.pop(leg) elif len(args) >= 2: leg = args[0] leg2 = args[1] print(""Adding legend %s -> %s"" % (leg, leg2)) self.legend[leg] = leg2",setup legend for graphs "def _modelmat(self, X, term=-1): """""" Builds a model matrix, B, out of the spline basis for each feature B = [B_0, B_1, ..., B_p] Parameters --------- X : array-like of shape (n_samples, m_features) containing the input dataset term : int, optional term index for which to compute the model matrix if -1, will create the model matrix for all features Returns ------- modelmat : sparse matrix of len n_samples containing model matrix of the spline basis for selected features """""" X = check_X(X, n_feats=self.statistics_['m_features'], edge_knots=self.edge_knots_, dtypes=self.dtype, features=self.feature, verbose=self.verbose) return self.terms.build_columns(X, term=term)","Builds a model matrix, B, out of the spline basis for each feature B = [B_0, B_1, ..., B_p] Parameters --------- X : array-like of shape (n_samples, m_features) containing the input dataset term : int, optional term index for which to compute the model matrix if -1, will create the model matrix for all features Returns ------- modelmat : sparse matrix of len n_samples containing model matrix of the spline basis for selected features" "def prepend(self, bs): """"""Prepend a bitstring to the current bitstring. bs -- The bitstring to prepend. """""" bs = self._converttobitstring(bs) self._prepend(bs) self._pos += bs.len","Prepend a bitstring to the current bitstring. bs -- The bitstring to prepend." "def unicode_dict(_dict): """""" Make sure keys and values of dict is unicode. """""" r = {} for k, v in iteritems(_dict): r[unicode_obj(k)] = unicode_obj(v) return r",Make sure keys and values of dict is unicode. "def open_preview(self): ''' Try to open a preview of the generated document. Currently only supported on Windows. ''' self.log.info('Opening preview...') if self.opt.pdf: ext = 'pdf' else: ext = 'dvi' filename = '%s.%s' % (self.project_name, ext) if sys.platform == 'win32': try: os.startfile(filename) except OSError: self.log.error( 'Preview-Error: Extension .%s is not linked to a ' 'specific application!' % ext ) elif sys.platform == 'darwin': call(['open', filename]) else: self.log.error( 'Preview-Error: Preview function is currently not ' 'supported on Linux.' )","Try to open a preview of the generated document. Currently only supported on Windows." "def action_approve(self): """"""Set a change request as approved."""""" for rec in self: if rec.state not in ['draft', 'to approve']: raise UserError( _(""Can't approve page in '%s' state."") % rec.state) if not rec.am_i_approver: raise UserError(_( 'You are not authorized to do this.\r\n' 'Only approvers with these groups can approve this: ' ) % ', '.join( [g.display_name for g in rec.page_id.approver_group_ids])) # Update state rec.write({ 'state': 'approved', 'approved_date': fields.datetime.now(), 'approved_uid': self.env.uid, }) # Trigger computed field update rec.page_id._compute_history_head() # Notify state change rec.message_post( subtype='mt_comment', body=_( 'Change request has been approved by %s.' 
) % (self.env.user.name) ) # Notify followers a new version is available rec.page_id.message_post( subtype='mt_comment', body=_( 'New version of the document %s approved.' ) % (rec.page_id.name) )",Set a change request as approved. "def create_torso_layer(aspect, ip): '''Reads the TORSO.pgn file and creates the torso layer.''' layer = [] if 'TOP' in aspect: layer = pgnreader.parse_pagan_file(FILE_TORSO, ip, invert=False, sym=True) return layer","Reads the TORSO.pgn file and creates the torso layer." "def update_confirmation_item(self, confirmation_item_id, confirmation_item_dict): """""" Updates a confirmation item :param confirmation_item_id: the confirmation item id :param confirmation_item_dict: dict :return: dict """""" return self._create_put_request( resource=CONFIRMATION_ITEMS, billomat_id=confirmation_item_id, send_data=confirmation_item_dict )","Updates a confirmation item :param confirmation_item_id: the confirmation item id :param confirmation_item_dict: dict :return: dict" "def get_system_config_directory(): """""" Return platform specific config directory. """""" if platform.system().lower() == 'windows': _cfg_directory = Path(os.getenv('APPDATA') or '~') elif platform.system().lower() == 'darwin': _cfg_directory = Path('~', 'Library', 'Preferences') else: _cfg_directory = Path(os.getenv('XDG_CONFIG_HOME') or '~/.config') logger.debug('Fetching configt directory for {}.' .format(platform.system())) return _cfg_directory.joinpath(Path('mayalauncher/.config'))",Return platform specific config directory. "def byweekday(self): """""" The weekdays where the recurrence will be applied. In RFC5545 this is called BYDAY, but is renamed by dateutil to avoid ambiguity. """""" retval = [] if self.rule._byweekday: retval += [Weekday(day) for day in self.rule._byweekday] if self.rule._bynweekday: retval += [Weekday(day, n) for day, n in self.rule._bynweekday] return retval","The weekdays where the recurrence will be applied. In RFC5545 this is called BYDAY, but is renamed by dateutil to avoid ambiguity." "def to_nested_php_args(data, prefix_key=None): """""" This function will take either a dict or list and will recursively loop through the values converting it into a format similar to a PHP array which Ubersmith requires for the info portion of the API's order.create method. """""" is_root = prefix_key is None prefix_key = prefix_key if prefix_key else '' if islist(data): data_iter = data if is_root else enumerate(data) new_data = [] if is_root else {} elif isdict(data): data_iter = list(data.items()) new_data = {} else: raise TypeError('expected dict or list, got {0}'.format(type(data))) if islist(new_data): def data_set(k, v): new_data.append((k, v)) def data_update(d): for k, v in list(d.items()): new_data.append((k, v)) else: def data_set(k, v): new_data[k] = v data_update = new_data.update for key, value in data_iter: end_key = prefix_key + (str(key) if is_root else '[{0}]'.format(key)) if _is_leaf(value): data_set(end_key, value) else: nested_args = to_nested_php_args(value, end_key) data_update(nested_args) return new_data","This function will take either a dict or list and will recursively loop through the values converting it into a format similar to a PHP array which Ubersmith requires for the info portion of the API's order.create method." "async def refresh(self) -> None: """""" Refresh local copy of pool ledger and update node pool connections. 
"""""" LOGGER.debug('NodePool.refresh >>>') await pool.refresh_pool_ledger(self.handle) LOGGER.debug('NodePool.refresh <<<')",Refresh local copy of pool ledger and update node pool connections. "def add(self, labels, value): """"""Add adds a single observation to the summary."""""" if type(value) not in (float, int): raise TypeError(""Summary only works with digits (int, float)"") # We have already a lock for data but not for the estimator with mutex: try: e = self.get_value(labels) except KeyError: # Initialize quantile estimator e = quantile.Estimator(*self.__class__.DEFAULT_INVARIANTS) self.set_value(labels, e) e.observe(float(value))",Add adds a single observation to the summary. "def flatten_models(models): "" Create 1d-array containing all disctinct models from ``models``. "" if isinstance(models, MultiFitterModel): ans = [models] else: tasklist = MultiFitter._compile_models(models) ans = MultiFitter._flatten_models(tasklist) return ans",Create 1d-array containing all disctinct models from ``models``. "def ekifld(handle, tabnam, ncols, nrows, cnmlen, cnames, declen, decls): """""" Initialize a new E-kernel segment to allow fast writing. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekifld_c.html :param handle: File handle. :type handle: int :param tabnam: Table name. :type tabnam: str :param ncols: Number of columns in the segment. :type ncols: int :param nrows: Number of rows in the segment. :type nrows: int :param cnmlen: Length of names in in column name array. :type cnmlen: int :param cnames: Names of columns. :type cnames: list of str. :param declen: Length of declaration strings in declaration array. :type declen: int :param decls: Declarations of columns. :type decls: list of str. :return: Segment number, Array of record pointers. :rtype: tuple """""" handle = ctypes.c_int(handle) tabnam = stypes.stringToCharP(tabnam) ncols = ctypes.c_int(ncols) nrows = ctypes.c_int(nrows) cnmlen = ctypes.c_int(cnmlen) cnames = stypes.listToCharArray(cnames) declen = ctypes.c_int(declen) recptrs = stypes.emptyIntVector(nrows) decls = stypes.listToCharArray(decls) segno = ctypes.c_int() libspice.ekifld_c(handle, tabnam, ncols, nrows, cnmlen, cnames, declen, decls, ctypes.byref(segno), recptrs) return segno.value, stypes.cVectorToPython(recptrs)","Initialize a new E-kernel segment to allow fast writing. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekifld_c.html :param handle: File handle. :type handle: int :param tabnam: Table name. :type tabnam: str :param ncols: Number of columns in the segment. :type ncols: int :param nrows: Number of rows in the segment. :type nrows: int :param cnmlen: Length of names in in column name array. :type cnmlen: int :param cnames: Names of columns. :type cnames: list of str. :param declen: Length of declaration strings in declaration array. :type declen: int :param decls: Declarations of columns. :type decls: list of str. :return: Segment number, Array of record pointers. :rtype: tuple" "def get_api_items(api_doc_fd): """""" Yield information about all public API items. Parse api.rst file from the documentation, and extract all the functions, methods, classes, attributes... This should include all pandas public API. Parameters ---------- api_doc_fd : file descriptor A file descriptor of the API documentation page, containing the table of contents with all the public API. Yields ------ name : str The name of the object (e.g. 'pandas.Series.str.upper). func : function The object itself. 
In most cases this will be a function or method, but it can also be classes, properties, cython objects... section : str The name of the section in the API page where the object item is located. subsection : str The name of the subsection in the API page where the object item is located. """""" current_module = 'pandas' previous_line = current_section = current_subsection = '' position = None for line in api_doc_fd: line = line.strip() if len(line) == len(previous_line): if set(line) == set('-'): current_section = previous_line continue if set(line) == set('~'): current_subsection = previous_line continue if line.startswith('.. currentmodule::'): current_module = line.replace('.. currentmodule::', '').strip() continue if line == '.. autosummary::': position = 'autosummary' continue if position == 'autosummary': if line == '': position = 'items' continue if position == 'items': if line == '': position = None continue item = line.strip() func = importlib.import_module(current_module) for part in item.split('.'): func = getattr(func, part) yield ('.'.join([current_module, item]), func, current_section, current_subsection) previous_line = line","Yield information about all public API items. Parse api.rst file from the documentation, and extract all the functions, methods, classes, attributes... This should include all pandas public API. Parameters ---------- api_doc_fd : file descriptor A file descriptor of the API documentation page, containing the table of contents with all the public API. Yields ------ name : str The name of the object (e.g. 'pandas.Series.str.upper). func : function The object itself. In most cases this will be a function or method, but it can also be classes, properties, cython objects... section : str The name of the section in the API page where the object item is located. subsection : str The name of the subsection in the API page where the object item is located." "def strip_number(self): """"""The number of the strip that has changed state, with 0 being the first strip. On tablets with only one strip, this method always returns 0. For events not of type :attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property raises :exc:`AttributeError`. Returns: int: The index of the strip that changed state. Raises: AttributeError """""" if self.type != EventType.TABLET_PAD_STRIP: raise AttributeError(_wrong_prop.format(self.type)) return self._libinput.libinput_event_tablet_pad_get_strip_number( self._handle)","The number of the strip that has changed state, with 0 being the first strip. On tablets with only one strip, this method always returns 0. For events not of type :attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property raises :exc:`AttributeError`. Returns: int: The index of the strip that changed state. Raises: AttributeError" "def get_json_ld_extra(key, value): '''Serialize an extras key, value pair into JSON-LD''' value = value.serialize() if hasattr(value, 'serialize') else value return { '@type': 'http://schema.org/PropertyValue', 'name': key, 'value': value, }","Serialize an extras key, value pair into JSON-LD" "def _read_config(self): """"""Read this component's configuration from the database"""""" try: self.config = self.componentmodel.find_one( {'name': self.uniquename}) except ServerSelectionTimeoutError: # pragma: no cover self.log(""No database access! 
Check if mongodb is running "" ""correctly."", lvl=critical) if self.config: self.log(""Configuration read."", lvl=verbose) else: self.log(""No configuration found."", lvl=warn)",Read this component's configuration from the database "def safe_run(coro, return_exceptions=False): """""" Executes a given coroutine and optionally catches exceptions, returning them as value. This function is intended to be used internally. """""" try: result = yield from coro except Exception as err: if return_exceptions: result = err else: raise err return result","Executes a given coroutine and optionally catches exceptions, returning them as value. This function is intended to be used internally." "def handle_property(self, obj): """"""Handle a property event. This function will set an attribute on an object if the event requires it. :param obj: A :py:class:`~turberfield.dialogue.model.Model.Property` object. :return: The supplied object. """""" if obj.object is not None: try: setattr(obj.object, obj.attr, obj.val) except AttributeError as e: self.log.error("". "".join(getattr(e, ""args"", e) or e)) try: print( ""{t.dim}{obj.object._name}.{obj.attr} = {obj.val!s}{t.normal}"".format( obj=obj, t=self.terminal ), end=""\n"" * 2, file=self.terminal.stream ) except AttributeError as e: self.log.error("". "".join(getattr(e, ""args"", e) or e)) return obj","Handle a property event. This function will set an attribute on an object if the event requires it. :param obj: A :py:class:`~turberfield.dialogue.model.Model.Property` object. :return: The supplied object." "def renew_subscription(self, sid, timeout=None): """""" Renews a previously configured subscription. """""" url = urljoin(self._url_base, self._event_sub_url) headers = dict( HOST=urlparse(url).netloc, SID=sid ) if timeout is not None: headers['TIMEOUT'] = 'Second-%s' % timeout resp = requests.request('SUBSCRIBE', url, headers=headers, auth=self.device.http_auth) resp.raise_for_status() return Service.validate_subscription_renewal_response(resp)",Renews a previously configured subscription. "def resample(self, data, stats=None, mask=None, niter=None): """""" Introduce a mask that allows for missing data """""" stats = self._get_statistics(data, mask=mask) if stats is None else stats stats = self._stats_ensure_array(stats) niter = niter if niter else self.niter for itr in range(niter): self._resample_A(stats) self._resample_sigma(stats)",Introduce a mask that allows for missing data "def register_layout(self, name, layout): """""" Registers given layout. :param name: Layout name. :type name: unicode :param layout: Layout object. :type layout: Layout :return: Method success. :rtype: bool """""" if name in self: raise umbra.exceptions.LayoutRegistrationError(""{0} | '{1}' layout is already registered!"".format( self.__class__.__name__, name)) self.__layouts[name] = layout return True","Registers given layout. :param name: Layout name. :type name: unicode :param layout: Layout object. :type layout: Layout :return: Method success. :rtype: bool" "def getTicker(pair, connection=None, info=None): """"""Retrieve the ticker for the given pair. 
Returns a Ticker instance."""""" if info is not None: info.validate_pair(pair) if connection is None: connection = common.BTCEConnection() response = connection.makeJSONRequest(""/api/3/ticker/%s"" % pair) if type(response) is not dict: raise TypeError(""The response is a %r, not a dict."" % type(response)) elif u'error' in response: print(""There is a error \""%s\"" while obtaining ticker %s"" % (response['error'], pair)) ticker = None else: ticker = Ticker(**response[pair]) return ticker",Retrieve the ticker for the given pair. Returns a Ticker instance. "def temp_copy(self): """"""Yields a new Vcs object that represents a temporary, disposable copy of the current repository. The copy is deleted at the end of the context. The following are not copied: - ignored files - easyci private directory (.git/eci for git) Yields: Vcs """""" with contextmanagers.temp_dir() as temp_dir: temp_root_path = os.path.join(temp_dir, 'root') path = os.path.join(self.path, '') # adds trailing slash check_call(['rsync', '-r', ""--exclude={}"".format(self.private_dir()), ""--filter=dir-merge,- {}"".format( self.ignore_patterns_file()), path, temp_root_path]) copy = self.__class__(path=temp_root_path) yield copy","Yields a new Vcs object that represents a temporary, disposable copy of the current repository. The copy is deleted at the end of the context. The following are not copied: - ignored files - easyci private directory (.git/eci for git) Yields: Vcs" "def missingkeys_standard(commdct, dtls, skiplist=None): """"""put missing keys in commdct for standard objects return a list of keys where it is unable to do so commdct is not returned, but is updated"""""" if skiplist == None: skiplist = [] # find objects where all the fields are not named gkeys = [dtls[i] for i in range(len(dtls)) if commdct[i].count({}) > 2] nofirstfields = [] # operatie on those fields for key_txt in gkeys: if key_txt in skiplist: continue # print key_txt # for a function, pass comm as a variable key_i = dtls.index(key_txt.upper()) comm = commdct[key_i] # get all fields fields = getfields(comm) # get repeating field names repnames = repeatingfieldsnames(fields) try: first = repnames[0][0] % (1, ) except IndexError: nofirstfields.append(key_txt) continue # print first # get all comments of the first repeating field names firstnames = [repname[0] % (1, ) for repname in repnames] fcomments = [field for field in fields if bunchhelpers.onlylegalchar(field['field'][0]) in firstnames] fcomments = [dict(fcomment) for fcomment in fcomments] for cmt in fcomments: fld = cmt['field'][0] fld = bunchhelpers.onlylegalchar(fld) fld = bunchhelpers.replaceint(fld) cmt['field'] = [fld] for i, cmt in enumerate(comm[1:]): thefield = cmt['field'][0] thefield = bunchhelpers.onlylegalchar(thefield) if thefield == first: break first_i = i + 1 newfields = [] for i in range(1, len(comm[first_i:]) // len(repnames) + 1): for fcomment in fcomments: nfcomment = dict(fcomment) fld = nfcomment['field'][0] fld = fld % (i, ) nfcomment['field'] = [fld] newfields.append(nfcomment) for i, cmt in enumerate(comm): if i < first_i: continue else: afield = newfields.pop(0) comm[i] = afield commdct[key_i] = comm return nofirstfields","put missing keys in commdct for standard objects return a list of keys where it is unable to do so commdct is not returned, but is updated" "def _font_of_mention(m): """""" Returns the font type and size of the first alphanumeric char in the text or None if there isn't any. 
"""""" for ch in m: if isinstance(ch, LTChar) and ch.get_text().isalnum(): return (ch.fontname, _font_size_of(ch)) return (None, 0)","Returns the font type and size of the first alphanumeric char in the text or None if there isn't any." "def get_email_template(name, language=''): """""" Function that returns an email template instance, from cache or DB. """""" use_cache = getattr(settings, 'POST_OFFICE_CACHE', True) if use_cache: use_cache = getattr(settings, 'POST_OFFICE_TEMPLATE_CACHE', True) if not use_cache: return EmailTemplate.objects.get(name=name, language=language) else: composite_name = '%s:%s' % (name, language) email_template = cache.get(composite_name) if email_template is not None: return email_template else: email_template = EmailTemplate.objects.get(name=name, language=language) cache.set(composite_name, email_template) return email_template","Function that returns an email template instance, from cache or DB." "def start_agent(self, cfgin=True): """""" CLI interface to start 12-factor service """""" default_conf = { ""threads"": { ""result"": { ""number"": 0, ""function"": None }, ""worker"": { ""number"": 0, ""function"": None }, }, ""interval"": { ""refresh"": 900, ""heartbeat"": 300, ""reporting"": 300, ""test"": 60 }, ""heartbeat-hook"": False } indata = {} if cfgin: indata = json.load(sys.stdin) elif os.environ.get(""REFLEX_MONITOR_CONFIG""): indata = os.environ.get(""REFLEX_MONITOR_CONFIG"") if indata[0] != ""{"": indata = base64.b64decode(indata) else: self.NOTIFY(""Using default configuration"") conf = dictlib.union(default_conf, indata) conf['threads']['result']['function'] = self.handler_thread conf['threads']['worker']['function'] = self.worker_thread self.NOTIFY(""Starting monitor Agent"") try: self.configure(conf).start() except KeyboardInterrupt: self.thread_stopper.set() if self.refresh_stopper: self.refresh_stopper.set() if self.heartbeat_stopper: self.heartbeat_stopper.set() if self.reporting_stopper: self.reporting_stopper.set()",CLI interface to start 12-factor service "def dump(self): """""" dump extracted data into a single hdf5file, :return: None :Example: >>> # dump data into an hdf5 formated file >>> datafields = ['s', 'Sx', 'Sy', 'enx', 'eny'] >>> datascript = 'sddsprintdata.sh' >>> datapath = './tests/tracking' >>> hdf5file = './tests/tracking/test.h5' >>> A = DataExtracter('test.sig', *datafields) >>> A.setDataScript(datascript) >>> A.setDataPath (datapath) >>> A.setH5file (hdf5file) >>> A.extractData().dump() >>> >>> # read dumped file >>> fd = h5py.File(hdf5file, 'r') >>> d_s = fd['s'][:] >>> d_sx = fd['Sx'][:] >>> >>> # plot dumped data >>> import matplotlib.pyplot as plt >>> plt.figure(1) >>> plt.plot(d_s, d_sx, 'r-') >>> plt.xlabel('$s$') >>> plt.ylabel('$\sigma_x$') >>> plt.show() Just like the following figure shows: .. image:: ../../images/test_DataExtracter.png :width: 400px """""" f = h5py.File(self.h5file, 'w') for i, k in enumerate(self.kwslist): v = self.h5data[:, i] dset = f.create_dataset(k, shape=v.shape, dtype=v.dtype) dset[...] 
= v f.close()","dump extracted data into a single hdf5file, :return: None :Example: >>> # dump data into an hdf5 formated file >>> datafields = ['s', 'Sx', 'Sy', 'enx', 'eny'] >>> datascript = 'sddsprintdata.sh' >>> datapath = './tests/tracking' >>> hdf5file = './tests/tracking/test.h5' >>> A = DataExtracter('test.sig', *datafields) >>> A.setDataScript(datascript) >>> A.setDataPath (datapath) >>> A.setH5file (hdf5file) >>> A.extractData().dump() >>> >>> # read dumped file >>> fd = h5py.File(hdf5file, 'r') >>> d_s = fd['s'][:] >>> d_sx = fd['Sx'][:] >>> >>> # plot dumped data >>> import matplotlib.pyplot as plt >>> plt.figure(1) >>> plt.plot(d_s, d_sx, 'r-') >>> plt.xlabel('$s$') >>> plt.ylabel('$\sigma_x$') >>> plt.show() Just like the following figure shows: .. image:: ../../images/test_DataExtracter.png :width: 400px" "def create_in_project(self, project, params={}, **options): """"""Creates a new section in a project. Returns the full record of the newly created section. Parameters ---------- project : {Id} The project to create the section in [data] : {Object} Data for the request - name : {String} The text to be displayed as the section name. This cannot be an empty string. """""" path = ""/projects/%s/sections"" % (project) return self.client.post(path, params, **options)","Creates a new section in a project. Returns the full record of the newly created section. Parameters ---------- project : {Id} The project to create the section in [data] : {Object} Data for the request - name : {String} The text to be displayed as the section name. This cannot be an empty string." "def _calc_overlap_coef( markers1: dict, markers2: dict, ): """"""Calculate overlap coefficient between the values of two dictionaries Note: dict values must be sets """""" overlap_coef=np.zeros((len(markers1), len(markers2))) j=0 for marker_group in markers1: tmp = [len(markers2[i].intersection(markers1[marker_group]))/ max(min(len(markers2[i]), len(markers1[marker_group])),1) for i in markers2.keys()] overlap_coef[j,:] = tmp j += 1 return overlap_coef","Calculate overlap coefficient between the values of two dictionaries Note: dict values must be sets" "def decode (cls, bytes, cmddict): """"""Decodes a sequence command from an array of bytes, according to the given command dictionary, and returns a new SeqCmd. """""" attrs = SeqCmdAttrs.decode(bytes[0:1]) delay = SeqDelay .decode(bytes[1:4]) cmd = cmddict .decode(bytes[4:] ) return cls(cmd, delay, attrs)","Decodes a sequence command from an array of bytes, according to the given command dictionary, and returns a new SeqCmd." "def string_to_decimal(value, strict=True): """""" Return a decimal corresponding to the string representation of a number. @param value: a string representation of an decimal number. @param strict: indicate whether the specified string MUST be of a valid decimal number representation. @return: the decimal value represented by the string. @raise ValueError: if the string doesn't represent a valid decimal, while the argument ``strict`` equals ``True``. """""" if is_undefined(value): if strict: raise ValueError('The value cannot be null') return None try: return float(value) except ValueError: raise ValueError( 'The specified string ""%s"" does not represent an integer' % value)","Return a decimal corresponding to the string representation of a number. @param value: a string representation of an decimal number. @param strict: indicate whether the specified string MUST be of a valid decimal number representation. 
@return: the decimal value represented by the string. @raise ValueError: if the string doesn't represent a valid decimal, while the argument ``strict`` equals ``True``." "def from_git(self, path=None, prefer_daily=False): """"""Use Git to determine the package version. This routine uses the __file__ value of the caller to determine which Git repository root to use. """""" if self._version is None: frame = caller(1) path = frame.f_globals.get('__file__') or '.' providers = ([git_day, git_version] if prefer_daily else [git_version, git_day]) for provider in providers: if self._version is not None: break try: with cd(path): self._version = provider() except CalledProcessError: pass except OSError as e: if e.errno != errno.ENOENT: raise return self","Use Git to determine the package version. This routine uses the __file__ value of the caller to determine which Git repository root to use." "def append_value(self, valuename, valuenum=None): """"""Append another enumeration value to the `enum`. The numeric value may be None in which case it is automatically determined by increasing the value of the last item. When the 'values' attribute is accessed the resulting list will be in the same order as append_value() was called. :param valuename: The name of the value. :type valuename: str :param valuenum: The numeric value or None. :type valuenum: int """""" # No number given? Then use the previous one + 1 if valuenum is None: if not self._values: valuenum = 0 else: valuenum = self._values[-1][1] + 1 # Store the new value self._values.append((valuename, int(valuenum)))","Append another enumeration value to the `enum`. The numeric value may be None in which case it is automatically determined by increasing the value of the last item. When the 'values' attribute is accessed the resulting list will be in the same order as append_value() was called. :param valuename: The name of the value. :type valuename: str :param valuenum: The numeric value or None. :type valuenum: int" "def create_missing(self): """"""Automagically populate all required instance attributes. Iterate through the set of all required class :class:`nailgun.entity_fields.Field` defined on ``type(self)`` and create a corresponding instance attribute if none exists. Subclasses should override this method if there is some relationship between two required fields. :return: Nothing. This method relies on side-effects. """""" for field_name, field in self.get_fields().items(): if field.required and not hasattr(self, field_name): # Most `gen_value` methods return a value such as an integer, # string or dictionary, but OneTo{One,Many}Field.gen_value # returns the referenced class. if hasattr(field, 'default'): value = field.default elif hasattr(field, 'choices'): value = gen_choice(field.choices) elif isinstance(field, OneToOneField): value = field.gen_value()(self._server_config).create(True) elif isinstance(field, OneToManyField): value = [ field.gen_value()(self._server_config).create(True) ] else: value = field.gen_value() setattr(self, field_name, value)","Automagically populate all required instance attributes. Iterate through the set of all required class :class:`nailgun.entity_fields.Field` defined on ``type(self)`` and create a corresponding instance attribute if none exists. Subclasses should override this method if there is some relationship between two required fields. :return: Nothing. This method relies on side-effects." "def scandir(path, app=None): ''' Config-aware scandir. Currently, only aware of ``exclude_fnc``. 
:param path: absolute path :type path: str :param app: flask application :type app: flask.Flask or None :returns: filtered scandir entries :rtype: iterator ''' exclude = app and app.config.get('exclude_fnc') if exclude: return ( item for item in compat.scandir(path) if not exclude(item.path) ) return compat.scandir(path)","Config-aware scandir. Currently, only aware of ``exclude_fnc``. :param path: absolute path :type path: str :param app: flask application :type app: flask.Flask or None :returns: filtered scandir entries :rtype: iterator" "def create_env(env_file): """"""Create environ dictionary from current os.environ and variables got from given `env_file`"""""" environ = {} with open(env_file, 'r') as f: for line in f.readlines(): line = line.rstrip(os.linesep) if '=' not in line: continue if line.startswith('#'): continue key, value = line.split('=', 1) environ[key] = parse_value(value) return environ","Create environ dictionary from current os.environ and variables got from given `env_file`" "def get_assessment_notification_session(self, assessment_receiver): """"""Gets the notification session for notifications pertaining to assessment changes. arg: assessment_receiver (osid.assessment.AssessmentReceiver): the assessment receiver interface return: (osid.assessment.AssessmentNotificationSession) - an ``AssessmentNotificationSession`` raise: NullArgument - ``assessment_receiver`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_notification()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_notification()`` is ``true``.* """""" if not self.supports_assessment_notification(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ItemNotificationSession(runtime=self._runtime, receiver=assessment_receiver)","Gets the notification session for notifications pertaining to assessment changes. arg: assessment_receiver (osid.assessment.AssessmentReceiver): the assessment receiver interface return: (osid.assessment.AssessmentNotificationSession) - an ``AssessmentNotificationSession`` raise: NullArgument - ``assessment_receiver`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_notification()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_notification()`` is ``true``.*" "def report(self, morfs=None): """""" Generate a part of json report for coveralls `morfs` is a list of modules or filenames. `outfile` is a file object to write the json to. """""" units = None if hasattr(self, 'find_code_units'): self.find_code_units(morfs) else: units = self.find_file_reporters(morfs) if units is None: if hasattr(self, 'code_units'): units = self.code_units else: units = self.file_reporters for cu in units: try: analyzed = self.coverage._analyze(cu) # pylint: disable=W0212 self.parse_file(cu, analyzed) except NoSource: if not self.config.ignore_errors: log.warning('No source for %s', cu.filename) except NotPython: # Only report errors for .py files, and only if we didn't # explicitly suppress those errors. if cu.should_be_python() and not self.config.ignore_errors: log.warning('Source file is not python %s', cu.filename) except KeyError: cov3x = __version__[0] < 4 cov40 = __version__[0] == 4 and __version__[1] < 1 if cov3x or cov40: raise CoverallsException( 'Old (<4.1) versions of coverage.py do not work ' 'consistently on new versions of Python. 
Please ' 'upgrade your coverage.py.' ) raise return self.source_files","Generate a part of json report for coveralls `morfs` is a list of modules or filenames. `outfile` is a file object to write the json to." "def _settle_message(self, message_number, response): """"""Send a settle disposition for a received message. :param message_number: The delivery number of the message to settle. :type message_number: int :response: The type of disposition to respond with, e.g. whether the message was accepted, rejected or abandoned. :type response: ~uamqp.errors.MessageResponse """""" if not response or isinstance(response, errors.MessageAlreadySettled): return if isinstance(response, errors.MessageAccepted): self._receiver.settle_accepted_message(message_number) elif isinstance(response, errors.MessageReleased): self._receiver.settle_released_message(message_number) elif isinstance(response, errors.MessageRejected): self._receiver.settle_rejected_message( message_number, response.error_condition, response.error_description) elif isinstance(response, errors.MessageModified): self._receiver.settle_modified_message( message_number, response.failed, response.undeliverable, response.annotations) else: raise ValueError(""Invalid message response type: {}"".format(response))","Send a settle disposition for a received message. :param message_number: The delivery number of the message to settle. :type message_number: int :response: The type of disposition to respond with, e.g. whether the message was accepted, rejected or abandoned. :type response: ~uamqp.errors.MessageResponse" "def parse_iptables_rule(line): ''' Parse one iptables rule. Returns a dict where each iptables code argument is mapped to a name using IPTABLES_ARGS. ''' bits = line.split() definition = {} key = None args = [] not_arg = False def add_args(): arg_string = ' '.join(args) if key in IPTABLES_ARGS: definition_key = ( 'not_{0}'.format(IPTABLES_ARGS[key]) if not_arg else IPTABLES_ARGS[key] ) definition[definition_key] = arg_string else: definition.setdefault('extras', []).extend((key, arg_string)) for bit in bits: if bit == '!': if key: add_args() args = [] key = None not_arg = True elif bit.startswith('-'): if key: add_args() args = [] not_arg = False key = bit else: args.append(bit) if key: add_args() if 'extras' in definition: definition['extras'] = set(definition['extras']) return definition","Parse one iptables rule. Returns a dict where each iptables code argument is mapped to a name using IPTABLES_ARGS." "def stop_deps(self, conf, images): """"""Stop the containers for all our dependencies"""""" for dependency, _ in conf.dependency_images(): self.stop_deps(images[dependency], images) try: self.stop_container(images[dependency], fail_on_bad_exit=True, fail_reason=""Failed to run dependency container"") except BadImage: raise except Exception as error: log.warning(""Failed to stop dependency container\timage=%s\tdependency=%s\tcontainer_name=%s\terror=%s"", conf.name, dependency, images[dependency].container_name, error)",Stop the containers for all our dependencies "def marvcli_develop_server(port, public): """"""Run development webserver. ATTENTION: By default it is only served on localhost. To run it within a container and access it from the outside, you need to forward the port and tell it to listen on all IPs instead of only localhost.
"""""" from flask_cors import CORS app = create_app(push=False) app.site.load_for_web() CORS(app) class IPDBMiddleware(object): def __init__(self, app): self.app = app def __call__(self, environ, start_response): from ipdb import launch_ipdb_on_exception with launch_ipdb_on_exception(): appiter = self.app(environ, start_response) for item in appiter: yield item app.debug = True if IPDB: app.wsgi_app = IPDBMiddleware(app.wsgi_app) app.run(use_debugger=False, use_reloader=False, host=('0.0.0.0' if public else '127.0.0.1'), port=port, threaded=False) else: app.run(host=('0.0.0.0' if public else '127.0.0.1'), port=port, reloader_type='watchdog', threaded=False)","Run development webserver. ATTENTION: By default it is only served on localhost. To run it within a container and access it from the outside, you need to forward the port and tell it to listen on all IPs instead of only localhost." "def _bool_encode(self, d): """""" Converts bool values to lowercase strings """""" for k, v in d.items(): if isinstance(v, bool): d[k] = str(v).lower() return d",Converts bool values to lowercase strings "def send_spyder_msg(self, spyder_msg_type, content=None, data=None): """""" Publish custom messages to the Spyder frontend. Parameters ---------- spyder_msg_type: str The spyder message type content: dict The (JSONable) content of the message data: any Any object that is serializable by cloudpickle (should be most things). Will arrive as cloudpickled bytes in `.buffers[0]`. """""" import cloudpickle if content is None: content = {} content['spyder_msg_type'] = spyder_msg_type msg = self.session.send( self.iopub_socket, 'spyder_msg', content=content, buffers=[cloudpickle.dumps(data, protocol=PICKLE_PROTOCOL)], parent=self._parent_header, ) self.log.debug(msg)","Publish custom messages to the Spyder frontend. Parameters ---------- spyder_msg_type: str The spyder message type content: dict The (JSONable) content of the message data: any Any object that is serializable by cloudpickle (should be most things). Will arrive as cloudpickled bytes in `.buffers[0]`." "def inviteByEmail(self, emails, subject, text, html, role=""org_user"", mustApprove=True, expiration=1440): """"""Invites a user or users to a site. Inputs: emails - comma seperated list of emails subject - title of email text - email text html - email text in html role - site role (can't be administrator) mustApprove - verifies if user that is join must be approved by an administrator expiration - time in seconds. Default is 1 day 1440 """""" url = self.root + ""/inviteByEmail"" params = { ""f"" : ""json"", ""emails"": emails, ""subject"": subject, ""text"": text, ""html"" : html, ""role"" : role, ""mustApprove"": mustApprove, ""expiration"" : expiration } return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)","Invites a user or users to a site. Inputs: emails - comma seperated list of emails subject - title of email text - email text html - email text in html role - site role (can't be administrator) mustApprove - verifies if user that is join must be approved by an administrator expiration - time in seconds. Default is 1 day 1440" "def post(self, **kwargs): """"""Send a POST request to the currently loaded website's URL. The browser will automatically fill out the form. If `data` dict has been passed into ``kwargs``, the contained input values will override the automatically filled out values. Returns: `Response` object of a successful request. 
Raises: NoWebsiteLoadedError: If no website is currently loaded. """""" if self._url is None: raise NoWebsiteLoadedError('request submission requires a loaded website') data = kwargs.get('data', {}) for i in self.soup('form').select('input[name]'): if i.get('name') not in data: data[i.get('name')] = i.get('value', '') kwargs['data'] = data response = self.session.post(self._url, **kwargs) self._url = response.url self._response = response return response","Send a POST request to the currently loaded website's URL. The browser will automatically fill out the form. If `data` dict has been passed into ``kwargs``, the contained input values will override the automatically filled out values. Returns: `Response` object of a successful request. Raises: NoWebsiteLoadedError: If no website is currently loaded." "def get_contact(self, jid): """""" Returns a contact Args: jid (aioxmpp.JID): jid of the contact Returns: dict: the roster of contacts """""" try: return self.get_contacts()[jid.bare()] except KeyError: raise ContactNotFound except AttributeError: raise AttributeError(""jid must be an aioxmpp.JID object"")","Returns a contact Args: jid (aioxmpp.JID): jid of the contact Returns: dict: the roster of contacts" "def smooth_l1_loss(input, target, beta=1. / 9, size_average=True): """""" very similar to the smooth_l1_loss from pytorch, but with the extra beta parameter """""" n = torch.abs(input - target) cond = n < beta loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta) if size_average: return loss.mean() return loss.sum()","very similar to the smooth_l1_loss from pytorch, but with the extra beta parameter" "def read_busiest_date(path: str) -> Tuple[datetime.date, FrozenSet[str]]: """"""Find the earliest date with the most trips"""""" feed = load_raw_feed(path) return _busiest_date(feed)",Find the earliest date with the most trips "def Normal(cls, mean: 'TensorFluent', variance: 'TensorFluent', batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']: '''Returns a TensorFluent for the Normal sampling op with given mean and variance. Args: mean: The mean parameter of the Normal distribution. variance: The variance parameter of the Normal distribution. batch_size: The size of the batch (optional). Returns: The Normal distribution and a TensorFluent sample drawn from the distribution. Raises: ValueError: If parameters do not have the same scope. ''' if mean.scope != variance.scope: raise ValueError('Normal distribution: parameters must have same scope!') loc = mean.tensor scale = tf.sqrt(variance.tensor) dist = tf.distributions.Normal(loc, scale) batch = mean.batch or variance.batch if not batch and batch_size is not None: t = dist.sample(batch_size) batch = True else: t = dist.sample() scope = mean.scope.as_list() return (dist, TensorFluent(t, scope, batch=batch))","Returns a TensorFluent for the Normal sampling op with given mean and variance. Args: mean: The mean parameter of the Normal distribution. variance: The variance parameter of the Normal distribution. batch_size: The size of the batch (optional). Returns: The Normal distribution and a TensorFluent sample drawn from the distribution. Raises: ValueError: If parameters do not have the same scope." "def get_socket(self, inbound): """"""Retrieve a socket used by this connection When inbound is True, then the socket from which this connection reads data is retrieved. Otherwise the socket to which this connection writes data is retrieved. 
Read and write sockets differ depending on whether this is a server- or a client-side connection, and on whether a routing demux is in use. """""" if inbound and hasattr(self, ""_rsock""): return self._rsock return self._sock","Retrieve a socket used by this connection When inbound is True, then the socket from which this connection reads data is retrieved. Otherwise the socket to which this connection writes data is retrieved. Read and write sockets differ depending on whether this is a server- or a client-side connection, and on whether a routing demux is in use." "def attachment_to_multidim_measurement(attachment, name=None): """"""Convert an OpenHTF test record attachment to a multi-dim measurement. This is a best effort attempt to reverse, as some data is lost in converting from a multidim to an attachment. Args: attachment: an `openhtf.test_record.Attachment` from a multi-dim. name: an optional name for the measurement. If not provided will use the name included in the attachment. Returns: An multi-dim `openhtf.Measurement`. """""" data = json.loads(attachment.data) name = name or data.get('name') # attachment_dimn are a list of dicts with keys 'uom_suffix' and 'uom_code' attachment_dims = data.get('dimensions', []) # attachment_value is a list of lists [[t1, x1, y1, f1], [t2, x2, y2, f2]] attachment_values = data.get('value') attachment_outcome_str = data.get('outcome') if attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME: # Fpr backward compatibility with saved data we'll convert integers to str try: attachment_outcome_str = test_runs_pb2.Status.Name( int(attachment_outcome_str)) except ValueError: attachment_outcome_str = None # Convert test status outcome str to measurement outcome outcome = TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME.get( attachment_outcome_str) # convert dimensions into htf.Dimensions _lazy_load_units_by_code() dims = [] for d in attachment_dims: # Try to convert into htf.Dimension including backwards compatibility. unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE) description = d.get('name', '') dims.append(measurements.Dimension(description=description, unit=unit)) # Attempt to determine if units are included. if attachment_values and len(dims) == len(attachment_values[0]): # units provided units_ = dims[-1].unit dimensions = dims[:-1] else: units_ = None dimensions = dims # created dimensioned_measured_value and populate with values. measured_value = measurements.DimensionedMeasuredValue( name=name, num_dimensions=len(dimensions) ) for row in attachment_values: coordinates = tuple(row[:-1]) val = row[-1] measured_value[coordinates] = val measurement = measurements.Measurement( name=name, units=units_, dimensions=tuple(dimensions), measured_value=measured_value, outcome=outcome ) return measurement","Convert an OpenHTF test record attachment to a multi-dim measurement. This is a best effort attempt to reverse, as some data is lost in converting from a multidim to an attachment. Args: attachment: an `openhtf.test_record.Attachment` from a multi-dim. name: an optional name for the measurement. If not provided will use the name included in the attachment. Returns: An multi-dim `openhtf.Measurement`." 
"def _handle_response(self): """""" returns RESTBase response if appropriate """""" content = self.cache['restbase']['info']['content-type'] if content.startswith('text/html'): html = self.cache['restbase']['response'] if isinstance(html, bytes): html = html.decode('utf-8') self.data['html'] = html return response = self._load_response('restbase') http_status = self.cache['restbase']['info']['status'] if http_status == 404: raise LookupError(self.cache['restbase']['query']) if self.params.get('endpoint') == '/page/': msg = ""RESTBase /page/ entry points: %s"" % response.get('items') utils.stderr(msg) del self.cache['restbase'] return return response",returns RESTBase response if appropriate "def sg_log(tensor, opt): r""""""Log transform a dense tensor See `tf.log()` in tensorflow. Args: tensor: A `Tensor` ( automatically given by chain ) opt: name: If provided, replace current tensor's name. Returns: A `Tensor`. """""" return tf.log(tensor + tf.sg_eps, name=opt.name)","r""""""Log transform a dense tensor See `tf.log()` in tensorflow. Args: tensor: A `Tensor` ( automatically given by chain ) opt: name: If provided, replace current tensor's name. Returns: A `Tensor`." "def ximshow_unrectified(self, slitlet2d): """"""Display unrectified image with spectrails and frontiers. Parameters ---------- slitlet2d : numpy array Array containing the unrectified slitlet image. """""" title = ""Slitlet#"" + str(self.islitlet) ax = ximshow(slitlet2d, title=title, first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig), show=False) xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1) ylower = self.list_spectrails[0](xdum) ax.plot(xdum, ylower, 'b-') ymiddle = self.list_spectrails[1](xdum) ax.plot(xdum, ymiddle, 'b--') yupper = self.list_spectrails[2](xdum) ax.plot(xdum, yupper, 'b-') ylower_frontier = self.list_frontiers[0](xdum) ax.plot(xdum, ylower_frontier, 'b:') yupper_frontier = self.list_frontiers[1](xdum) ax.plot(xdum, yupper_frontier, 'b:') pause_debugplot(debugplot=self.debugplot, pltshow=True)","Display unrectified image with spectrails and frontiers. Parameters ---------- slitlet2d : numpy array Array containing the unrectified slitlet image." "def new_netting_channel( self, partner: Address, settle_timeout: int, given_block_identifier: BlockSpecification, ) -> ChannelID: """""" Creates a new channel in the TokenNetwork contract. Args: partner: The peer to open the channel with. settle_timeout: The settle timeout to use for this channel. given_block_identifier: The block identifier of the state change that prompted this proxy action Returns: The ChannelID of the new netting channel. 
"""""" checking_block = self.client.get_checking_block() self._new_channel_preconditions( partner=partner, settle_timeout=settle_timeout, block_identifier=given_block_identifier, ) log_details = { 'peer1': pex(self.node_address), 'peer2': pex(partner), } gas_limit = self.proxy.estimate_gas( checking_block, 'openChannel', participant1=self.node_address, participant2=partner, settle_timeout=settle_timeout, ) if not gas_limit: self.proxy.jsonrpc_client.check_for_insufficient_eth( transaction_name='openChannel', transaction_executed=False, required_gas=GAS_REQUIRED_FOR_OPEN_CHANNEL, block_identifier=checking_block, ) self._new_channel_postconditions( partner=partner, block=checking_block, ) log.critical('new_netting_channel call will fail', **log_details) raise RaidenUnrecoverableError('Creating a new channel will fail') log.debug('new_netting_channel called', **log_details) # Prevent concurrent attempts to open a channel with the same token and # partner address. if gas_limit and partner not in self.open_channel_transactions: new_open_channel_transaction = AsyncResult() self.open_channel_transactions[partner] = new_open_channel_transaction gas_limit = safe_gas_limit(gas_limit, GAS_REQUIRED_FOR_OPEN_CHANNEL) try: transaction_hash = self.proxy.transact( 'openChannel', gas_limit, participant1=self.node_address, participant2=partner, settle_timeout=settle_timeout, ) self.client.poll(transaction_hash) receipt_or_none = check_transaction_threw(self.client, transaction_hash) if receipt_or_none: self._new_channel_postconditions( partner=partner, block=receipt_or_none['blockNumber'], ) log.critical('new_netting_channel failed', **log_details) raise RaidenUnrecoverableError('creating new channel failed') except Exception as e: log.critical('new_netting_channel failed', **log_details) new_open_channel_transaction.set_exception(e) raise else: new_open_channel_transaction.set(transaction_hash) finally: self.open_channel_transactions.pop(partner, None) else: # All other concurrent threads should block on the result of opening this channel self.open_channel_transactions[partner].get() channel_identifier: ChannelID = self._detail_channel( participant1=self.node_address, participant2=partner, block_identifier='latest', ).channel_identifier log_details['channel_identifier'] = str(channel_identifier) log.info('new_netting_channel successful', **log_details) return channel_identifier","Creates a new channel in the TokenNetwork contract. Args: partner: The peer to open the channel with. settle_timeout: The settle timeout to use for this channel. given_block_identifier: The block identifier of the state change that prompted this proxy action Returns: The ChannelID of the new netting channel." "def cast_bytes(s, encoding='utf8', errors='strict'): """"""cast str or bytes to bytes"""""" if isinstance(s, bytes): return s elif isinstance(s, str): return s.encode(encoding, errors) else: raise TypeError(""Expected unicode or bytes, got %r"" % s)",cast str or bytes to bytes "def cookies(self): """"""Container of request cookies """""" cookies = SimpleCookie() cookie = self.environ.get('HTTP_COOKIE') if cookie: cookies.load(cookie) return cookies",Container of request cookies "def register(self, name, path, plugin, description=None, final_words=None): """""" Registers a new recipe. 
"""""" if name in self.recipes.keys(): raise RecipeExistsException(""Recipe %s was already registered by %s"" % (name, self.recipes[""name""].plugin.name)) self.recipes[name] = Recipe(name, path, plugin, description, final_words) self.__log.debug(""Recipe %s registered by %s"" % (name, plugin.name)) return self.recipes[name]",Registers a new recipe. "async def set_sound_settings(self, target: str, value: str): """"""Change a sound setting."""""" params = {""settings"": [{""target"": target, ""value"": value}]} return await self.services[""audio""][""setSoundSettings""](params)",Change a sound setting. "def write_report(self, session, filename): """""" Writes the report in JSON format to the given file """""" if not self.__report: session.write_line(""No report to write down"") return try: with open(filename, ""w+"") as out_file: out_file.write(self.to_json(self.__report)) except IOError as ex: session.write_line(""Error writing to file: {0}"", ex)",Writes the report in JSON format to the given file "def query_job_ids(build_version, config, log): """"""Get one or more job IDs and their status associated with a build version. Filters jobs by name if --job-name is specified. :raise HandledError: On invalid JSON data or bad job name. :param str build_version: AppVeyor build version from query_build_version(). :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: List of two-item tuples. Job ID (first) and its status (second). :rtype: list """""" url = '/projects/{0}/{1}/build/{2}'.format(config['owner'], config['repo'], build_version) # Query version. log.debug('Querying AppVeyor version API for %s/%s at %s...', config['owner'], config['repo'], build_version) json_data = query_api(url) if 'build' not in json_data: log.error('Bad JSON reply: ""build"" key missing.') raise HandledError if 'jobs' not in json_data['build']: log.error('Bad JSON reply: ""jobs"" key missing.') raise HandledError # Find AppVeyor job. all_jobs = list() for job in json_data['build']['jobs']: if config['job_name'] and config['job_name'] == job['name']: log.debug('Filtering by job name: found match!') return [(job['jobId'], job['status'])] all_jobs.append((job['jobId'], job['status'])) if config['job_name']: log.error('Job name ""%s"" not found.', config['job_name']) raise HandledError return all_jobs","Get one or more job IDs and their status associated with a build version. Filters jobs by name if --job-name is specified. :raise HandledError: On invalid JSON data or bad job name. :param str build_version: AppVeyor build version from query_build_version(). :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: List of two-item tuples. Job ID (first) and its status (second). 
:rtype: list" "def autocomplete_view(self, request): """""" Searches in the fields of the given related model and returns the result as a simple string to be used by the jQuery Autocomplete plugin """""" query = request.GET.get('q', None) app_label = request.GET.get('app_label', None) model_name = request.GET.get('model_name', None) search_fields = request.GET.get('search_fields', None) object_pk = request.GET.get('object_pk', None) try: to_string_function = self.related_string_functions[model_name] except KeyError: to_string_function = lambda x: str(x) if search_fields and app_label and model_name and (query or object_pk): def construct_search(field_name): # use different lookup methods depending on the notation if field_name.startswith('^'): fmt, name = ""{}__istartswith"", field_name[1:] elif field_name.startswith('='): fmt, name = ""{}__iexact"", field_name[1:] elif field_name.startswith('@'): fmt, name = ""{}__search"", field_name[1:] else: fmt, name = ""{}__icontains"", field_name return fmt.format(name) model = apps.get_model(app_label, model_name) queryset = model._default_manager.all() data = '' if query: for bit in query.split(): or_queries = [ models.Q(**{construct_search(smart_str(field_name)): smart_str(bit)}) for field_name in search_fields.split(',') ] other_qs = QuerySet(model) other_qs.query.select_related = queryset.query.select_related other_qs = other_qs.filter(reduce(operator.or_, or_queries)) queryset = queryset & other_qs if self.autocomplete_limit: queryset = queryset[:self.autocomplete_limit] data = ''.join([ '{}|{}\n'.format(to_string_function(f), f.pk) for f in queryset ]) elif object_pk: try: obj = queryset.get(pk=object_pk) except: pass else: data = to_string_function(obj) return HttpResponse(data) return HttpResponseNotFound()","Searches in the fields of the given related model and returns the result as a simple string to be used by the jQuery Autocomplete plugin" "def bundle_biomass_components(model, reaction): """""" Return bundle biomass component reactions if it is not one lumped reaction. There are two basic ways of specifying the biomass composition. The most common is a single lumped reaction containing all biomass precursors. Alternatively, the biomass equation can be split into several reactions each focusing on a different macromolecular component for instance a (1 gDW ash) + b (1 gDW phospholipids) + c (free fatty acids)+ d (1 gDW carbs) + e (1 gDW protein) + f (1 gDW RNA) + g (1 gDW DNA) + h (vitamins/cofactors) + xATP + xH2O-> 1 gDCW biomass + xADP + xH + xPi. This function aims to identify if the given biomass reaction 'reaction', is a lumped all-in-one reaction, or whether it is just the final composing reaction of all macromolecular components. It is important to identify which other reaction belong to a given biomass reaction to be able to identify universal biomass components or calculate detailed precursor stoichiometries. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list One or more reactions that qualify as THE biomass equation together. Notes ----- Counting H2O, ADP, Pi, H, and ATP, the amount of metabolites in a split reaction is comparatively low: Any reaction with less or equal to 15 metabolites can probably be counted as a split reaction containing Ash, Phospholipids, Fatty Acids, Carbohydrates (i.e. cell wall components), Protein, RNA, DNA, Cofactors and Vitamins, and Small Molecules. 
Any reaction with more than or equal to 28 metabolites, however, (21 AA + 3 Nucleotides (4-ATP) + 4 Deoxy-Nucleotides) can be considered a lumped reaction. Anything in between will be treated conservatively as a lumped reaction. For split reactions, after removing any of the metabolites associated with growth-associated energy expenditure (H2O, ADP, Pi, H, and ATP), the only remaining metabolites should be generalized macromolecule precursors e.g. Protein, Phospholipids etc. Each of these have their own composing reactions. Hence we include the reactions of these metabolites in the set that ultimately makes up the returned list of reactions that together make up the biomass equation. """""" if len(reaction.metabolites) >= 16: return [reaction] id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') gam_mets = [""MNXM3"", ""MNXM2"", ""MNXM7"", ""MNXM1"", 'MNXM9'] try: gam = set([helpers.find_met_in_model( model, met, id_of_main_compartment)[0] for met in gam_mets]) except RuntimeError: gam = set() regex = re.compile('^{}(_[a-zA-Z]+?)*?$'.format('biomass'), re.IGNORECASE) biomass_metabolite = set(model.metabolites.query(regex)) macromolecules = set(reaction.metabolites) - gam - biomass_metabolite bundled_reactions = set() for met in macromolecules: bundled_reactions = bundled_reactions | set(met.reactions) return list(bundled_reactions)","Return bundle biomass component reactions if it is not one lumped reaction. There are two basic ways of specifying the biomass composition. The most common is a single lumped reaction containing all biomass precursors. Alternatively, the biomass equation can be split into several reactions each focusing on a different macromolecular component for instance a (1 gDW ash) + b (1 gDW phospholipids) + c (free fatty acids)+ d (1 gDW carbs) + e (1 gDW protein) + f (1 gDW RNA) + g (1 gDW DNA) + h (vitamins/cofactors) + xATP + xH2O-> 1 gDCW biomass + xADP + xH + xPi. This function aims to identify if the given biomass reaction 'reaction', is a lumped all-in-one reaction, or whether it is just the final composing reaction of all macromolecular components. It is important to identify which other reaction belong to a given biomass reaction to be able to identify universal biomass components or calculate detailed precursor stoichiometries. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list One or more reactions that qualify as THE biomass equation together. Notes ----- Counting H2O, ADP, Pi, H, and ATP, the amount of metabolites in a split reaction is comparatively low: Any reaction with less or equal to 15 metabolites can probably be counted as a split reaction containing Ash, Phospholipids, Fatty Acids, Carbohydrates (i.e. cell wall components), Protein, RNA, DNA, Cofactors and Vitamins, and Small Molecules. Any reaction with more than or equal to 28 metabolites, however, (21 AA + 3 Nucleotides (4-ATP) + 4 Deoxy-Nucleotides) can be considered a lumped reaction. Anything in between will be treated conservatively as a lumped reaction. For split reactions, after removing any of the metabolites associated with growth-associated energy expenditure (H2O, ADP, Pi, H, and ATP), the only remaining metabolites should be generalized macromolecule precursors e.g. Protein, Phospholipids etc. Each of these have their own composing reactions. 
Hence we include the reactions of these metabolites in the set that ultimately makes up the returned list of reactions that together make up the biomass equation." "def parse(self, msg): """""" Parse an HL7 message and return an HL7 dictionary. :param msg: HL7 message to parse :return: An HL7 dictionary """""" #init dictValues = HL7Dict(self.tersersep) msg_ = msg.strip('\r\n ') # extracts separator defined in the message itself self.extractSeparators(dictValues, msg_) msg_ = msg_.replace('\r', '\n') lines = msg_.split('\n') lineNumber = 1 # build the map of segments segmentNameCount, lineMap = self.buildSegmentMap(lines) dictValues.setSegmentsMap(segmentNameCount, lineMap) # Parse each line of the message : 1 line = 1 segment for line in lines: dictValues.currentLineNumber = lineNumber self.extractValues(dictValues, line) lineNumber += 1 return dictValues","Parse an HL7 message and return an HL7 dictionary. :param msg: HL7 message to parse :return: An HL7 dictionary" "def handle(send, msg, args): """"""Implements several XKCD comics."""""" output = textutils.gen_xkcd_sub(msg, True) if output is None: return if args['type'] == 'action': send(""correction: * %s %s"" % (args['nick'], output)) else: send(""%s actually meant: %s"" % (args['nick'], output))",Implements several XKCD comics. "def include(self, *terms): """"""Add new terms to the current ontology. Raises: TypeError: when the arguments is (are) neither a TermList nor a Term. Note: This will also recursively include terms in the term's relations dictionnary, but it is considered bad practice to do so. If you want to create your own ontology, you should only add an ID (such as 'ONT:001') to your terms relations, and let the Ontology link terms with each other. Examples: Create a new ontology from scratch >>> from pronto import Term, Relationship >>> t1 = Term('ONT:001','my 1st term', ... 'this is my first term') >>> t2 = Term('ONT:002', 'my 2nd term', ... 'this is my second term', ... {Relationship('part_of'): ['ONT:001']}) >>> ont = Ontology() >>> ont.include(t1, t2) >>> >>> 'ONT:002' in ont True >>> ont['ONT:001'].children [] """""" ref_needed = False for term in terms: if isinstance(term, TermList): ref_needed = ref_needed or self._include_term_list(term) elif isinstance(term, Term): ref_needed = ref_needed or self._include_term(term) else: raise TypeError('include only accepts or as arguments') self.adopt() self.reference()","Add new terms to the current ontology. Raises: TypeError: when the arguments is (are) neither a TermList nor a Term. Note: This will also recursively include terms in the term's relations dictionnary, but it is considered bad practice to do so. If you want to create your own ontology, you should only add an ID (such as 'ONT:001') to your terms relations, and let the Ontology link terms with each other. Examples: Create a new ontology from scratch >>> from pronto import Term, Relationship >>> t1 = Term('ONT:001','my 1st term', ... 'this is my first term') >>> t2 = Term('ONT:002', 'my 2nd term', ... 'this is my second term', ... {Relationship('part_of'): ['ONT:001']}) >>> ont = Ontology() >>> ont.include(t1, t2) >>> >>> 'ONT:002' in ont True >>> ont['ONT:001'].children []" "def decode_obj_table(table_entries, plugin): """"""Return root of obj table. 
Converts user-class objects"""""" entries = [] for entry in table_entries: if isinstance(entry, Container): assert not hasattr(entry, '__recursion_lock__') user_obj_def = plugin.user_objects[entry.classID] assert entry.version == user_obj_def.version entry = Container(class_name=entry.classID, **dict(zip(user_obj_def.defaults.keys(), entry.values))) entries.append(entry) return decode_network(entries)",Return root of obj table. Converts user-class objects "def check_solver(self, image_x, image_y, kwargs_lens): """""" returns the precision of the solver to match the image position :param kwargs_lens: full lens model (including solved parameters) :param image_x: point source in image :param image_y: point source in image :return: precision of Euclidean distances between the different rays arriving at the image positions """""" source_x, source_y = self._lensModel.ray_shooting(image_x, image_y, kwargs_lens) dist = np.sqrt((source_x - source_x[0]) ** 2 + (source_y - source_y[0]) ** 2) return dist","returns the precision of the solver to match the image position :param kwargs_lens: full lens model (including solved parameters) :param image_x: point source in image :param image_y: point source in image :return: precision of Euclidean distances between the different rays arriving at the image positions" "def detect_column_renamings(self, table_differences): """""" Try to find columns that only changed their names. :type table_differences: TableDiff """""" rename_candidates = {} for added_column_name, added_column in table_differences.added_columns.items(): for removed_column in table_differences.removed_columns.values(): if len(self.diff_column(added_column, removed_column)) == 0: if added_column.get_name() not in rename_candidates: rename_candidates[added_column.get_name()] = [] rename_candidates[added_column.get_name()] = (removed_column, added_column, added_column_name) for candidate_columns in rename_candidates.values(): if len(candidate_columns) == 1: removed_column, added_column, _ = candidate_columns[0] removed_column_name = removed_column.get_name().lower() added_column_name = added_column.get_name().lower() if removed_column_name not in table_differences.renamed_columns: table_differences.renamed_columns[removed_column_name] = added_column del table_differences.added_columns[added_column_name] del table_differences.removed_columns[removed_column_name]","Try to find columns that only changed their names. :type table_differences: TableDiff" "def __loadindcomps(self): ''' import industry comps ''' csv_path = os.path.join(os.path.dirname(__file__), self.stock_no_files) with open(csv_path) as csv_file: csv_data = csv.reader(csv_file) result = {} check_words = re.compile(r'^[\d]{2,}[\w]?') for i in csv_data: if check_words.match(i[2]): try: result[i[2]].append(i[0].decode('utf-8')) except (ValueError, KeyError): try: result[i[2]] = [i[0].decode('utf-8')] except KeyError: pass return result",import industry comps "def remove_sample(self, md5): """"""Delete a specific sample"""""" # Grab the sample record = self.database[self.sample_collection].find_one({'md5': md5}) if not record: return # Delete it print 'Deleting sample: %s (%.2f MB)...' 
% (record['md5'], record['length']/1024.0/1024.0) self.database[self.sample_collection].remove({'md5': record['md5']}) self.gridfs_handle.delete(record['__grid_fs']) # Print info print 'Sample Storage: %.2f out of %.2f MB' % (self.sample_storage_size(), self.samples_cap)",Delete a specific sample "async def build_verify_payment_req(wallet_handle: int, submitter_did: str, receipt: str) -> (str, str): """""" Builds Indy request for information to verify the payment receipt :param wallet_handle: wallet handle (created by open_wallet). :param submitter_did : (Option) DID of request sender :param receipt: payment receipt to verify :return: verify_txn_json: Indy request for verification receipt for transactions in the ledger payment_method: used payment method """""" logger = logging.getLogger(__name__) logger.debug(""build_verify_payment_req: >>> wallet_handle: %r, submitter_did: %r, receipt: %r"", wallet_handle, submitter_did, receipt) if not hasattr(build_verify_payment_req, ""cb""): logger.debug(""build_verify_payment_req: Creating callback"") build_verify_payment_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p)) c_wallet_handle = c_int32(wallet_handle) c_submitter_did = c_char_p(submitter_did.encode('utf-8')) if submitter_did is not None else None c_receipt = c_char_p(receipt.encode('utf-8')) (verify_txn_json, payment_method) = await do_call('indy_build_verify_payment_req', c_wallet_handle, c_submitter_did, c_receipt, build_verify_payment_req.cb) res = (verify_txn_json.decode(), payment_method.decode()) logger.debug(""build_verify_payment_req: <<< res: %r"", res) return res","Builds Indy request for information to verify the payment receipt :param wallet_handle: wallet handle (created by open_wallet). :param submitter_did : (Option) DID of request sender :param receipt: payment receipt to verify :return: verify_txn_json: Indy request for verification receipt for transactions in the ledger payment_method: used payment method" "def set_link(self, prop, value): """""" Set given link in CTS Namespace .. example:: collection.set_link(NAMESPACES.CTS.about, ""urn:cts:latinLit:phi1294.phi002"") :param prop: Property to set (Without namespace) :param value: Value to set for given property """""" # https://rdflib.readthedocs.io/en/stable/ # URIRef == identifiers (urn, http, URI in general) # Literal == String or Number (can have a language) # BNode == Anonymous nodes (So no specific identifier) # eg. BNode : Edition(MartialEpigrams:URIRef) ---has_metadata--> Metadata(BNode) if not isinstance(value, URIRef): value = URIRef(value) self.metadata.add(prop, value)","Set given link in CTS Namespace .. example:: collection.set_link(NAMESPACES.CTS.about, ""urn:cts:latinLit:phi1294.phi002"") :param prop: Property to set (Without namespace) :param value: Value to set for given property" "def get_stats(self): """""" Return performance data for the module responder. :returns: Dict containing keys: * `get_module_count`: Integer count of :data:`mitogen.core.GET_MODULE` messages received. * `get_module_secs`: Floating point total seconds spent servicing :data:`mitogen.core.GET_MODULE` requests. * `good_load_module_count`: Integer count of successful :data:`mitogen.core.LOAD_MODULE` messages sent. * `good_load_module_size`: Integer total bytes sent in :data:`mitogen.core.LOAD_MODULE` message payloads. * `bad_load_module_count`: Integer count of negative :data:`mitogen.core.LOAD_MODULE` messages sent. * `minify_secs`: CPU seconds spent minifying modules marked minify-safe. 
"""""" return { 'get_module_count': self.responder.get_module_count, 'get_module_secs': self.responder.get_module_secs, 'good_load_module_count': self.responder.good_load_module_count, 'good_load_module_size': self.responder.good_load_module_size, 'bad_load_module_count': self.responder.bad_load_module_count, 'minify_secs': self.responder.minify_secs, }","Return performance data for the module responder. :returns: Dict containing keys: * `get_module_count`: Integer count of :data:`mitogen.core.GET_MODULE` messages received. * `get_module_secs`: Floating point total seconds spent servicing :data:`mitogen.core.GET_MODULE` requests. * `good_load_module_count`: Integer count of successful :data:`mitogen.core.LOAD_MODULE` messages sent. * `good_load_module_size`: Integer total bytes sent in :data:`mitogen.core.LOAD_MODULE` message payloads. * `bad_load_module_count`: Integer count of negative :data:`mitogen.core.LOAD_MODULE` messages sent. * `minify_secs`: CPU seconds spent minifying modules marked minify-safe." "def hid(manufacturer: str, serial_number: str, model: str) -> str: """"""Computes the HID for the given properties of a device. The HID is suitable to use to an URI."""""" return Naming.url_word(manufacturer) + '-' + Naming.url_word(serial_number) + '-' + Naming.url_word(model)",Computes the HID for the given properties of a device. The HID is suitable to use to an URI. "def __get_distribution_tags(self, client, arn): """"""Returns a dict containing the tags for a CloudFront distribution Args: client (botocore.client.CloudFront): Boto3 CloudFront client object arn (str): ARN of the distribution to get tags for Returns: `dict` """""" return { t['Key']: t['Value'] for t in client.list_tags_for_resource( Resource=arn )['Tags']['Items'] }","Returns a dict containing the tags for a CloudFront distribution Args: client (botocore.client.CloudFront): Boto3 CloudFront client object arn (str): ARN of the distribution to get tags for Returns: `dict`" "def dkim_sign(message, dkim_domain=None, dkim_key=None, dkim_selector=None, dkim_headers=None): """"""Return signed email message if dkim package and settings are available."""""" try: import dkim except ImportError: pass else: if dkim_domain and dkim_key: sig = dkim.sign(message, dkim_selector, dkim_domain, dkim_key, include_headers=dkim_headers) message = sig + message return message",Return signed email message if dkim package and settings are available. "def run_daemon(self): """""" Used as daemon starter. Warning: DO NOT OVERRIDE THIS. """""" try: self.daemon_runner.do_action() except daemon.runner.DaemonRunnerStopFailureError: self.onStopFail() except SystemExit: self.onExit()","Used as daemon starter. Warning: DO NOT OVERRIDE THIS." "def get_parent_ids(self): """"""Gets the parents of this node. return: (osid.id.IdList) - the parents of this node *compliance: mandatory -- This method must be implemented.* """""" id_list = [] from ..id.objects import IdList for parent_node in self._my_map['parentNodes']: id_list.append(str(parent_node.ident)) return IdList(id_list)","Gets the parents of this node. return: (osid.id.IdList) - the parents of this node *compliance: mandatory -- This method must be implemented.*" "def _getslice(self, maps): """"""Determines how to slice the scratch for returning values."""""" invals = maps[list(self.inputs)[0]] if not isinstance(invals, (numpy.ndarray, list)): getslice = 0 else: getslice = slice(None, None) return getslice",Determines how to slice the scratch for returning values. 
"def count_open_fds(): """"""return the number of open file descriptors for current process. .. warning: will only work on UNIX-like os-es. http://stackoverflow.com/a/7142094 """""" pid = os.getpid() procs = subprocess.check_output( ['lsof', '-w', '-Ff', '-p', str(pid)]) nprocs = len( [s for s in procs.split('\n') if s and s[0] == 'f' and s[1:].isdigit()] ) return nprocs","return the number of open file descriptors for current process. .. warning: will only work on UNIX-like os-es. http://stackoverflow.com/a/7142094" "def to_series(self): """""" Serialize the fit to a pandas.Series. The index on the series gives the bin edges and the valeus give the CDF. Returns ------- pandas.Series """""" return pandas.Series( self.cdf, index=[numpy.nan] + list(self.bin_edges) + [numpy.nan])","Serialize the fit to a pandas.Series. The index on the series gives the bin edges and the valeus give the CDF. Returns ------- pandas.Series" "def encode(self, label): """""" Encodes a ``label``. Args: label (object): Label to encode. Returns: torch.Tensor: Encoding of the label. """""" label = super().encode(label) return torch.tensor(self.stoi.get(label, self.unknown_index))","Encodes a ``label``. Args: label (object): Label to encode. Returns: torch.Tensor: Encoding of the label." "def set_priorities(SO_methods, ask): """""" figure out which sample_azimuth to use, if multiple orientation methods """""" # if ask set to 1, then can change priorities SO_methods = [meth.strip() for meth in SO_methods] SO_defaults = ['SO-SUN', 'SO-GPS-DIFF', 'SO-SUN-SIGHT', 'SO-SIGHT', 'SO-SIGHT-BS', 'SO-CMD-NORTH', 'SO-MAG', 'SO-SM', 'SO-REC', 'SO-V', 'SO-CORE', 'SO-NO'] SO_priorities, prior_list = [], [] if len(SO_methods) >= 1: for l in range(len(SO_defaults)): if SO_defaults[l] in SO_methods: SO_priorities.append(SO_defaults[l]) pri, change = 0, ""1"" if ask == 1: print(""""""These methods of sample orientation were found: They have been assigned a provisional priority (top = zero, last = highest number) """""") for m in range(len(SO_defaults)): if SO_defaults[m] in SO_methods: SO_priorities[SO_methods.index(SO_defaults[m])] = pri pri += 1 while change == ""1"": prior_list = SO_priorities for m in range(len(SO_methods)): print(SO_methods[m], SO_priorities[m]) change = input(""Change these? 1/[0] "") if change != ""1"": break SO_priorities = [] for l in range(len(SO_methods)): print(SO_methods[l]) print("" Priority? "", prior_list) pri = int(input()) SO_priorities.append(pri) del prior_list[prior_list.index(pri)] return SO_priorities","figure out which sample_azimuth to use, if multiple orientation methods" "def make_ready_current(self): """""" Marks all targets in a task ready for execution if any target is not current. This is the default behavior for building only what's necessary. 
"""""" global print_prepare T = self.tm.trace if T: T.write(self.trace_message(u'Task.make_ready_current()', self.node)) self.out_of_date = [] needs_executing = False for t in self.targets: try: t.disambiguate().make_ready() is_up_to_date = not t.has_builder() or \ (not t.always_build and t.is_up_to_date()) except EnvironmentError as e: raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename) if not is_up_to_date: self.out_of_date.append(t) needs_executing = True if needs_executing: for t in self.targets: t.set_state(NODE_EXECUTING) for s in t.side_effects: # add disambiguate here to mirror the call on targets in first loop above s.disambiguate().set_state(NODE_EXECUTING) else: for t in self.targets: # We must invoke visited() to ensure that the node # information has been computed before allowing the # parent nodes to execute. (That could occur in a # parallel build...) t.visited() t.set_state(NODE_UP_TO_DATE) if (not print_prepare and (not hasattr(self, 'options') or not self.options.debug_includes)): t.release_target_info()","Marks all targets in a task ready for execution if any target is not current. This is the default behavior for building only what's necessary." "def _parse_value(self, html_data, field): """""" Parse the HTML table to find the requested field's value. All of the values are passed in an HTML table row instead of as individual items. The values need to be parsed by matching the requested attribute with a parsing scheme that sports-reference uses to differentiate stats. This function returns a single value for the given attribute. Parameters ---------- html_data : string A string containing all of the rows of stats for a given team. If multiple tables are being referenced, this will be comprised of multiple rows in a single string. field : string The name of the attribute to match. Field must be a key in the PLAYER_SCHEME dictionary. Returns ------- list A list of all values that match the requested field. If no value could be found, returns None. """""" scheme = PLAYER_SCHEME[field] items = [i.text() for i in html_data(scheme).items()] # Stats can be added and removed on a yearly basis. If no stats are # found, return None and have that be the value. if len(items) == 0: return None return items","Parse the HTML table to find the requested field's value. All of the values are passed in an HTML table row instead of as individual items. The values need to be parsed by matching the requested attribute with a parsing scheme that sports-reference uses to differentiate stats. This function returns a single value for the given attribute. Parameters ---------- html_data : string A string containing all of the rows of stats for a given team. If multiple tables are being referenced, this will be comprised of multiple rows in a single string. field : string The name of the attribute to match. Field must be a key in the PLAYER_SCHEME dictionary. Returns ------- list A list of all values that match the requested field. If no value could be found, returns None." "def get_model_field(model, field_name): """"""Return a field given a model and field name. Arguments: model: a Django model field_name: the name of a field Returns: A Django field if `field_name` is a valid field for `model`, None otherwise. 
"""""" meta = model._meta try: if DJANGO19: field = meta.get_field(field_name) else: field = meta.get_field_by_name(field_name)[0] return field except: if DJANGO19: related_objs = ( f for f in meta.get_fields() if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete ) related_m2m_objs = ( f for f in meta.get_fields(include_hidden=True) if f.many_to_many and f.auto_created ) else: related_objs = meta.get_all_related_objects() related_m2m_objs = meta.get_all_related_many_to_many_objects() related_objects = { o.get_accessor_name(): o for o in chain(related_objs, related_m2m_objs) } if field_name in related_objects: return related_objects[field_name] else: # check virtual fields (1.7) if hasattr(meta, 'virtual_fields'): for field in meta.virtual_fields: if field.name == field_name: return field raise AttributeError( '%s is not a valid field for %s' % (field_name, model) )","Return a field given a model and field name. Arguments: model: a Django model field_name: the name of a field Returns: A Django field if `field_name` is a valid field for `model`, None otherwise." "async def turn_off(self, switch=None): """"""Turn off relay."""""" if switch is not None: switch = codecs.decode(switch.rjust(2, '0'), 'hex') packet = self.protocol.format_packet(b""\x10"" + switch + b""\x02"") else: packet = self.protocol.format_packet(b""\x0b"") states = await self._send(packet) return states",Turn off relay. "def clear_deadline(self): """"""Clears the deadline. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """""" # Implemented from template for osid.assessment.AssessmentOfferedForm.clear_start_time_template if (self.get_deadline_metadata().is_read_only() or self.get_deadline_metadata().is_required()): raise errors.NoAccess() self._my_map['deadline'] = self._deadline_default","Clears the deadline. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*" "def _update_linked_table(table, col_name, added, copied, removed): """""" Copy and update rows in a table that has a column referencing another table that has had rows added via copying. Parameters ---------- table : pandas.DataFrame Table to update with new or removed rows. col_name : str Name of column in `table` that corresponds to the index values in `copied` and `removed`. added : pandas.Index Indexes of rows that are new in the linked table. copied : pandas.Index Indexes of rows that were copied to make new rows in linked table. removed : pandas.Index Indexes of rows that were removed from the linked table. 
Returns ------- updated : pandas.DataFrame """""" logger.debug('start: update linked table after transition') # handle removals table = table.loc[~table[col_name].isin(set(removed))] if (added is None or len(added) == 0): return table # map new IDs to the IDs from which they were copied id_map = pd.concat([pd.Series(copied, name=col_name), pd.Series(added, name='temp_id')], axis=1) # join to linked table and assign new id new_rows = id_map.merge(table, on=col_name) new_rows.drop(col_name, axis=1, inplace=True) new_rows.rename(columns={'temp_id': col_name}, inplace=True) # index the new rows starting_index = table.index.values.max() + 1 new_rows.index = np.arange(starting_index, starting_index + len(new_rows), dtype=np.int) logger.debug('finish: update linked table after transition') return pd.concat([table, new_rows])","Copy and update rows in a table that has a column referencing another table that has had rows added via copying. Parameters ---------- table : pandas.DataFrame Table to update with new or removed rows. col_name : str Name of column in `table` that corresponds to the index values in `copied` and `removed`. added : pandas.Index Indexes of rows that are new in the linked table. copied : pandas.Index Indexes of rows that were copied to make new rows in linked table. removed : pandas.Index Indexes of rows that were removed from the linked table. Returns ------- updated : pandas.DataFrame" "def export_to_pypsa(self, session, method='onthefly'): """"""Exports MVGridDing0 grid to PyPSA database tables Peculiarities of MV grids are implemented here. Derive general export method from this and adapt to needs of LVGridDing0 Parameters ---------- session: :sqlalchemy:`SQLAlchemy session object` Description method: str Specify export method:: 'db': grid data will be exported to database 'onthefly': grid data will be passed to PyPSA directly (default) Notes ----- It has to be proven that this method works for LV grids as well! Ding0 treats two stationary case of powerflow: 1) Full load: We assume no generation and loads to be set to peak load 2) Generation worst case: """""" # definitions for temp_resolution table temp_id = 1 timesteps = 2 start_time = datetime(1970, 1, 1, 00, 00, 0) resolution = 'H' nodes = self._graph.nodes() edges = [edge for edge in list(self.graph_edges()) if (edge['adj_nodes'][0] in nodes and not isinstance( edge['adj_nodes'][0], LVLoadAreaCentreDing0)) and (edge['adj_nodes'][1] in nodes and not isinstance( edge['adj_nodes'][1], LVLoadAreaCentreDing0))] if method == 'db': # Export node objects: Busses, Loads, Generators pypsa_io.export_nodes(self, session, nodes, temp_id, lv_transformer=False) # Export edges pypsa_io.export_edges(self, session, edges) # Create table about temporal coverage of PF analysis pypsa_io.create_temp_resolution_table(session, timesteps=timesteps, resolution=resolution, start_time=start_time) elif method == 'onthefly': nodes_dict, components_data = pypsa_io.nodes_to_dict_of_dataframes( self, nodes, lv_transformer=False) edges_dict = pypsa_io.edges_to_dict_of_dataframes(self, edges) components = tools.merge_two_dicts(nodes_dict, edges_dict) return components, components_data else: raise ValueError('Sorry, this export method does not exist!')","Exports MVGridDing0 grid to PyPSA database tables Peculiarities of MV grids are implemented here. 
Derive general export method from this and adapt to needs of LVGridDing0 Parameters ---------- session: :sqlalchemy:`SQLAlchemy session object` Description method: str Specify export method:: 'db': grid data will be exported to database 'onthefly': grid data will be passed to PyPSA directly (default) Notes ----- It has to be proven that this method works for LV grids as well! Ding0 treats two stationary case of powerflow: 1) Full load: We assume no generation and loads to be set to peak load 2) Generation worst case:" "def get_graph(self, run_key, device_name, debug=False): """"""Get the runtime GraphDef proto associated with a run key and a device. Args: run_key: A Session.run key. device_name: Name of the device in question. debug: Whether the debugger-decorated graph is to be retrieved. Returns: A `GraphDef` proto. """""" return self.get_graphs(run_key, debug=debug).get(device_name, None)","Get the runtime GraphDef proto associated with a run key and a device. Args: run_key: A Session.run key. device_name: Name of the device in question. debug: Whether the debugger-decorated graph is to be retrieved. Returns: A `GraphDef` proto." "def parse_example_tensor(examples, train_config, keep_target): """"""Read the csv files. Args: examples: string tensor train_config: training config keep_target: if true, the target column is expected to exist and it is returned in the features dict. Returns: Dict of feature_name to tensor. Target feature is in the dict. """""" csv_header = [] if keep_target: csv_header = train_config['csv_header'] else: csv_header = [name for name in train_config['csv_header'] if name != train_config['target_column']] # record_defaults are used by tf.decode_csv to insert defaults, and to infer # the datatype. record_defaults = [[train_config['csv_defaults'][name]] for name in csv_header] tensors = tf.decode_csv(examples, record_defaults, name='csv_to_tensors') # I'm not really sure why expand_dims needs to be called. If using regression # models, it errors without it. tensors = [tf.expand_dims(x, axis=1) for x in tensors] tensor_dict = dict(zip(csv_header, tensors)) return tensor_dict","Read the csv files. Args: examples: string tensor train_config: training config keep_target: if true, the target column is expected to exist and it is returned in the features dict. Returns: Dict of feature_name to tensor. Target feature is in the dict." "def compute_srcprob(self,xmlfile=None, overwrite=False): """"""Run the gtsrcprob app with the current model or a user provided xmlfile"""""" for i,c in enumerate(self.components): # compute diffuse response, necessary for srcprob c._diffrsp_app(xmlfile=xmlfile) # compute srcprob c._srcprob_app(xmlfile = xmlfile, overwrite = overwrite)",Run the gtsrcprob app with the current model or a user provided xmlfile "async def turn_off(self, switch=None): """"""Turn off relay."""""" if switch is not None: switch = codecs.decode(switch.rjust(2, '0'), 'hex') packet = self.protocol.format_packet(b""\x10"" + switch + b""\x02"") else: packet = self.protocol.format_packet(b""\x0b"") states = await self._send(packet) return states",Turn off relay. "def _adjust_nstep(n_step, gamma, obs, actions, rewards, new_obs, dones): """"""Rewrites the given trajectory fragments to encode n-step rewards. reward[i] = ( reward[i] * gamma**0 + reward[i+1] * gamma**1 + ... + reward[i+n_step-1] * gamma**(n_step-1)) The ith new_obs is also adjusted to point to the (i+n_step-1)'th new obs. At the end of the trajectory, n is truncated to fit in the traj length.
"""""" assert not any(dones[:-1]), ""Unexpected done in middle of trajectory"" traj_length = len(rewards) for i in range(traj_length): for j in range(1, n_step): if i + j < traj_length: new_obs[i] = new_obs[i + j] dones[i] = dones[i + j] rewards[i] += gamma**j * rewards[i + j]","Rewrites the given trajectory fragments to encode n-step rewards. reward[i] = ( reward[i] * gamma**0 + reward[i+1] * gamma**1 + ... + reward[i+n_step-1] * gamma**(n_step-1)) The ith new_obs is also adjusted to point to the (i+n_step-1)'th new obs. At the end of the trajectory, n is truncated to fit in the traj length." "def data( self, previous_data=False, prompt=False, console_row=False, console_row_to_cursor=False, console_row_from_cursor=False ): """""" Return output data. Flags specifies what data to append. If no flags was specified nul-length string returned :param previous_data: If True, then previous output appends :param prompt: If True, then console prompt appends. If console_row or console_row_to_cursor is True, \ then this value is omitted :param console_row: If True, then console prompt and current input appends. :param console_row_to_cursor: If True, then console prompt and current input till cursor appends. \ If console_row is True, then this value is omitted :param console_row_from_cursor: If True, then current input from cursor appends. \ If console_row is True, then this value is omitted :return: str """""" result = '' if previous_data: result += self.__previous_data if prompt or console_row or console_row_to_cursor: result += self.console().prompt() if console_row or (console_row_from_cursor and console_row_to_cursor): result += self.console().row() elif console_row_to_cursor: result += self.console().row()[:self.cursor()] elif console_row_from_cursor: result += self.console().row()[self.cursor():] return result","Return output data. Flags specifies what data to append. If no flags was specified nul-length string returned :param previous_data: If True, then previous output appends :param prompt: If True, then console prompt appends. If console_row or console_row_to_cursor is True, \ then this value is omitted :param console_row: If True, then console prompt and current input appends. :param console_row_to_cursor: If True, then console prompt and current input till cursor appends. \ If console_row is True, then this value is omitted :param console_row_from_cursor: If True, then current input from cursor appends. \ If console_row is True, then this value is omitted :return: str" "def _add_gainloss_to_output(out, data): """"""Add gainloss based on genes, helpful for identifying changes in smaller genes. """""" out_file = ""%s-gainloss.txt"" % os.path.splitext(out[""cns""])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), ""cnvkit.py""), ""gainloss"", ""-s"", out[""cns""], ""-o"", tx_out_file, out[""cnr""]] gender = _get_batch_gender([data]) if gender: cmd += [""--sample-sex"", gender] do.run(cmd, ""CNVkit gainloss"") out[""gainloss""] = out_file return out","Add gainloss based on genes, helpful for identifying changes in smaller genes." "def uma_rs_check_access(self, rpt, path, http_method): """"""Function to be used in a UMA Resource Server to check access. Parameters: * **rpt (string):** RPT or blank value if absent (not send by RP) * **path (string):** Path of resource (e.g. 
for http://rs.com/phones, /phones should be passed) * **http_method (string):** Http method of RP request (GET, POST, PUT, DELETE) Returns: **dict:** The access information received in the format below. If the access is granted:: { ""access"": ""granted"" } If the access is denied with ticket response:: { ""access"": ""denied"", ""www-authenticate_header"": ""UMA realm='example', as_uri='https://as.example.com', error='insufficient_scope', ticket='016f84e8-f9b9-11e0-bd6f-0021cc6004de'"", ""ticket"": ""016f84e8-f9b9-11e0-bd6f-0021cc6004de"" } If the access is denied without ticket response:: { ""access"": ""denied"" } Raises: ``oxdpython.exceptions.InvalidRequestError`` if the resource is not protected """""" params = {""oxd_id"": self.oxd_id, ""rpt"": rpt, ""path"": path, ""http_method"": http_method} logger.debug(""Sending command `uma_rs_check_access` with params %s"", params) response = self.msgr.request(""uma_rs_check_access"", **params) logger.debug(""Received response: %s"", response) if response['status'] == 'error': if response['data']['error'] == 'invalid_request': raise InvalidRequestError(response['data']) else: raise OxdServerError(response['data']) return response['data']","Function to be used in a UMA Resource Server to check access. Parameters: * **rpt (string):** RPT or blank value if absent (not send by RP) * **path (string):** Path of resource (e.g. for http://rs.com/phones, /phones should be passed) * **http_method (string):** Http method of RP request (GET, POST, PUT, DELETE) Returns: **dict:** The access information received in the format below. If the access is granted:: { ""access"": ""granted"" } If the access is denied with ticket response:: { ""access"": ""denied"", ""www-authenticate_header"": ""UMA realm='example', as_uri='https://as.example.com', error='insufficient_scope', ticket='016f84e8-f9b9-11e0-bd6f-0021cc6004de'"", ""ticket"": ""016f84e8-f9b9-11e0-bd6f-0021cc6004de"" } If the access is denied without ticket response:: { ""access"": ""denied"" } Raises: ``oxdpython.exceptions.InvalidRequestError`` if the resource is not protected" "def convert_batchnorm(builder, layer, input_names, output_names, keras_layer): """""" Convert a Batch Normalization layer. Parameters keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object. """""" # Get input and output names input_name, output_name = (input_names[0], output_names[0]) axis = keras_layer.axis nb_channels = keras_layer.input_shape[axis] # Set parameters # Parameter arrangement in Keras: gamma, beta, mean, variance idx = 0 gamma, beta = None, None if keras_layer.scale: gamma = keras_layer.get_weights()[idx] idx += 1 if keras_layer.center: beta = keras_layer.get_weights()[idx] idx += 1 mean = keras_layer.get_weights()[idx] std = keras_layer.get_weights()[idx+1] gamma = _np.ones(mean.shape) if gamma is None else gamma beta = _np.zeros(mean.shape) if beta is None else beta # compute adjusted parameters variance = std * std f = 1.0 / _np.sqrt(std + keras_layer.epsilon) gamma1 = gamma*f beta1 = beta - gamma*mean*f mean[:] = 0.0 #mean variance[:] = 1.0 - .00001 #stddev builder.add_batchnorm( name = layer, channels = nb_channels, gamma = gamma1, beta = beta1, mean = mean, variance = variance, input_name = input_name, output_name = output_name)","Convert a Batch Normalization layer. Parameters keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object." 
"def get_by_name(self, name): """""" returns an object Project which matches name """""" for p in self.project_list: if p.nme == name: return p return None",returns an object Project which matches name "def _send_command(cmd, worker, lbn, target, profile='default', tgt_type='glob'): ''' Send a command to the modjk loadbalancer The minion need to be able to publish the commands to the load balancer cmd: worker_stop - won't get any traffic from the lbn worker_activate - activate the worker worker_disable - will get traffic only for current sessions ''' ret = { 'code': False, 'msg': 'OK', 'minions': [], } # Send the command to target func = 'modjk.{0}'.format(cmd) args = [worker, lbn, profile] response = __salt__['publish.publish'](target, func, args, tgt_type) # Get errors and list of affeced minions errors = [] minions = [] for minion in response: minions.append(minion) if not response[minion]: errors.append(minion) # parse response if not response: ret['msg'] = 'no servers answered the published command {0}'.format( cmd ) return ret elif errors: ret['msg'] = 'the following minions return False' ret['minions'] = errors return ret else: ret['code'] = True ret['msg'] = 'the commad was published successfully' ret['minions'] = minions return ret","Send a command to the modjk loadbalancer The minion need to be able to publish the commands to the load balancer cmd: worker_stop - won't get any traffic from the lbn worker_activate - activate the worker worker_disable - will get traffic only for current sessions" "def _draw(self): """""" Call the drawing API for the main menu widget with the current known terminal size and the terminal. """""" self._window.draw(self._width, self._height, self.terminal)","Call the drawing API for the main menu widget with the current known terminal size and the terminal." "def commit(self): """"""commit current edits."""""" request = self.edits().commit(**self.build_params()).execute() print 'Edit ""%s"" has been committed' % (request['id']) self.edit_id = None",commit current edits. "def special_mode(v): """"""decode Olympus SpecialMode tag in MakerNote"""""" mode1 = { 0: 'Normal', 1: 'Unknown', 2: 'Fast', 3: 'Panorama', } mode2 = { 0: 'Non-panoramic', 1: 'Left to right', 2: 'Right to left', 3: 'Bottom to top', 4: 'Top to bottom', } if not v or (v[0] not in mode1 or v[2] not in mode2): return v return '%s - sequence %d - %s' % (mode1[v[0]], v[1], mode2[v[2]])",decode Olympus SpecialMode tag in MakerNote "def Create(self, urn, aff4_type, mode=""w"", token=None, age=NEWEST_TIME, force_new_version=True, object_exists=False, mutation_pool=None, transaction=None): """"""Creates the urn if it does not already exist, otherwise opens it. If the urn exists and is of a different type, this will also promote it to the specified type. Args: urn: The object to create. aff4_type: The desired type for this object. mode: The desired mode for this object. token: The Security Token to use for opening this item. age: The age policy used to build this object. Only makes sense when mode has ""r"". force_new_version: Forces the creation of a new object in the data_store. object_exists: If we know the object already exists we can skip index creation. mutation_pool: An optional MutationPool object to write to. If not given, the data_store is used directly. transaction: For locked objects, a lock is passed to the object. Returns: An AFF4 object of the desired type and mode. Raises: AttributeError: If the mode is invalid. 
"""""" if not data_store.AFF4Enabled(): raise NotImplementedError(""AFF4 data store has been disabled."") if mode not in [""w"", ""r"", ""rw""]: raise AttributeError(""Invalid mode %s"" % mode) if token is None: token = data_store.default_token if urn is not None: urn = rdfvalue.RDFURN(urn) _ValidateAFF4Type(aff4_type) if ""r"" in mode: # Check to see if an object already exists. try: existing = self.Open( urn, mode=mode, token=token, age=age, transaction=transaction) result = existing.Upgrade(aff4_type) # We can't pass aff4_type into the Open call since it will raise with a # type mismatch. We set it like this so BadGetAttributeError checking # works. if aff4_type: result.aff4_type = aff4_type.__name__ if force_new_version and existing.Get( result.Schema.TYPE) != aff4_type.__name__: result.ForceNewVersion() return result except IOError: pass result = aff4_type( urn, mode=mode, token=token, age=age, aff4_type=aff4_type.__name__, object_exists=object_exists, mutation_pool=mutation_pool, transaction=transaction) result.Initialize() if force_new_version: result.ForceNewVersion() return result","Creates the urn if it does not already exist, otherwise opens it. If the urn exists and is of a different type, this will also promote it to the specified type. Args: urn: The object to create. aff4_type: The desired type for this object. mode: The desired mode for this object. token: The Security Token to use for opening this item. age: The age policy used to build this object. Only makes sense when mode has ""r"". force_new_version: Forces the creation of a new object in the data_store. object_exists: If we know the object already exists we can skip index creation. mutation_pool: An optional MutationPool object to write to. If not given, the data_store is used directly. transaction: For locked objects, a lock is passed to the object. Returns: An AFF4 object of the desired type and mode. Raises: AttributeError: If the mode is invalid." "def _queryset_iterator(qs): """""" Override default iterator to wrap returned items in a publishing sanity-checker ""booby trap"" to lazily raise an exception if DRAFT items are mistakenly returned and mis-used in a public context where only PUBLISHED items should be used. This booby trap is added when all of: - the publishing middleware is active, and therefore able to report accurately whether the request is in a drafts-permitted context - the publishing middleware tells us we are not in a drafts-permitted context, which means only published items should be used. """""" # Avoid double-processing draft items in our custom iterator when we # are in a `PublishingQuerySet` that is also a subclass of the # monkey-patched `UrlNodeQuerySet` if issubclass(type(qs), UrlNodeQuerySet): super_without_boobytrap_iterator = super(UrlNodeQuerySet, qs) else: super_without_boobytrap_iterator = super(PublishingQuerySet, qs) if is_publishing_middleware_active() \ and not is_draft_request_context(): for item in super_without_boobytrap_iterator.iterator(): if getattr(item, 'publishing_is_draft', False): yield DraftItemBoobyTrap(item) else: yield item else: for item in super_without_boobytrap_iterator.iterator(): yield item","Override default iterator to wrap returned items in a publishing sanity-checker ""booby trap"" to lazily raise an exception if DRAFT items are mistakenly returned and mis-used in a public context where only PUBLISHED items should be used. 
This booby trap is added when all of: - the publishing middleware is active, and therefore able to report accurately whether the request is in a drafts-permitted context - the publishing middleware tells us we are not in a drafts-permitted context, which means only published items should be used." "def cli(self, method): """"""Registers a method on an Object as a CLI route"""""" routes = getattr(method, '_hug_cli_routes', []) routes.append(self.route) method._hug_cli_routes = routes return method",Registers a method on an Object as a CLI route "def handle_bodhi(msg): """""" Given a bodhi message, return the FAS username. """""" if 'bodhi.update.comment' in msg.topic: username = msg.msg['comment']['author'] elif 'bodhi.buildroot_override' in msg.topic: username = msg.msg['override']['submitter'] else: username = msg.msg.get('update', {}).get('submitter') return username","Given a bodhi message, return the FAS username." "def _vcf_alt(base, other_chr, other_pos, isrc, is_first): """"""Create ALT allele line in VCF 4.1 format associating with other paired end. """""" if is_first: pipe = ""["" if isrc else ""]"" out_str = ""{base}{pipe}{chr}:{pos}{pipe}"" else: pipe = ""]"" if isrc else ""["" out_str = ""{pipe}{chr}:{pos}{pipe}{base}"" return out_str.format(pipe=pipe, chr=other_chr, pos=other_pos + 1, base=base)",Create ALT allele line in VCF 4.1 format associating with other paired end. "def get_hook_model(): """""" Returns the Custom Hook model if defined in settings, otherwise the default Hook model. """""" from rest_hooks.models import Hook HookModel = Hook if getattr(settings, 'HOOK_CUSTOM_MODEL', None): HookModel = get_module(settings.HOOK_CUSTOM_MODEL) return HookModel","Returns the Custom Hook model if defined in settings, otherwise the default Hook model." "def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """""" This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test time, to finalize predictions. We only transform the action string sequences into logical forms here. """""" best_action_strings = output_dict[""best_action_strings""] # Instantiating an empty world for getting logical forms. 
world = NlvrLanguage(set()) logical_forms = [] for instance_action_sequences in best_action_strings: instance_logical_forms = [] for action_strings in instance_action_sequences: if action_strings: instance_logical_forms.append(world.action_sequence_to_logical_form(action_strings)) else: instance_logical_forms.append('') logical_forms.append(instance_logical_forms) action_mapping = output_dict['action_mapping'] best_actions = output_dict['best_action_strings'] debug_infos = output_dict['debug_info'] batch_action_info = [] for batch_index, (predicted_actions, debug_info) in enumerate(zip(best_actions, debug_infos)): instance_action_info = [] for predicted_action, action_debug_info in zip(predicted_actions[0], debug_info): action_info = {} action_info['predicted_action'] = predicted_action considered_actions = action_debug_info['considered_actions'] probabilities = action_debug_info['probabilities'] actions = [] for action, probability in zip(considered_actions, probabilities): if action != -1: actions.append((action_mapping[(batch_index, action)], probability)) actions.sort() considered_actions, probabilities = zip(*actions) action_info['considered_actions'] = considered_actions action_info['action_probabilities'] = probabilities action_info['question_attention'] = action_debug_info.get('question_attention', []) instance_action_info.append(action_info) batch_action_info.append(instance_action_info) output_dict[""predicted_actions""] = batch_action_info output_dict[""logical_form""] = logical_forms return output_dict","This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test time, to finalize predictions. We only transform the action string sequences into logical forms here." "def ask_yes_no(message='', default=0, title=''): """""" Display a message with choices of Yes and No. returned value: Yes -> True No -> False :ref:`screenshots` :param message: message to be displayed. :param title: window title :param default: default button as boolean (YES=True, NO=False) :rtype: bool """""" return backend_api.opendialog(""ask_yes_no"", dict(message=message, default=default, title=title))","Display a message with choices of Yes and No. returned value: Yes -> True No -> False :ref:`screenshots` :param message: message to be displayed. :param title: window title :param default: default button as boolean (YES=True, NO=False) :rtype: bool" "def _subtitlesAdded(self, path, subNos): """"""When subtitle is added, all syncPoints greater or equal than a new subtitle are incremented."""""" def action(current, count, model, row): _setSubNo(current + count, model, row) def count(current, nos): ret = 0 for no in nos: if current >= no: ret += 1 # consider: current = 0, nos = [0, 1, 2, 3] # in that case, current should be prepended by all nos current += 1 return ret self._changeSubNos(path, subNos, count, action)","When subtitle is added, all syncPoints greater or equal than a new subtitle are incremented." "def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2): """""" Decorator to deprecate a keyword argument of a function. Parameters ---------- old_arg_name : str Name of argument in function to deprecate new_arg_name : str or None Name of preferred argument in function. Use None to raise warning that ``old_arg_name`` keyword is deprecated. mapping : dict or callable If mapping is present, use it to translate old arguments to new arguments. A callable must do its own value checking; values not found in a dict will be forwarded unchanged. 
Examples -------- The following deprecates 'cols', using 'columns' instead >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') ... def f(columns=''): ... print(columns) ... >>> f(columns='should work ok') should work ok >>> f(cols='should raise warning') FutureWarning: cols is deprecated, use columns instead warnings.warn(msg, FutureWarning) should raise warning >>> f(cols='should error', columns=""can\'t pass do both"") TypeError: Can only specify 'cols' or 'columns', not both >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False}) ... def f(new=False): ... print('yes!' if new else 'no!') ... >>> f(old='yes') FutureWarning: old='yes' is deprecated, use new=True instead warnings.warn(msg, FutureWarning) yes! To raise a warning that a keyword will be removed entirely in the future >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None) ... def f(cols='', another_param=''): ... print(cols) ... >>> f(cols='should raise warning') FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version please takes steps to stop use of 'cols' should raise warning >>> f(another_param='should not raise warning') should not raise warning >>> f(cols='should raise warning', another_param='') FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version please takes steps to stop use of 'cols' should raise warning """""" if mapping is not None and not hasattr(mapping, 'get') and \ not callable(mapping): raise TypeError(""mapping from old to new argument values "" ""must be dict or callable!"") def _deprecate_kwarg(func): @wraps(func) def wrapper(*args, **kwargs): old_arg_value = kwargs.pop(old_arg_name, None) if new_arg_name is None and old_arg_value is not None: msg = ( ""the '{old_name}' keyword is deprecated and will be "" ""removed in a future version. "" ""Please take steps to stop the use of '{old_name}'"" ).format(old_name=old_arg_name) warnings.warn(msg, FutureWarning, stacklevel=stacklevel) kwargs[old_arg_name] = old_arg_value return func(*args, **kwargs) if old_arg_value is not None: if mapping is not None: if hasattr(mapping, 'get'): new_arg_value = mapping.get(old_arg_value, old_arg_value) else: new_arg_value = mapping(old_arg_value) msg = (""the {old_name}={old_val!r} keyword is deprecated, "" ""use {new_name}={new_val!r} instead"" ).format(old_name=old_arg_name, old_val=old_arg_value, new_name=new_arg_name, new_val=new_arg_value) else: new_arg_value = old_arg_value msg = (""the '{old_name}' keyword is deprecated, "" ""use '{new_name}' instead"" ).format(old_name=old_arg_name, new_name=new_arg_name) warnings.warn(msg, FutureWarning, stacklevel=stacklevel) if kwargs.get(new_arg_name, None) is not None: msg = (""Can only specify '{old_name}' or '{new_name}', "" ""not both"").format(old_name=old_arg_name, new_name=new_arg_name) raise TypeError(msg) else: kwargs[new_arg_name] = new_arg_value return func(*args, **kwargs) return wrapper return _deprecate_kwarg","Decorator to deprecate a keyword argument of a function. Parameters ---------- old_arg_name : str Name of argument in function to deprecate new_arg_name : str or None Name of preferred argument in function. Use None to raise warning that ``old_arg_name`` keyword is deprecated. mapping : dict or callable If mapping is present, use it to translate old arguments to new arguments. A callable must do its own value checking; values not found in a dict will be forwarded unchanged. 
Examples -------- The following deprecates 'cols', using 'columns' instead >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') ... def f(columns=''): ... print(columns) ... >>> f(columns='should work ok') should work ok >>> f(cols='should raise warning') FutureWarning: cols is deprecated, use columns instead warnings.warn(msg, FutureWarning) should raise warning >>> f(cols='should error', columns=""can\'t pass do both"") TypeError: Can only specify 'cols' or 'columns', not both >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False}) ... def f(new=False): ... print('yes!' if new else 'no!') ... >>> f(old='yes') FutureWarning: old='yes' is deprecated, use new=True instead warnings.warn(msg, FutureWarning) yes! To raise a warning that a keyword will be removed entirely in the future >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None) ... def f(cols='', another_param=''): ... print(cols) ... >>> f(cols='should raise warning') FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version please takes steps to stop use of 'cols' should raise warning >>> f(another_param='should not raise warning') should not raise warning >>> f(cols='should raise warning', another_param='') FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version please takes steps to stop use of 'cols' should raise warning" "def ready_to_split(self): """""" Returns true if this node is ready to branch off additional nodes. Returns false otherwise. """""" # Never split if we're a leaf that predicts adequately. threshold = self._tree.leaf_threshold if self._tree.data.is_continuous_class: var = self._class_cdist.variance if var is not None and threshold is not None \ and var <= threshold: return False else: best_prob = self._class_ddist.best_prob if best_prob is not None and threshold is not None \ and best_prob >= threshold: return False return self._tree.auto_grow \ and not self.attr_name \ and self.n >= self._tree.splitting_n","Returns true if this node is ready to branch off additional nodes. Returns false otherwise." "def housecode_to_index(housecode): """"""Convert a X10 housecode to a zero-based index"""""" match = re.search(r'^([A-P])(\d{1,2})$', housecode.upper()) if match: house_index = int(match.group(2)) if 1 <= house_index <= 16: return (ord(match.group(1)) - ord('A')) * 16 + house_index - 1 raise ValueError(""Invalid X10 housecode: %s"" % housecode)",Convert a X10 housecode to a zero-based index "def exit_col_remove_mode(self, event): """""" go back from 'remove cols' mode to normal """""" # close help messge self.toggle_help(event=None, mode='close') # update mode self.remove_cols_mode = False # re-enable all buttons for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]: btn.Enable() # unbind grid click for deletion self.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK) # undo visual cues self.grid.SetWindowStyle(wx.DEFAULT) self.grid_box.GetStaticBox().SetWindowStyle(wx.DEFAULT) self.msg_text.SetLabel(self.default_msg_text) self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox()) self.main_sizer.Fit(self) # re-bind self.remove_cols_button self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button) self.remove_cols_button.SetLabel(""Remove columns"")",go back from 'remove cols' mode to normal "def split_join_classification(element, classification_labels, nodes_classification): """""" Add the ""Split"", ""Join"" classification, if the element qualifies for. 
:param element: an element from BPMN diagram, :param classification_labels: list of labels attached to the element, :param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels. """""" classification_join = ""Join"" classification_split = ""Split"" if len(element[1][consts.Consts.incoming_flow]) >= 2: classification_labels.append(classification_join) if len(element[1][consts.Consts.outgoing_flow]) >= 2: classification_labels.append(classification_split) nodes_classification[element[0]] = classification_labels","Add the ""Split"", ""Join"" classification, if the element qualifies for. :param element: an element from BPMN diagram, :param classification_labels: list of labels attached to the element, :param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels." "def _random_id(self, size=16, chars=string.ascii_uppercase + string.digits): """""" Generates a random id based on `size` and `chars` variable. By default it will generate a 16 character long string based on ascii uppercase letters and digits. """""" return ''.join(random.choice(chars) for _ in range(size))","Generates a random id based on `size` and `chars` variable. By default it will generate a 16 character long string based on ascii uppercase letters and digits." "def search_name(self, name, dom=None): """"""name find function abbreviation"""""" if dom is None: dom = self.browser return expect(dom.find_by_name, args=[name])",name find function abbreviation "def input_validate_key_handle(key_handle, name='key_handle'): """""" Input validation for key_handles. """""" if type(key_handle) is not int: try: return key_handle_to_int(key_handle) except pyhsm.exception.YHSM_Error: raise pyhsm.exception.YHSM_WrongInputType(name, int, type(key_handle)) return key_handle",Input validation for key_handles. "def watch(ctx): """"""Watch the directory for changes. Automatically run tests. """""" vcs = ctx.obj['vcs'] event_handler = TestsEventHandler(vcs) observer = Observer() observer.schedule(event_handler, vcs.path, recursive=True) observer.start() click.echo('Watching directory `{path}`. Use ctrl-c to stop.'.format(path=vcs.path)) while observer.isAlive(): observer.join(timeout=1)",Watch the directory for changes. Automatically run tests. "def set_all_neighbors_data(self, data, done, key): """""" Given they 'key' tile's data, assigns this information to all neighboring tiles """""" # The order of this for loop is important because the topleft gets # it's data from the left neighbor, which should have already been # updated... for side in ['left', 'right', 'top', 'bottom', 'topleft', 'topright', 'bottomleft', 'bottomright']: self.set_neighbor_data(side, data, key, 'data') # self.set_neighbor_data(side, todo, key, 'todo') self.set_neighbor_data(side, done, key, 'done')","Given they 'key' tile's data, assigns this information to all neighboring tiles" "def pid(self): """""" The integer PID of the subprocess or None. """""" pf = self.path('cmd.pid') if not os.path.exists(pf): return None with open(pf, 'r') as f: return int(f.read())",The integer PID of the subprocess or None. "def calculate_field_widths(self, width=None, min_label_width=10, min_progress_width=10): """"""Calculate how wide each field should be so we can align them. We always find room for the summaries since these are short and packed with information. 
If possible, we will also find room for labels, but if this would make the progress bar width shorter than the specified minium then we will shorten the labels, though never below the minium there. If this mean we have bars that are too wide for the terminal, then your terminal needs to be wider. """""" if width is None: # pragma: no cover width = shutil.get_terminal_size()[0] summary_width = self.summary_width() label_width = self.label_width() remaining = width - summary_width - label_width - 2 if remaining >= min_progress_width: progress_width = remaining else: progress_width = min_progress_width remaining = width - summary_width - progress_width - 2 if remaining >= min_label_width: label_width = remaining else: label_width = min_label_width return (label_width, progress_width, summary_width)","Calculate how wide each field should be so we can align them. We always find room for the summaries since these are short and packed with information. If possible, we will also find room for labels, but if this would make the progress bar width shorter than the specified minium then we will shorten the labels, though never below the minium there. If this mean we have bars that are too wide for the terminal, then your terminal needs to be wider." "def transition(self, inputSymbol): """""" Transition between states, returning any outputs. """""" outState, outputSymbols = self._automaton.outputForInput(self._state, inputSymbol) outTracer = None if self._tracer: outTracer = self._tracer(self._state._name(), inputSymbol._name(), outState._name()) self._state = outState return (outputSymbols, outTracer)","Transition between states, returning any outputs." "def fetch(self, method, path, query=None, body=None, timeout=0, **kwargs): """"""send a Message :param method: string, something like ""POST"" or ""GET"" :param path: string, the path part of a uri (eg, /foo/bar) :param body: dict, what you want to send to ""method path"" :param timeout: integer, how long to wait before failing trying to send """""" ret = None if not query: query = {} if not body: body = {} query.update(body) # body takes precedence body = query self.send_count += 1 payload = self.get_fetch_request(method, path, body) attempts = 1 max_attempts = self.attempts success = False while not success: kwargs['timeout'] = timeout try: try: if not self.connected: self.connect(path) with self.wstimeout(**kwargs) as timeout: kwargs['timeout'] = timeout logger.debug('{} send {} attempt {}/{} with timeout {}'.format( self.client_id, payload.uuid, attempts, max_attempts, timeout )) sent_bits = self.ws.send(payload.payload) logger.debug('{} sent {} bytes'.format(self.client_id, sent_bits)) if sent_bits: ret = self.fetch_response(payload, **kwargs) if ret: success = True except websocket.WebSocketConnectionClosedException as e: self.ws.shutdown() raise IOError(""connection is not open but reported it was open: {}"".format(e)) except (IOError, TypeError) as e: logger.debug('{} error on send attempt {}: {}'.format(self.client_id, attempts, e)) success = False finally: if not success: attempts += 1 if attempts > max_attempts: raise else: timeout *= 2 if (attempts / max_attempts) > 0.50: logger.debug( ""{} closing and re-opening connection for next attempt"".format(self.client_id) ) self.close() return ret","send a Message :param method: string, something like ""POST"" or ""GET"" :param path: string, the path part of a uri (eg, /foo/bar) :param body: dict, what you want to send to ""method path"" :param timeout: integer, how long to wait before failing 
trying to send" "def plot_interactive(mergepkl, noisepkl=None, thresh=6.0, thresh_link=7.0, ignoret=None, savehtml=True, url_path='plots'): """""" Backwards compatible function for making interactive candidate summary plot """""" data = readdata(mergepkl) circleinds = calcinds(data, thresh, ignoret) crossinds = calcinds(data, -1*thresh, ignoret) edgeinds = calcinds(data, thresh_link, ignoret) workdir = os.path.dirname(mergepkl) fileroot = os.path.basename(mergepkl).rstrip('_merge.pkl').lstrip('cands_') logger.info('Total on target time: {} s'.format(calcontime(data, inds=circleinds+crossinds+edgeinds))) if noisepkl: noiseplot = plotnoisecum(noisepkl) else: noiseplot = None combined = plotall(data, circleinds=circleinds, crossinds=crossinds, edgeinds=edgeinds, htmlname=None, noiseplot=noiseplot, url_path=url_path, fileroot=fileroot) if savehtml: output_file(mergepkl.rstrip('.pkl') + '.html') save(combined) else: return combined",Backwards compatible function for making interactive candidate summary plot "def _get_mosaik_nn_args(out_file): """"""Retrieve default neural network files from GitHub to pass to Mosaik. """""" base_nn_url = ""https://raw.github.com/wanpinglee/MOSAIK/master/src/networkFile/"" out = [] for arg, fname in [(""-annse"", ""2.1.26.se.100.005.ann""), (""-annpe"", ""2.1.26.pe.100.0065.ann"")]: arg_fname = os.path.join(os.path.dirname(out_file), fname) if not file_exists(arg_fname): subprocess.check_call([""wget"", ""-O"", arg_fname, base_nn_url + fname]) out += [arg, arg_fname] return out",Retrieve default neural network files from GitHub to pass to Mosaik. "def request_announcement_email(request, form, obj): """"""Send an announcement request email. form: The announcement request form obj: The announcement request object """""" logger.debug(form.data) teacher_ids = form.data[""teachers_requested""] if not isinstance(teacher_ids, list): teacher_ids = [teacher_ids] logger.debug(teacher_ids) teachers = User.objects.filter(id__in=teacher_ids) logger.debug(teachers) subject = ""News Post Confirmation Request from {}"".format(request.user.full_name) emails = [] for teacher in teachers: emails.append(teacher.tj_email) logger.debug(emails) logger.info(""%s: Announcement request to %s, %s"", request.user, teachers, emails) base_url = request.build_absolute_uri(reverse('index')) data = { ""teachers"": teachers, ""user"": request.user, ""formdata"": form.data, ""info_link"": request.build_absolute_uri(reverse(""approve_announcement"", args=[obj.id])), ""base_url"": base_url } logger.info(""%s: Announcement request %s"", request.user, data) email_send(""announcements/emails/teacher_approve.txt"", ""announcements/emails/teacher_approve.html"", data, subject, emails)","Send an announcement request email. form: The announcement request form obj: The announcement request object" "def installed_capabilities(image=None): ''' List the capabilities installed on the system Args: image (Optional[str]): The path to the root directory of an offline Windows image. If `None` is passed, the running operating system is targeted. Default is None. Raises: NotImplementedError: For all versions of Windows that are not Windows 10 and later. Server editions of Windows use ServerManager instead. Returns: list: A list of installed capabilities CLI Example: .. 
code-block:: bash salt '*' dism.installed_capabilities ''' if salt.utils.versions.version_cmp(__grains__['osversion'], '10') == -1: raise NotImplementedError( '`installed_capabilities` is not available on this version of ' 'Windows: {0}'.format(__grains__['osversion'])) return _get_components(""Capability Identity"", ""Capabilities"", ""Installed"")","List the capabilities installed on the system Args: image (Optional[str]): The path to the root directory of an offline Windows image. If `None` is passed, the running operating system is targeted. Default is None. Raises: NotImplementedError: For all versions of Windows that are not Windows 10 and later. Server editions of Windows use ServerManager instead. Returns: list: A list of installed capabilities CLI Example: .. code-block:: bash salt '*' dism.installed_capabilities" "def _prepare_fabric_fw_internal(self, tenant_id, fw_dict, is_fw_virt, result): """"""Internal routine to prepare the fabric. This creates an entry in FW DB and runs the SM. """""" if not self.auto_nwk_create: LOG.info(""Auto network creation disabled"") return False try: tenant_name = fw_dict.get('tenant_name') fw_id = fw_dict.get('fw_id') fw_name = fw_dict.get('fw_name') # TODO(padkrish) More than 1 FW per tenant not supported. if tenant_id in self.service_attr and ( result == fw_const.RESULT_FW_CREATE_DONE): LOG.error(""Fabric already prepared for tenant %(tenant)s,"" "" %(name)s"", {'tenant': tenant_id, 'name': tenant_name}) return True if tenant_id not in self.service_attr: self.create_serv_obj(tenant_id) self.service_attr[tenant_id].create_fw_db(fw_id, fw_name, tenant_id) ret = self.run_create_sm(tenant_id, fw_dict, is_fw_virt) if ret: LOG.info(""SM create returned True for Tenant Name "" ""%(tenant)s FW %(fw)s"", {'tenant': tenant_name, 'fw': fw_name}) self.service_attr[tenant_id].set_fabric_create(True) else: LOG.error(""SM create returned False for Tenant Name "" ""%(tenant)s FW %(fw)s"", {'tenant': tenant_name, 'fw': fw_name}) except Exception as exc: LOG.error(""Exception raised in create fabric int %s"", str(exc)) return False return ret","Internal routine to prepare the fabric. This creates an entry in FW DB and runs the SM." "def clearParameters(self): """"""Removes all parameters from model"""""" self.beginRemoveRows(QtCore.QModelIndex(), 0, self.rowCount()) self.model.clear_parameters() self.endRemoveRows()",Removes all parameters from model "def get_pages(self): '''returns pages with rows''' pages = [] page = [] for i, item in enumerate(self.get_rows): if i > 0 and i % self.objects_per_page == 0: pages.append(page) page = [] page.append(item) pages.append(page) return pages",returns pages with rows "def show_banner(ctx, param, value): """"""Shows dynaconf awesome banner"""""" if not value or ctx.resilient_parsing: return set_settings() click.echo(settings.dynaconf_banner) click.echo(""Learn more at: http://github.com/rochacbruno/dynaconf"") ctx.exit()",Shows dynaconf awesome banner "def readline(self): """"""Get the next line including the newline or '' on EOF."""""" self.lineno += 1 if self._buffer: return self._buffer.pop() else: return self.input.readline()",Get the next line including the newline or '' on EOF. 
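A minimal sketch of the pushback-buffer pattern behind the readline entry above, assuming a hypothetical PushbackReader class and pushback() helper that are not in the dataset: buffered lines are returned (last pushed first) before the underlying stream is read.

import io

class PushbackReader:
    def __init__(self, stream):
        self.stream = stream
        self._buffer = []   # lines pushed back for re-reading
        self.lineno = 0

    def readline(self):
        # Mirrors the record above: serve buffered lines before reading new input.
        self.lineno += 1
        if self._buffer:
            return self._buffer.pop()
        return self.stream.readline()

    def pushback(self, line):
        # Assumed helper: undo the effect of the last readline().
        self._buffer.append(line)
        self.lineno -= 1

reader = PushbackReader(io.StringIO("a\nb\n"))
first = reader.readline()
reader.pushback(first)
assert reader.readline() == "a\n"
assert reader.readline() == "b\n"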
"def prefixsearch(self, prefix, results=10): """""" Perform a prefix search using the provided prefix string Args: prefix (str): Prefix string to use for search results (int): Number of pages with the prefix to return Returns: list: List of page titles Note: **Per the documentation:** ""The purpose of this module is \ similar to action=opensearch: to take user input and provide \ the best-matching titles. Depending on the search engine \ backend, this might include typo correction, redirect \ avoidance, or other heuristics."" """""" self._check_query(prefix, ""Prefix must be specified"") query_params = { ""list"": ""prefixsearch"", ""pssearch"": prefix, ""pslimit"": (""max"" if results > 500 else results), ""psnamespace"": 0, ""psoffset"": 0, # parameterize to skip to later in the list? } raw_results = self.wiki_request(query_params) self._check_error_response(raw_results, prefix) return [rec[""title""] for rec in raw_results[""query""][""prefixsearch""]]","Perform a prefix search using the provided prefix string Args: prefix (str): Prefix string to use for search results (int): Number of pages with the prefix to return Returns: list: List of page titles Note: **Per the documentation:** ""The purpose of this module is \ similar to action=opensearch: to take user input and provide \ the best-matching titles. Depending on the search engine \ backend, this might include typo correction, redirect \ avoidance, or other heuristics.""" "def score_intersect(self, term1, term2, **kwargs): """""" Compute the geometric area of the overlap between the kernel density estimates of two terms. Args: term1 (str) term2 (str) Returns: float """""" t1_kde = self.kde(term1, **kwargs) t2_kde = self.kde(term2, **kwargs) # Integrate the overlap. overlap = np.minimum(t1_kde, t2_kde) return np.trapz(overlap)","Compute the geometric area of the overlap between the kernel density estimates of two terms. Args: term1 (str) term2 (str) Returns: float" "def is_identity(term): """""" Tests to see if a PauliTerm or PauliSum is a scalar multiple of identity :param term: Either a PauliTerm or PauliSum :returns: True if the PauliTerm or PauliSum is a scalar multiple of identity, False otherwise :rtype: bool """""" if isinstance(term, PauliTerm): return (len(term) == 0) and (not np.isclose(term.coefficient, 0)) elif isinstance(term, PauliSum): return (len(term.terms) == 1) and (len(term.terms[0]) == 0) and \ (not np.isclose(term.terms[0].coefficient, 0)) else: raise TypeError(""is_identity only checks PauliTerms and PauliSum objects!"")","Tests to see if a PauliTerm or PauliSum is a scalar multiple of identity :param term: Either a PauliTerm or PauliSum :returns: True if the PauliTerm or PauliSum is a scalar multiple of identity, False otherwise :rtype: bool" "async def power_off( self, stop_mode: PowerStopMode = PowerStopMode.HARD, comment: str = None, wait: bool = False, wait_interval: int = 5): """""" Power off. :param stop_mode: How to perform the power off. :type stop_mode: `PowerStopMode` :param comment: Reason machine was powered on. :type comment: `str` :param wait: If specified, wait until the machine is powered on. :type wait: `bool` :param wait_interval: How often to poll, defaults to 5 seconds. 
:type wait_interval: `int` """""" params = {""system_id"": self.system_id, 'stop_mode': stop_mode.value} if comment is not None: params[""comment""] = comment try: self._data = await self._handler.power_off(**params) except CallError as error: if error.status == HTTPStatus.FORBIDDEN: message = ""Not allowed to power off machine."" raise OperationNotAllowed(message) from error else: raise if not wait or self.power_state == PowerState.UNKNOWN: # Don't wait for a machine that always shows power state as # unknown as the driver cannot query the power state. return self else: # Wait for machine to be powered off. while self.power_state == PowerState.ON: await asyncio.sleep(wait_interval) self._data = await self._handler.read(system_id=self.system_id) if self.power_state == PowerState.ERROR: msg = ""{hostname} failed to power off."".format( hostname=self.hostname ) raise PowerError(msg, self) return self","Power off. :param stop_mode: How to perform the power off. :type stop_mode: `PowerStopMode` :param comment: Reason machine was powered on. :type comment: `str` :param wait: If specified, wait until the machine is powered on. :type wait: `bool` :param wait_interval: How often to poll, defaults to 5 seconds. :type wait_interval: `int`" "def raw_corpus_length_ratio(hypotheses: Iterable[str], references: Iterable[str]) -> float: """""" Simple wrapper around length ratio implementation. :param hypotheses: Hypotheses stream. :param references: Reference stream. :return: Length ratio score as float. """""" ratios = [len(h.split())/len(r.split()) for h, r in zip(hypotheses, references)] return sum(ratios)/len(ratios) if len(ratios) else 0.0","Simple wrapper around length ratio implementation. :param hypotheses: Hypotheses stream. :param references: Reference stream. :return: Length ratio score as float." 
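A minimal usage sketch for the raw_corpus_length_ratio entry above; the function body is repeated (without type hints) so the snippet runs on its own, and the hypothesis/reference strings are toy data, not taken from the dataset.

def raw_corpus_length_ratio(hypotheses, references):
    # Average of hypothesis-token-count / reference-token-count over all pairs.
    ratios = [len(h.split()) / len(r.split()) for h, r in zip(hypotheses, references)]
    return sum(ratios) / len(ratios) if len(ratios) else 0.0

hyps = ["the cat sat", "a dog"]
refs = ["the cat sat down", "a small dog"]
print(raw_corpus_length_ratio(hyps, refs))  # (3/4 + 2/3) / 2, roughly 0.708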
"def diagonalize_blocks(arr, blocksize): """""" Diagonalize sections of columns of an array for the whole array Parameters ---------- arr : numpy array Input array blocksize : int number of rows/colums forming one block Returns ------- numpy ndarray with shape (columns 'arr' * blocksize, columns 'arr' * blocksize) Example -------- arr: output: (blocksize = 3) 3 1 3 0 0 1 0 0 4 2 0 4 0 0 2 0 5 3 0 0 5 0 0 3 6 9 6 0 0 9 0 0 7 6 0 7 0 0 6 0 8 4 0 0 8 0 0 4 """""" nr_col = arr.shape[1] nr_row = arr.shape[0] if np.mod(nr_row, blocksize): raise ValueError( 'Number of rows of input array must be a multiple of blocksize') arr_diag = np.zeros((nr_row, blocksize*nr_col)) for col_ind, col_val in enumerate(arr.T): col_start = col_ind*blocksize col_end = blocksize + col_ind*blocksize for _ind in range(int(nr_row/blocksize)): row_start = _ind*blocksize row_end = blocksize + _ind * blocksize arr_diag[row_start:row_end, col_start:col_end] = np.diag(col_val[row_start:row_end]) return arr_diag","Diagonalize sections of columns of an array for the whole array Parameters ---------- arr : numpy array Input array blocksize : int number of rows/colums forming one block Returns ------- numpy ndarray with shape (columns 'arr' * blocksize, columns 'arr' * blocksize) Example -------- arr: output: (blocksize = 3) 3 1 3 0 0 1 0 0 4 2 0 4 0 0 2 0 5 3 0 0 5 0 0 3 6 9 6 0 0 9 0 0 7 6 0 7 0 0 6 0 8 4 0 0 8 0 0 4" "def up(tag, sql, revision): """""" Upgrade to revision """""" alembic_command.upgrade( config=get_config(), revision=revision, sql=sql, tag=tag )",Upgrade to revision "def _add_strings_to_commastring(self, field, strings): # type: (str, List[str]) -> bool """"""Add a list of strings to a comma separated list of strings Args: field (str): Field containing comma separated list strings (List[str]): list of strings to add Returns: bool: True if all strings added or False if any already present. """""" allstringsadded = True for string in strings: if not self._add_string_to_commastring(field, string): allstringsadded = False return allstringsadded","Add a list of strings to a comma separated list of strings Args: field (str): Field containing comma separated list strings (List[str]): list of strings to add Returns: bool: True if all strings added or False if any already present." "def duplicated(values, keep='first'): """""" Return boolean ndarray denoting duplicate values. .. versionadded:: 0.19.0 Parameters ---------- values : ndarray-like Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : ndarray """""" values, dtype, ndtype = _ensure_data(values) f = getattr(htable, ""duplicated_{dtype}"".format(dtype=ndtype)) return f(values, keep=keep)","Return boolean ndarray denoting duplicate values. .. versionadded:: 0.19.0 Parameters ---------- values : ndarray-like Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. 
Returns ------- duplicated : ndarray" "def modflow_sfr_gag_to_instruction_file(gage_output_file, ins_file=None, parse_filename=False): """"""writes an instruction file for an SFR gage output file to read Flow only at all times Parameters ---------- gage_output_file : str the gage output filename (ASCII). ins_file : str the name of the instruction file to create. If None, the name is .ins. Default is None parse_filename : bool if True, get the gage_num parameter by parsing the gage output file filename if False, get the gage number from the file itself Returns ------- df : pandas.DataFrame a dataframe with obsnme and obsval for the sfr simulated flows. If inschek was not successfully run, then returns None ins_file : str file name of instructions file relating to gage output. obs_file : str file name of processed gage output for all times Note ---- sets up observations for gage outputs only for the Flow column. if parse_namefile is true, only text up to first '.' is used as the gage_num TODO : allow other observation types and align explicitly with times - now returns all values """""" if ins_file is None: ins_file = gage_output_file + '.ins' # navigate the file to be sure the header makes sense indat = [line.strip() for line in open(gage_output_file, 'r').readlines()] header = [i for i in indat if i.startswith('""')] # yank out the gage number to identify the observation names if parse_filename: gage_num = os.path.basename(gage_output_file).split('.')[0] else: gage_num = re.sub(""[^0-9]"", """", indat[0].lower().split(""gage no."")[-1].strip().split()[0]) # get the column names cols = [i.lower() for i in header if 'data' in i.lower()][0].lower().replace('""', '').replace('data:', '').split() # make sure ""Flow"" is included in the columns if 'flow' not in cols: raise Exception('Requested field ""Flow"" not in gage output columns') # find which column is for ""Flow"" flowidx = np.where(np.array(cols) == 'flow')[0][0] # write out the instruction file lines inslines = ['l1 ' + (flowidx + 1) * 'w ' + '!g{0}_{1:d}!'.format(gage_num, j) for j in range(len(indat) - len(header))] inslines[0] = inslines[0].replace('l1', 'l{0:d}'.format(len(header) + 1)) # write the instruction file with open(ins_file, 'w') as ofp: ofp.write('pif ~\n') [ofp.write('{0}\n'.format(line)) for line in inslines] df = _try_run_inschek(ins_file, gage_output_file) if df is not None: return df, ins_file, gage_output_file else: print(""Inschek didn't run so nothing returned"") return None","writes an instruction file for an SFR gage output file to read Flow only at all times Parameters ---------- gage_output_file : str the gage output filename (ASCII). ins_file : str the name of the instruction file to create. If None, the name is .ins. Default is None parse_filename : bool if True, get the gage_num parameter by parsing the gage output file filename if False, get the gage number from the file itself Returns ------- df : pandas.DataFrame a dataframe with obsnme and obsval for the sfr simulated flows. If inschek was not successfully run, then returns None ins_file : str file name of instructions file relating to gage output. obs_file : str file name of processed gage output for all times Note ---- sets up observations for gage outputs only for the Flow column. if parse_namefile is true, only text up to first '.' 
is used as the gage_num TODO : allow other observation types and align explicitly with times - now returns all values" "def getVariantSet(self, id_): """""" Returns the VariantSet with the specified name, or raises a VariantSetNotFoundException otherwise. """""" if id_ not in self._variantSetIdMap: raise exceptions.VariantSetNotFoundException(id_) return self._variantSetIdMap[id_]","Returns the VariantSet with the specified name, or raises a VariantSetNotFoundException otherwise." "def register_interaction(key=None): """"""Decorator registering an interaction class in the registry. If no key is provided, the class name is used as a key. A key is provided for each core bqplot interaction type so that the frontend can use this key regardless of the kernal language. """""" def wrap(interaction): name = key if key is not None else interaction.__module__ + \ interaction.__name__ interaction.types[name] = interaction return interaction return wrap","Decorator registering an interaction class in the registry. If no key is provided, the class name is used as a key. A key is provided for each core bqplot interaction type so that the frontend can use this key regardless of the kernal language." "def resolver(cls, var_name: str) -> FunctionType: """""" Variable resolver decorator. Function or method decorated with it is used to resolve the config variable. .. note:: Variable is resolved only once. Next gets are returned from the cache. :param var_name: Variable name :return: Function decorator """""" def dec(f): if var_name in cls().resolvers: raise ConfigurationError( f'Resolver for {var_name} already registered') cls().resolvers[var_name] = f return f return dec","Variable resolver decorator. Function or method decorated with it is used to resolve the config variable. .. note:: Variable is resolved only once. Next gets are returned from the cache. :param var_name: Variable name :return: Function decorator" "def list_acl_path(self): """"""Get all distinct acl paths. :return: Dictionary with the following structure: :: {'acl_paths': [ < acl_path >, ... ]} :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """""" url = 'environment/acl_path/' code, xml = self.submit(None, 'GET', url) return self.response(code, xml)","Get all distinct acl paths. :return: Dictionary with the following structure: :: {'acl_paths': [ < acl_path >, ... ]} :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response." "def isiterable(obj, reject_string=True): """"""convenience tool to detect if something is iterable. in python3, strings count as iterables to we have the option to exclude them Parameters: ----------- obj : object to analyse reject_string : bool, whether to ignore strings Returns: -------- bool, if the object is itereable. """""" iterable = hasattr(obj, '__len__') if reject_string: iterable = iterable and not isinstance(obj, str) return iterable","convenience tool to detect if something is iterable. in python3, strings count as iterables to we have the option to exclude them Parameters: ----------- obj : object to analyse reject_string : bool, whether to ignore strings Returns: -------- bool, if the object is itereable." 
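A minimal standalone sketch of the isiterable entry above with a few illustrative checks; the test values are made up, and the __len__ heuristic means objects without __len__ (generators, for instance) are reported as non-iterable.

def isiterable(obj, reject_string=True):
    # __len__ is used as a cheap proxy for iterability; strings can be opted out.
    iterable = hasattr(obj, '__len__')
    if reject_string:
        iterable = iterable and not isinstance(obj, str)
    return iterable

assert isiterable([1, 2, 3]) is True
assert isiterable("abc") is False
assert isiterable("abc", reject_string=False) is True
assert isiterable(5) is False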
"def static_url(redis, path): """"""Gets the static path for a file"""""" file_hash = get_cache_buster(redis, path) return ""%s/%s?v=%s"" % (oz.settings[""static_host""], path, file_hash)",Gets the static path for a file "def validate(self, grid): """""" Using the MagIC data model, generate validation errors on a MagicGrid. Parameters ---------- grid : dialogs.magic_grid3.MagicGrid The MagicGrid to be validated Returns --------- warnings: dict Empty dict if no warnings, otherwise a dict with format {name of problem: [problem_columns]} """""" grid_name = str(grid.GetName()) dmodel = self.contribution.dmodel reqd_headers = dmodel.get_reqd_headers(grid_name) df = self.contribution.tables[grid_name].df df = df.replace('', np.nan) # python does not view empty strings as null if df.empty: return {} col_names = set(df.columns) missing_headers = set(reqd_headers) - col_names present_headers = set(reqd_headers) - set(missing_headers) non_null_headers = df.dropna(how='all', axis='columns').columns null_reqd_headers = present_headers - set(non_null_headers) if any(missing_headers) or any (null_reqd_headers): warnings = {'missing required column(s)': sorted(missing_headers), 'no data in required column(s)': sorted(null_reqd_headers)} else: warnings = {} return warnings","Using the MagIC data model, generate validation errors on a MagicGrid. Parameters ---------- grid : dialogs.magic_grid3.MagicGrid The MagicGrid to be validated Returns --------- warnings: dict Empty dict if no warnings, otherwise a dict with format {name of problem: [problem_columns]}" "def parametrized_bottleneck(x, hparams): """"""Meta-function calling all the above bottlenecks with hparams."""""" if hparams.bottleneck_kind == ""tanh_discrete"": d, _ = tanh_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5, hparams.discretize_warmup_steps, hparams.mode) return d, 0.0 if hparams.bottleneck_kind == ""isemhash"": return isemhash_bottleneck( x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5, hparams.discretize_warmup_steps, hparams.mode, hparams.isemhash_noise_dev, hparams.isemhash_mix_prob) if hparams.bottleneck_kind == ""vq"": return vq_discrete_bottleneck(x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon) if hparams.bottleneck_kind == ""em"": return vq_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon, soft_em=True, num_samples=hparams.vq_num_samples) if hparams.bottleneck_kind == ""gumbel_softmax"": return gumbel_softmax_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon, hparams.temperature_warmup_steps, hard=False, summary=True) raise ValueError( ""Unsupported hparams.bottleneck_kind %s"" % hparams.bottleneck_kind)",Meta-function calling all the above bottlenecks with hparams. "def predict(self, log2_bayes_factors, reset_index=False): """"""Guess the most likely modality for each event For each event that has at least one non-NA value, if no modalilites have logsumexp'd logliks greater than the log Bayes factor threshold, then they are assigned the 'multimodal' modality, because we cannot reject the null hypothesis that these did not come from the uniform distribution. Parameters ---------- log2_bayes_factors : pandas.DataFrame A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0, bimodal, and middle modalities. 
If an event has no bayes factors for any of those modalities, it is ignored reset_index : bool If True, remove the first level of the index from the dataframe. Useful if you are using this function to apply to a grouped dataframe where the first level is something other than the modality, e.g. the celltype Returns ------- modalities : pandas.Series A (n_events,) series with the most likely modality for each event """""" if reset_index: x = log2_bayes_factors.reset_index(level=0, drop=True) else: x = log2_bayes_factors if isinstance(x, pd.DataFrame): not_na = (x.notnull() > 0).any() not_na_columns = not_na[not_na].index x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh elif isinstance(x, pd.Series): x[NULL_MODEL] = self.logbf_thresh return x.idxmax()","Guess the most likely modality for each event For each event that has at least one non-NA value, if no modalilites have logsumexp'd logliks greater than the log Bayes factor threshold, then they are assigned the 'multimodal' modality, because we cannot reject the null hypothesis that these did not come from the uniform distribution. Parameters ---------- log2_bayes_factors : pandas.DataFrame A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0, bimodal, and middle modalities. If an event has no bayes factors for any of those modalities, it is ignored reset_index : bool If True, remove the first level of the index from the dataframe. Useful if you are using this function to apply to a grouped dataframe where the first level is something other than the modality, e.g. the celltype Returns ------- modalities : pandas.Series A (n_events,) series with the most likely modality for each event" "def sensor(sensor_type, cfg): """""" Creates a Kinect2 sensor of the specified type. Parameters ---------- sensor_type : :obj:`str` the type of the sensor (real or virtual) cfg : :obj:`YamlConfig` dictionary of parameters for sensor initialization """""" sensor_type = sensor_type.lower() if sensor_type == 'real': s = Kinect2Sensor(packet_pipeline_mode=cfg['pipeline_mode'], device_num=cfg['device_num'], frame=cfg['frame']) elif sensor_type == 'virtual': s = VirtualKinect2Sensor(cfg['image_dir'], frame=cfg['frame']) elif sensor_type == 'bridged': s = KinectSensorBridged(quality=cfg['quality'], frame=cfg['frame']) else: raise ValueError('Kinect2 sensor type %s not supported' %(sensor_type)) return s","Creates a Kinect2 sensor of the specified type. Parameters ---------- sensor_type : :obj:`str` the type of the sensor (real or virtual) cfg : :obj:`YamlConfig` dictionary of parameters for sensor initialization" "def start(self): """""" Launches a new POP3 client session on the server taken from the `self.options` dict. :param my_ip: IP of this Client itself """""" username = self.options['username'] password = self.options['password'] server_host = self.options['server'] server_port = self.options['port'] honeypot_id = self.options['honeypot_id'] session = self.create_session(server_host, server_port, honeypot_id) try: logger.debug( 'Sending {0} bait session to {1}:{2}. 
(bait id: {3})'.format('pop3', server_host, server_port, session.id)) conn = poplib.POP3_SSL(server_host, server_port) session.source_port = conn.sock.getsockname()[1] banner = conn.getwelcome() session.protocol_data['banner'] = banner session.did_connect = True conn.user(username) conn.pass_(password) # TODO: Handle failed login session.add_auth_attempt('plaintext', True, username=username, password=password) session.did_login = True session.timestamp = datetime.utcnow() except Exception as err: logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err)))) else: list_entries = conn.list()[1] for entry in list_entries: index, _ = entry.split(' ') conn.retr(index) conn.dele(index) logger.debug('Found and deleted {0} messages on {1}'.format(len(list_entries), server_host)) conn.quit() session.did_complete = True finally: session.alldone = True session.end_session()","Launches a new POP3 client session on the server taken from the `self.options` dict. :param my_ip: IP of this Client itself" "def _crop_list_to_size(l, size): """"""Make a list a certain size"""""" for x in range(size - len(l)): l.append(False) for x in range(len(l) - size): l.pop() return l",Make a list a certain size "def parse(cls, line, encoding=pydle.protocol.DEFAULT_ENCODING): """""" Parse given line into IRC message structure. Returns a TaggedMessage. """""" valid = True # Decode message. try: message = line.decode(encoding) except UnicodeDecodeError: # Try our fallback encoding. message = line.decode(pydle.protocol.FALLBACK_ENCODING) # Sanity check for message length. if len(message) > TAGGED_MESSAGE_LENGTH_LIMIT: valid = False # Strip message separator. if message.endswith(rfc1459.protocol.LINE_SEPARATOR): message = message[:-len(rfc1459.protocol.LINE_SEPARATOR)] elif message.endswith(rfc1459.protocol.MINIMAL_LINE_SEPARATOR): message = message[:-len(rfc1459.protocol.MINIMAL_LINE_SEPARATOR)] raw = message # Parse tags. tags = {} if message.startswith(TAG_INDICATOR): message = message[len(TAG_INDICATOR):] raw_tags, message = message.split(' ', 1) for raw_tag in raw_tags.split(TAG_SEPARATOR): if TAG_VALUE_SEPARATOR in raw_tag: tag, value = raw_tag.split(TAG_VALUE_SEPARATOR, 1) else: tag = raw_tag value = True tags[tag] = value # Parse rest of message. message = super().parse(message.lstrip().encode(encoding), encoding=encoding) return TaggedMessage(_raw=raw, _valid=message._valid and valid, tags=tags, **message._kw)","Parse given line into IRC message structure. Returns a TaggedMessage." "def run_server(conn, command, sock_path, debug, timeout): """"""Common code for run_agent and run_git below."""""" ret = 0 try: handler = protocol.Handler(conn=conn, debug=debug) with serve(handler=handler, sock_path=sock_path, timeout=timeout) as env: if command: ret = server.run_process(command=command, environ=env) else: signal.pause() # wait for signal (e.g. SIGINT) except KeyboardInterrupt: log.info('server stopped') return ret",Common code for run_agent and run_git below. "def asArcPyObject(self): """""" returns the Envelope as an ESRI arcpy.Polygon object """""" env = self.asDictionary ring = [[ Point(env['xmin'], env['ymin'], self._wkid), Point(env['xmax'], env['ymin'], self._wkid), Point(env['xmax'], env['ymax'], self._wkid), Point(env['xmin'], env['ymax'], self._wkid) ]] return Polygon(rings=ring, wkid=self._wkid, wkt=self._wkid, hasZ=False, hasM=False).asArcPyObject",returns the Envelope as an ESRI arcpy.Polygon object "def delete_institute(self, institute): """""" Called when institute is deleted. 
"""""" name = institute.name logger.debug(""institute_deleted '%s'"" % name) # institute deleted self._call([""goldsh"", ""Organization"", ""Delete"", ""Name==%s"" % name]) logger.debug(""returning"") return",Called when institute is deleted. "def pretty_print_table_instance(table): """"""Pretty print a table instance."""""" assert isinstance(table, Table) def pretty_print_row(styled, plain): """"""Pretty print a row."""""" click.secho( "" | "".join( v + "" "" * (table.column_widths[k] - len(plain[k])) for k, v in enumerate(styled) ) ) pretty_print_row(table.headers, table.plain_headers) for k, row in enumerate(table.rows): pretty_print_row(row, table.plain_rows[k])",Pretty print a table instance. "def isSet(self, param): """""" Checks whether a param is explicitly set by user. """""" param = self._resolveParam(param) return param in self._paramMap",Checks whether a param is explicitly set by user. "def select_inputs(self, address: str, amount: int) -> dict: '''finds apropriate utxo's to include in rawtx, while being careful to never spend old transactions with a lot of coin age. Argument is intiger, returns list of apropriate UTXO's''' utxos = [] utxo_sum = Decimal(0) for tx in sorted(self.listunspent(address=address), key=itemgetter('confirmations')): if tx[""address""] not in (self.pa_parameters.P2TH_addr, self.pa_parameters.test_P2TH_addr): utxos.append( MutableTxIn(txid=tx['txid'], txout=tx['vout'], sequence=Sequence.max(), script_sig=ScriptSig.empty()) ) utxo_sum += Decimal(tx[""amount""]) if utxo_sum >= amount: return {'utxos': utxos, 'total': utxo_sum} if utxo_sum < amount: raise InsufficientFunds(""Insufficient funds."") raise Exception(""undefined behavior :.("")","finds apropriate utxo's to include in rawtx, while being careful to never spend old transactions with a lot of coin age. Argument is intiger, returns list of apropriate UTXO's" "def _verify_constrained_hash(self): """""" Warn if the object name is not explicitly set. """""" changed_params = dict(self.param.get_param_values(onlychanged=True)) if self.time_dependent and ('name' not in changed_params): self.param.warning(""Default object name used to set the seed: "" ""random values conditional on object instantiation order."")",Warn if the object name is not explicitly set. "def month_name(self, locale=None): """""" Return the month names of the DateTimeIndex with specified locale. .. versionadded:: 0.23.0 Parameters ---------- locale : str, optional Locale determining the language in which to return the month name. Default is English locale. Returns ------- Index Index of month names. Examples -------- >>> idx = pd.date_range(start='2018-01', freq='M', periods=3) >>> idx DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], dtype='datetime64[ns]', freq='M') >>> idx.month_name() Index(['January', 'February', 'March'], dtype='object') """""" if self.tz is not None and not timezones.is_utc(self.tz): values = self._local_timestamps() else: values = self.asi8 result = fields.get_date_name_field(values, 'month_name', locale=locale) result = self._maybe_mask_results(result, fill_value=None) return result","Return the month names of the DateTimeIndex with specified locale. .. versionadded:: 0.23.0 Parameters ---------- locale : str, optional Locale determining the language in which to return the month name. Default is English locale. Returns ------- Index Index of month names. 
Examples -------- >>> idx = pd.date_range(start='2018-01', freq='M', periods=3) >>> idx DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], dtype='datetime64[ns]', freq='M') >>> idx.month_name() Index(['January', 'February', 'March'], dtype='object')" "def check_my_users(user): """"""Check if user exists and its credentials. Take a look at encrypt_app.py and encrypt_cli.py to see how to encrypt passwords """""" user_data = my_users.get(user['username']) if not user_data: return False # <--- invalid credentials elif user_data.get('password') == user['password']: return True # <--- user is logged in! return False","Check if user exists and its credentials. Take a look at encrypt_app.py and encrypt_cli.py to see how to encrypt passwords" "def __batch_update(self, train_events, test_events, n_epoch): """"""Batch update called by the fitting method. Args: train_events (list of Event): Positive training events. test_events (list of Event): Test events. n_epoch (int): Number of epochs for the batch training. """""" for epoch in range(n_epoch): # SGD requires us to shuffle events in each iteration # * if n_epoch == 1 # => shuffle is not required because it is a deterministic training (i.e. matrix sketching) if n_epoch != 1: np.random.shuffle(train_events) # train for e in train_events: self.rec.update(e, batch_train=True) # test MPR = self.__batch_evaluate(test_events) if self.debug: logger.debug('epoch %2d: MPR = %f' % (epoch + 1, MPR))","Batch update called by the fitting method. Args: train_events (list of Event): Positive training events. test_events (list of Event): Test events. n_epoch (int): Number of epochs for the batch training." "def plot_bcr(fignum, Bcr1, Bcr2): """""" function to plot two estimates of Bcr against each other """""" plt.figure(num=fignum) plt.plot(Bcr1, Bcr2, 'ro') plt.xlabel('Bcr1') plt.ylabel('Bcr2') plt.title('Compare coercivity of remanence')",function to plot two estimates of Bcr against each other "def iter_emails(self, number=-1, etag=None): """"""Iterate over email addresses for the authenticated user. :param int number: (optional), number of email addresses to return. Default: -1 returns all available email addresses :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of dicts """""" url = self._build_url('user', 'emails') return self._iter(int(number), url, dict, etag=etag)","Iterate over email addresses for the authenticated user. :param int number: (optional), number of email addresses to return. Default: -1 returns all available email addresses :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of dicts" "def create_dcnm_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False): """"""Create the DCNM In Network and store the result in DB. """""" tenant_name = fw_dict.get('tenant_name') ret = self._create_service_nwk(tenant_id, tenant_name, 'in') if ret: res = fw_const.DCNM_IN_NETWORK_CREATE_SUCCESS LOG.info(""In Service network created for tenant %s"", tenant_id) else: res = fw_const.DCNM_IN_NETWORK_CREATE_FAIL LOG.info(""In Service network create failed for tenant %s"", tenant_id) self.update_fw_db_result(tenant_id, dcnm_status=res) return ret",Create the DCNM In Network and store the result in DB. "def parse_vlq(self, segment): """""" Parse a string of VLQ-encoded data. Returns: a list of integers. """""" values = [] cur, shift = 0, 0 for c in segment: val = B64[ord(c)] # Each character is 6 bits: # 5 of value and the high bit is the continuation. 
val, cont = val & 0b11111, val >> 5 cur += val << shift shift += 5 if not cont: # The low bit of the unpacked value is the sign. cur, sign = cur >> 1, cur & 1 if sign: cur = -cur values.append(cur) cur, shift = 0, 0 if cur or shift: raise SourceMapDecodeError('leftover cur/shift in vlq decode') return values","Parse a string of VLQ-encoded data. Returns: a list of integers." "def raw_message(self, message, silent=False): """"""Display a message in the Vim status line."""""" vim = self._vim cmd = 'echo ""{}""'.format(message.replace('""', '\\""')) if silent: cmd = 'silent ' + cmd if self.isneovim: vim.async_call(vim.command, cmd) else: vim.command(cmd)",Display a message in the Vim status line. "def describe(self): """"""Information about this parameter"""""" desc = { 'name': self.name, 'description': self.description, # the Parameter might not have a type at all 'type': self.type or 'unknown', } for attr in ['min', 'max', 'allowed', 'default']: v = getattr(self, attr) if v is not None: desc[attr] = v return desc",Information about this parameter "def close(self): """""" Close this window. If this window is active, switch to previous window """""" target = self.prev if (self.is_current and self.prev != self) else None with switch_window(self._browser, self.name): self._browser.driver.close() if target is not None: target.is_current = True","Close this window. If this window is active, switch to previous window" "def _calculateBasalLearning(self, activeColumns, burstingColumns, correctPredictedCells, activeBasalSegments, matchingBasalSegments, basalPotentialOverlaps): """""" Basic Temporal Memory learning. Correctly predicted cells always have active basal segments, and we learn on these segments. In bursting columns, we either learn on an existing basal segment, or we grow a new one. The only influence apical dendrites have on basal learning is: the apical dendrites influence which cells are considered ""predicted"". So an active apical dendrite can prevent some basal segments in active columns from learning. 
@param correctPredictedCells (numpy array) @param burstingColumns (numpy array) @param activeBasalSegments (numpy array) @param matchingBasalSegments (numpy array) @param basalPotentialOverlaps (numpy array) @return (tuple) - learningActiveBasalSegments (numpy array) Active basal segments on correct predicted cells - learningMatchingBasalSegments (numpy array) Matching basal segments selected for learning in bursting columns - basalSegmentsToPunish (numpy array) Basal segments that should be punished for predicting an inactive column - newBasalSegmentCells (numpy array) Cells in bursting columns that were selected to grow new basal segments - learningCells (numpy array) Cells that have learning basal segments or are selected to grow a basal segment """""" # Correctly predicted columns learningActiveBasalSegments = self.basalConnections.filterSegmentsByCell( activeBasalSegments, correctPredictedCells) cellsForMatchingBasal = self.basalConnections.mapSegmentsToCells( matchingBasalSegments) matchingCells = np.unique(cellsForMatchingBasal) (matchingCellsInBurstingColumns, burstingColumnsWithNoMatch) = np2.setCompare( matchingCells, burstingColumns, matchingCells / self.cellsPerColumn, rightMinusLeft=True) learningMatchingBasalSegments = self._chooseBestSegmentPerColumn( self.basalConnections, matchingCellsInBurstingColumns, matchingBasalSegments, basalPotentialOverlaps, self.cellsPerColumn) newBasalSegmentCells = self._getCellsWithFewestSegments( self.basalConnections, self.rng, burstingColumnsWithNoMatch, self.cellsPerColumn) learningCells = np.concatenate( (correctPredictedCells, self.basalConnections.mapSegmentsToCells(learningMatchingBasalSegments), newBasalSegmentCells)) # Incorrectly predicted columns correctMatchingBasalMask = np.in1d( cellsForMatchingBasal / self.cellsPerColumn, activeColumns) basalSegmentsToPunish = matchingBasalSegments[~correctMatchingBasalMask] return (learningActiveBasalSegments, learningMatchingBasalSegments, basalSegmentsToPunish, newBasalSegmentCells, learningCells)","Basic Temporal Memory learning. Correctly predicted cells always have active basal segments, and we learn on these segments. In bursting columns, we either learn on an existing basal segment, or we grow a new one. The only influence apical dendrites have on basal learning is: the apical dendrites influence which cells are considered ""predicted"". So an active apical dendrite can prevent some basal segments in active columns from learning. @param correctPredictedCells (numpy array) @param burstingColumns (numpy array) @param activeBasalSegments (numpy array) @param matchingBasalSegments (numpy array) @param basalPotentialOverlaps (numpy array) @return (tuple) - learningActiveBasalSegments (numpy array) Active basal segments on correct predicted cells - learningMatchingBasalSegments (numpy array) Matching basal segments selected for learning in bursting columns - basalSegmentsToPunish (numpy array) Basal segments that should be punished for predicting an inactive column - newBasalSegmentCells (numpy array) Cells in bursting columns that were selected to grow new basal segments - learningCells (numpy array) Cells that have learning basal segments or are selected to grow a basal segment" "def off(self, event): 'Remove an event handler' try: self._once_events.remove(event) except KeyError: pass self._callback_by_event.pop(event, None)",Remove an event handler "def _init_imu(self): """""" Internal. 
Initialises the IMU sensor via RTIMU """""" if not self._imu_init: self._imu_init = self._imu.IMUInit() if self._imu_init: self._imu_poll_interval = self._imu.IMUGetPollInterval() * 0.001 # Enable everything on IMU self.set_imu_config(True, True, True) else: raise OSError('IMU Init Failed')",Internal. Initialises the IMU sensor via RTIMU "def validate_callback(self, service, pgturl, pgtid, pgtiou): """"""Verify the provided proxy callback URL."""""" if not proxy_allowed(service): raise UnauthorizedServiceProxy(""%s is not authorized to use proxy authentication"" % service) if not is_scheme_https(pgturl): raise InvalidProxyCallback(""Proxy callback %s is not HTTPS"" % pgturl) if not proxy_callback_allowed(service, pgturl): raise InvalidProxyCallback(""%s is not an authorized proxy callback URL"" % pgturl) # Verify that the SSL certificate is valid verify = os.environ.get('REQUESTS_CA_BUNDLE', True) try: requests.get(pgturl, verify=verify, timeout=5) except requests.exceptions.SSLError: raise InvalidProxyCallback(""SSL certificate validation failed for proxy callback %s"" % pgturl) except requests.exceptions.RequestException as e: raise InvalidProxyCallback(e) # Callback certificate appears valid, so send the ticket strings pgturl = add_query_params(pgturl, {'pgtId': pgtid, 'pgtIou': pgtiou}) try: response = requests.get(pgturl, verify=verify, timeout=5) except requests.exceptions.RequestException as e: raise InvalidProxyCallback(e) try: response.raise_for_status() except requests.exceptions.HTTPError as e: raise InvalidProxyCallback(""Proxy callback %s returned %s"" % (pgturl, e))",Verify the provided proxy callback URL. "def create_data_types(self): """"""Map of standard playbook variable types to create method."""""" return { 'Binary': self.create_binary, 'BinaryArray': self.create_binary_array, 'KeyValue': self.create_key_value, 'KeyValueArray': self.create_key_value_array, 'String': self.create_string, 'StringArray': self.create_string_array, 'TCEntity': self.create_tc_entity, 'TCEntityArray': self.create_tc_entity_array, }",Map of standard playbook variable types to create method. "def _export_graph(self): """""" Exports latest saved models to .nn format for Unity embedding. """""" for brain_name in self.trainers.keys(): self.trainers[brain_name].export_model()",Exports latest saved models to .nn format for Unity embedding. "def validate_key(self, key): """""" Called if the key_name class attribute is not None. """""" if not key: name = self.__class__.__name__ msg = ""%s response missing %s"" % (name, self.key_name) raise ValidationException(msg, self) elif not isinstance(key, str): msg = ""Response contains invalid %s type"" % self.key_name raise ValidationException(msg, type(key))",Called if the key_name class attribute is not None. "def wait_for_futures(self): """""" Wait for all futures to complete. This should be done at the end of an an invocation. """""" [future for future in futures.as_completed(self.futures)] self.futures = []","Wait for all futures to complete. This should be done at the end of an an invocation." "def _get_datapath(self): """""" Get a valid datapath, else raise an exception. """""" if self._datapath is None: raise OSError(errno.ENOENT, ""You didn't provide any datapath for %r"" % self.filename) return self._datapath","Get a valid datapath, else raise an exception." 
"def _split_line(self, line): """"""Split line into field values."""""" line = line.rstrip('\r\n') flds = re.split('\t', line) assert len(flds) == self.exp_numcol, ""EXPECTED({E}) COLUMNS, ACTUAL({A}): {L}"".format( E=self.exp_numcol, A=len(flds), L=line) return flds",Split line into field values. "def evaluate(dataloader): """"""Evaluate network on the specified dataset"""""" total_L = 0.0 total_sample_num = 0 total_correct_num = 0 start_log_interval_time = time.time() print('Begin Testing...') for i, ((data, valid_length), label) in enumerate(dataloader): data = mx.nd.transpose(data.as_in_context(context)) valid_length = valid_length.as_in_context(context).astype(np.float32) label = label.as_in_context(context) output = net(data, valid_length) L = loss(output, label) pred = (output > 0.5).reshape((-1,)) total_L += L.sum().asscalar() total_sample_num += label.shape[0] total_correct_num += (pred == label).sum().asscalar() if (i + 1) % args.log_interval == 0: print('[Batch {}/{}] elapsed {:.2f} s'.format( i + 1, len(dataloader), time.time() - start_log_interval_time)) start_log_interval_time = time.time() avg_L = total_L / float(total_sample_num) acc = total_correct_num / float(total_sample_num) return avg_L, acc",Evaluate network on the specified dataset "def get_fallback_languages(self): """""" Return the fallback language codes, which are used in case there is no translation for the currently active language. """""" lang_dict = get_language_settings(self._current_language) fallbacks = [lang for lang in lang_dict['fallbacks'] if lang != self._current_language] return fallbacks or []","Return the fallback language codes, which are used in case there is no translation for the currently active language." "def pin(package, version, checks, marker, resolving, lazy, quiet): """"""Pin a dependency for all checks that require it. This can also resolve transient dependencies. Setting the version to `none` will remove the package. You can specify an unlimited number of additional checks to apply the pin for via arguments. """""" root = get_root() package_name = package.lower() version = version.lower() for check_name in sorted(os.listdir(root)): pinned_reqs_file = os.path.join(root, check_name, 'requirements.in') resolved_reqs_file = os.path.join(root, check_name, 'requirements.txt') if os.path.isfile(pinned_reqs_file): pinned_packages = {package.name: package for package in read_packages(pinned_reqs_file)} if package not in pinned_packages and check_name not in checks: continue if resolving: pre_packages = list(read_packages(resolved_reqs_file)) else: pre_packages = list(itervalues(pinned_packages)) if not quiet: echo_info('Check `{}`:'.format(check_name)) if version == 'none': del pinned_packages[package_name] else: pinned_packages[package_name] = Package(package_name, version, marker) package_list = sorted(itervalues(pinned_packages)) write_file_lines(pinned_reqs_file, ('{}\n'.format(package) for package in package_list)) if not quiet: echo_waiting(' Resolving dependencies...') if resolving: result = resolve_requirements(pinned_reqs_file, resolved_reqs_file, lazy=lazy) if result.code: abort(result.stdout + result.stderr) if not quiet: post_packages = read_packages(resolved_reqs_file if resolving else pinned_reqs_file) display_package_changes(pre_packages, post_packages, indent=' ')","Pin a dependency for all checks that require it. This can also resolve transient dependencies. Setting the version to `none` will remove the package. 
You can specify an unlimited number of additional checks to apply the pin for via arguments." "def get_dimension(self, key, value, **kwargs): """""" get a dimension by key and value Args: key (string): key of the dimension value (string): value of the dimension Returns: dictionary of response """""" return self._get_object_by_name(self._DIMENSION_ENDPOINT_SUFFIX, '{0}/{1}'.format(key, value), **kwargs)","get a dimension by key and value Args: key (string): key of the dimension value (string): value of the dimension Returns: dictionary of response" "def create_endpoint(service_name: str, *, ipv4: OptStr = None, ipv6: OptStr = None, port: OptInt = None) -> Endpoint: """"""Factory function to create Endpoint object. """""" return Endpoint(service_name, ipv4, ipv6, port)",Factory function to create Endpoint object. "def delete_fwrule(kwargs=None, call=None): ''' Permanently delete a firewall rule. CLI Example: .. code-block:: bash salt-cloud -f delete_fwrule gce name=allow-http ''' if call != 'function': raise SaltCloudSystemExit( 'The delete_fwrule function must be called with -f or --function.' ) if not kwargs or 'name' not in kwargs: log.error( 'A name must be specified when deleting a firewall rule.' ) return False name = kwargs['name'] conn = get_conn() __utils__['cloud.fire_event']( 'event', 'delete firewall', 'salt/cloud/firewall/deleting', args={ 'name': name, }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) try: result = conn.ex_destroy_firewall( conn.ex_get_firewall(name) ) except ResourceNotFoundError as exc: log.error( 'Rule %s was not found. Exception was: %s', name, exc, exc_info_on_loglevel=logging.DEBUG ) return False __utils__['cloud.fire_event']( 'event', 'deleted firewall', 'salt/cloud/firewall/deleted', args={ 'name': name, }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return result","Permanently delete a firewall rule. CLI Example: .. code-block:: bash salt-cloud -f delete_fwrule gce name=allow-http" "def get_instance(self, payload): """""" Build an instance of AlphaSenderInstance :param dict payload: Payload response from the API :returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance :rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance """""" return AlphaSenderInstance(self._version, payload, service_sid=self._solution['service_sid'], )","Build an instance of AlphaSenderInstance :param dict payload: Payload response from the API :returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance :rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance" "def plural_adj(self, text, count=None): """""" Return the plural of text, where text is an adjective. If count supplied, then return text if count is one of: 1, a, an, one, each, every, this, that otherwise return the plural. Whitespace at the start and end is preserved. """""" pre, word, post = self.partition_word(text) if not word: return text plural = self.postprocess(word, self._pl_special_adjective(word, count) or word) return ""{}{}{}"".format(pre, plural, post)","Return the plural of text, where text is an adjective. If count supplied, then return text if count is one of: 1, a, an, one, each, every, this, that otherwise return the plural. Whitespace at the start and end is preserved." 
"def load_OGLE3_catalog(): """"""Return the full list of variables stars of OGLE-3 as a DataFrame """""" with bz2.BZ2File(CATALOG_PATH) as bz2fp, warnings.catch_warnings(): warnings.simplefilter(""ignore"") df = pd.read_table(bz2fp, skiprows=6) df.rename(columns={""# ID"": ""ID""}, inplace=True) return df",Return the full list of variables stars of OGLE-3 as a DataFrame "def try_remove(self): """"""Try to remove the path If it is a directory, try recursive removal of contents too """""" if self.islink(): self.unlink() elif self.isfile(): self.remove() elif self.isdir(): self.empty_directory() if self.isdir(): self.rmdir() else: return False return True","Try to remove the path If it is a directory, try recursive removal of contents too" "def register_backend(cls: Type[StorageBackend]): """"""Decorator to register another StorageBackend using it's `NAME`."""""" if not issubclass(cls, StorageBackend): raise TypeError(""cls must be a subclass of StorageBackend"") __registry__[cls.NAME] = cls return cls",Decorator to register another StorageBackend using it's `NAME`. "def refresh_core(self): """"""Query device for all attributes that exist regardless of power state. This will force a refresh for all device queries that are valid to request at any time. It's the only safe suite of queries that we can make if we do not know the current state (on or off+standby). This does not return any data, it just issues the queries. """""" self.log.info('Sending out mass query for all attributes') for key in ATTR_CORE: self.query(key)","Query device for all attributes that exist regardless of power state. This will force a refresh for all device queries that are valid to request at any time. It's the only safe suite of queries that we can make if we do not know the current state (on or off+standby). This does not return any data, it just issues the queries." "def to_xml(self): """""" Returns a string containing the XML version of the Lifecycle configuration as defined by S3. """""" s = '' for rule in self: s += rule.to_xml() s += '' return s","Returns a string containing the XML version of the Lifecycle configuration as defined by S3." "def state(self): """""" Reading returns a list of state flags. Possible flags are `running`, `ramping`, `holding`, `overloaded` and `stalled`. """""" self._state, value = self.get_attr_set(self._state, 'state') return value","Reading returns a list of state flags. Possible flags are `running`, `ramping`, `holding`, `overloaded` and `stalled`." 
"def reload_libraries(library_directories: list = None): """""" Reload the libraries stored in the project's local and shared library directories """""" directories = library_directories or [] project = cauldron.project.get_internal_project() if project: directories += project.library_directories if not directories: return def reload_module(path: str, library_directory: str): path = os.path.dirname(path) if path.endswith('__init__.py') else path start_index = len(library_directory) + 1 end_index = -3 if path.endswith('.py') else None package_path = path[start_index:end_index] module = sys.modules.get(package_path.replace(os.sep, '.')) return importlib.reload(module) if module is not None else None def reload_library(directory: str) -> list: if not add_library_path(directory): # If the library wasn't added because it doesn't exist, remove it # in case the directory has recently been deleted and then return # an empty result remove_library_path(directory) return [] glob_path = os.path.join(directory, '**', '*.py') return [ reload_module(path, directory) for path in glob.glob(glob_path, recursive=True) ] return [ reloaded_module for directory in directories for reloaded_module in reload_library(directory) if reload_module is not None ]","Reload the libraries stored in the project's local and shared library directories" "def get_graderoster(section, instructor, requestor): """""" Returns a restclients.GradeRoster for the passed Section model and instructor Person. """""" label = GradeRoster(section=section, instructor=instructor).graderoster_label() url = ""{}/{}"".format(graderoster_url, encode_section_label(label)) headers = {""Accept"": ""text/xhtml"", ""Connection"": ""keep-alive"", ""X-UW-Act-as"": requestor.uwnetid} response = SWS_GradeRoster_DAO().getURL(url, headers) if response.status != 200: root = etree.fromstring(response.data) msg = root.find("".//*[@class='status_description']"").text.strip() raise DataFailureException(url, response.status, msg) return GradeRoster(data=etree.fromstring(response.data.strip()), section=section, instructor=instructor)","Returns a restclients.GradeRoster for the passed Section model and instructor Person." "def get_by_resource(self, resource_uri): """""" Gets all the labels for the specified resource Args: resource_uri: The resource URI Returns: dict: Resource Labels """""" uri = self.URI + self.RESOURCES_PATH + '/' + resource_uri return self._client.get(id_or_uri=uri)","Gets all the labels for the specified resource Args: resource_uri: The resource URI Returns: dict: Resource Labels" "def data(self): """""" Helper class for parsing JSON POST data into a Python object. """""" if self.request.method == 'GET': return self.request.GET else: assert self.request.META['CONTENT_TYPE'].startswith('application/json') charset = self.request.encoding or settings.DEFAULT_CHARSET return json.loads(self.request.body.decode(charset))",Helper class for parsing JSON POST data into a Python object. "def getScan(self, title, peptide=None): """""" allows random lookup """""" if self.ra.has_key(title): self.filename.seek(self.ra[title][0],0) toRead = self.ra[title][1]-self.ra[title][0] info = self.filename.read(toRead) scan = self.parseScan(info) else: return None return scan",allows random lookup "def get_all_synIdx(self): """""" Auxilliary function to set up class attributes containing synapse locations given as LFPy.Cell compartment indices This function takes no inputs. 
Parameters ---------- None Returns ------- synIdx : dict `output[cellindex][populationindex][layerindex]` numpy.ndarray of compartment indices. See also -------- Population.get_synidx, Population.fetchSynIdxCell """""" tic = time() #containers for synapse idxs existing on this rank synIdx = {} #ok then, we will draw random numbers across ranks, which have to #be unique per cell. Now, we simply record the random state, #change the seed per cell, and put the original state back below. randomstate = np.random.get_state() for cellindex in self.RANK_CELLINDICES: #set the random seed on for each cellindex np.random.seed(self.POPULATIONSEED + cellindex) #find synapse locations for cell in parallel synIdx[cellindex] = self.get_synidx(cellindex) #reset the random number generator np.random.set_state(randomstate) if RANK == 0: print('found synapse locations in %.2f seconds' % (time()-tic)) #print the number of synapses per layer from which presynapse population if self.verbose: for cellindex in self.RANK_CELLINDICES: for i, synidx in enumerate(synIdx[cellindex]): print('to:\t%s\tcell:\t%i\tfrom:\t%s:' % (self.y, cellindex, self.X[i]),) idxcount = 0 for idx in synidx: idxcount += idx.size print('\t%i' % idx.size,) print('\ttotal %i' % idxcount) return synIdx","Auxilliary function to set up class attributes containing synapse locations given as LFPy.Cell compartment indices This function takes no inputs. Parameters ---------- None Returns ------- synIdx : dict `output[cellindex][populationindex][layerindex]` numpy.ndarray of compartment indices. See also -------- Population.get_synidx, Population.fetchSynIdxCell" "def order_pseudotime(self): """"""Define indices that reflect segment and pseudotime order. Writes ------ indices : np.ndarray Index array of shape n, which stores an ordering of the data points with respect to increasing segment index and increasing pseudotime. changepoints : np.ndarray Index array of shape len(ssegs)-1, which stores the indices of points where the segment index changes, with respect to the ordering of indices. """""" # within segs_tips, order tips according to pseudotime if self.iroot is not None: for itips, tips in enumerate(self.segs_tips): if tips[0] != -1: indices = np.argsort(self.pseudotime[tips]) self.segs_tips[itips] = self.segs_tips[itips][indices] else: logg.m(' group', itips, 'is very small', v=4) # sort indices according to segments indices = np.argsort(self.segs_names) segs_names = self.segs_names[indices] # find changepoints of segments changepoints = np.arange(indices.size-1)[np.diff(segs_names) == 1] + 1 if self.iroot is not None: pseudotime = self.pseudotime[indices] for iseg, seg in enumerate(self.segs): # only consider one segment, it's already ordered by segment seg_sorted = seg[indices] # consider the pseudotime on this segment and sort them seg_indices = np.argsort(pseudotime[seg_sorted]) # within the segment, order indices according to increasing pseudotime indices[seg_sorted] = indices[seg_sorted][seg_indices] # define class members self.indices = indices self.changepoints = changepoints","Define indices that reflect segment and pseudotime order. Writes ------ indices : np.ndarray Index array of shape n, which stores an ordering of the data points with respect to increasing segment index and increasing pseudotime. changepoints : np.ndarray Index array of shape len(ssegs)-1, which stores the indices of points where the segment index changes, with respect to the ordering of indices." 
"def _create_minimum_needs_action(self): """"""Create action for minimum needs dialog."""""" icon = resources_path('img', 'icons', 'show-minimum-needs.svg') self.action_minimum_needs = QAction( QIcon(icon), self.tr('Minimum Needs Calculator'), self.iface.mainWindow()) self.action_minimum_needs.setStatusTip(self.tr( 'Open InaSAFE minimum needs calculator')) self.action_minimum_needs.setWhatsThis(self.tr( 'Open InaSAFE minimum needs calculator')) self.action_minimum_needs.triggered.connect(self.show_minimum_needs) self.add_action( self.action_minimum_needs, add_to_toolbar=self.full_toolbar)",Create action for minimum needs dialog. "def set_album(self, album): """"""Sets song's album :param album: album """""" self._set_attr(TALB(encoding=3, text=album.decode('utf-8')))","Sets song's album :param album: album" "def count(self, query): """""" Returns number of matching entries :type query str :rtype: int """""" body = { ""query"": { ""bool"": { ""must"": [{ ""query_string"": { ""query"": query, } }] } } } body['query']['bool']['must'].append(self._get_timestamp_filer()) return self._es.count(index=self._index, body=body).get('count')","Returns number of matching entries :type query str :rtype: int" "def _determine_auth(**kwargs): ''' Acquire Azure ARM Credentials ''' if 'profile' in kwargs: azure_credentials = __salt__['config.option'](kwargs['profile']) kwargs.update(azure_credentials) service_principal_creds_kwargs = ['client_id', 'secret', 'tenant'] user_pass_creds_kwargs = ['username', 'password'] try: if kwargs.get('cloud_environment') and kwargs.get('cloud_environment').startswith('http'): cloud_env = get_cloud_from_metadata_endpoint(kwargs['cloud_environment']) else: cloud_env_module = importlib.import_module('msrestazure.azure_cloud') cloud_env = getattr(cloud_env_module, kwargs.get('cloud_environment', 'AZURE_PUBLIC_CLOUD')) except (AttributeError, ImportError, MetadataEndpointError): raise sys.exit('The Azure cloud environment {0} is not available.'.format(kwargs['cloud_environment'])) if set(service_principal_creds_kwargs).issubset(kwargs): if not (kwargs['client_id'] and kwargs['secret'] and kwargs['tenant']): raise SaltInvocationError( 'The client_id, secret, and tenant parameters must all be ' 'populated if using service principals.' ) else: credentials = ServicePrincipalCredentials(kwargs['client_id'], kwargs['secret'], tenant=kwargs['tenant'], cloud_environment=cloud_env) elif set(user_pass_creds_kwargs).issubset(kwargs): if not (kwargs['username'] and kwargs['password']): raise SaltInvocationError( 'The username and password parameters must both be ' 'populated if using username/password authentication.' ) else: credentials = UserPassCredentials(kwargs['username'], kwargs['password'], cloud_environment=cloud_env) elif 'subscription_id' in kwargs: try: from msrestazure.azure_active_directory import ( MSIAuthentication ) credentials = MSIAuthentication(cloud_environment=cloud_env) except ImportError: raise SaltSystemExit(msg='MSI authentication support not availabe (requires msrestazure >= 0.4.14)') else: raise SaltInvocationError( 'Unable to determine credentials. 
' 'A subscription_id with username and password, ' 'or client_id, secret, and tenant or a profile with the ' 'required parameters populated' ) if 'subscription_id' not in kwargs: raise SaltInvocationError( 'A subscription_id must be specified' ) subscription_id = salt.utils.stringutils.to_str(kwargs['subscription_id']) return credentials, subscription_id, cloud_env",Acquire Azure ARM Credentials "def month(self, month): """""" Sets the month of this ReportResponse. Month of requested billing report :param month: The month of this ReportResponse. :type: str """""" if month is None: raise ValueError(""Invalid value for `month`, must not be `None`"") if month is not None and not re.search('^\\d{4}-\\d{2}$', month): raise ValueError(""Invalid value for `month`, must be a follow pattern or equal to `/^\\d{4}-\\d{2}$/`"") self._month = month","Sets the month of this ReportResponse. Month of requested billing report :param month: The month of this ReportResponse. :type: str" "def has_access_api(f): """""" Use this decorator to enable granular security permissions to your API methods. Permissions will be associated to a role, and roles are associated to users. By default the permission's name is the methods name. this will return a message and HTTP 401 is case of unauthorized access. """""" if hasattr(f, '_permission_name'): permission_str = f._permission_name else: permission_str = f.__name__ def wraps(self, *args, **kwargs): permission_str = PERMISSION_PREFIX + f._permission_name if self.appbuilder.sm.has_access( permission_str, self.__class__.__name__ ): return f(self, *args, **kwargs) else: log.warning( LOGMSG_ERR_SEC_ACCESS_DENIED.format( permission_str, self.__class__.__name__ ) ) response = make_response( jsonify( { 'message': str(FLAMSG_ERR_SEC_ACCESS_DENIED), 'severity': 'danger' } ), 401 ) response.headers['Content-Type'] = ""application/json"" return response f._permission_name = permission_str return functools.update_wrapper(wraps, f)","Use this decorator to enable granular security permissions to your API methods. Permissions will be associated to a role, and roles are associated to users. By default the permission's name is the methods name. this will return a message and HTTP 401 is case of unauthorized access." "def _upload_media(self,directory,files=None,resize_request=None): """"""Uploads media file to FLICKR, returns True if uploaded successfully, Will replace if already uploaded, If megapixels > 0, will scale photos before upload If no filename given, will go through all files in DB"""""" # Connect if we aren't already if not self._connectToFlickr(): logger.error(""%s - Couldn't connect to flickr"") return False _tags=self._load_tags(directory) _megapixels=self._load_megapixels(directory) # If no files given, use files from DB in dir if not files: db=self._loadDB(directory) files=db.keys() #If only one file given, make it a list if isinstance(files,basestring): files=[files] files.sort() for filename in files: #FIXME: If this fails, should send a list # to Upload() about which files DID make it, # so we don't have to upload it again! 
status,replaced=self._upload_or_replace_flickr(directory,filename, \ _tags, _megapixels,resize_request) if not status: return False # If uploaded OK, update photo properties, tags # already taken care of - only update if # this is a new photo (eg, if it was replaced # then we don't need to do this if not replaced: self._update_config_location(directory,filename) self._update_config_sets(directory,filename) return True","Uploads media file to FLICKR, returns True if uploaded successfully, Will replace if already uploaded, If megapixels > 0, will scale photos before upload If no filename given, will go through all files in DB" "def _delLocalOwnerRole(self, username): """"""Remove local owner role from parent object """""" parent = self.getParent() if parent.portal_type == ""Client"": parent.manage_delLocalRoles([username]) # reindex object security self._recursive_reindex_object_security(parent)",Remove local owner role from parent object "def _fields_from_table(spec_table, id_key): """"""Read a specification and return a list of fields. The given specification is assumed to be in `reST grid table format `_. Parameters ---------- spec_table : str Specification given as a string containing a definition table. id_key : str Dictionary key (= column header) for the ID labels in ``spec``. Returns ------- fields : tuple of dicts Field list of the specification with combined multi-line entries. Each field corresponds to one (multi-)line of the spec. """""" # Reformat the table, throwing away lines not starting with '|' spec_lines = [line[1:-1].rstrip() for line in spec_table.splitlines() if line.startswith('|')] # Guess the CSV dialect and read the table, producing an iterable dialect = csv.Sniffer().sniff(spec_lines[0], delimiters='|') reader = csv.DictReader(spec_lines, dialect=dialect) # Read the fields as dictionaries and transform keys and values to # lowercase. fields = [] for row in reader: new_row = {} if row[id_key].strip(): # Start of a new field, indicated by a nontrivial ID entry for key, val in row.items(): new_row[key.strip()] = val.strip() fields.append(new_row) else: # We have the second row of a multi-line field. We # append all stripped values to the corresponding existing entry # value with an extra space. if not fields: # Just to make sure that this situation did not happen at # the very beginning of the table continue for key, val in row.items(): fields[-1][key.strip()] += (' ' + val).rstrip() return tuple(fields)","Read a specification and return a list of fields. The given specification is assumed to be in `reST grid table format `_. Parameters ---------- spec_table : str Specification given as a string containing a definition table. id_key : str Dictionary key (= column header) for the ID labels in ``spec``. Returns ------- fields : tuple of dicts Field list of the specification with combined multi-line entries. Each field corresponds to one (multi-)line of the spec." 
"def zoom_leftup(self, event=None): """"""leftup event handler for zoom mode in images"""""" if self.zoom_ini is None: return ini_x, ini_y, ini_xd, ini_yd = self.zoom_ini try: dx = abs(ini_x - event.x) dy = abs(ini_y - event.y) except: dx, dy = 0, 0 t0 = time.time() self.rbbox = None self.zoom_ini = None if (dx > 3) and (dy > 3) and (t0-self.mouse_uptime)>0.1: self.mouse_uptime = t0 zlims, tlims = {}, {} ax = self.axes xmin, xmax = ax.get_xlim() ymin, ymax = ax.get_ylim() zlims[ax] = [xmin, xmax, ymin, ymax] if len(self.conf.zoom_lims) == 0: self.conf.zoom_lims.append(zlims) ax_inv = ax.transData.inverted try: x1, y1 = ax_inv().transform((event.x, event.y)) except: x1, y1 = self.x_lastmove, self.y_lastmove try: x0, y0 = ax_inv().transform((ini_x, ini_y)) except: x0, y0 = ini_xd, ini_yd tlims[ax] = [int(round(min(x0, x1))), int(round(max(x0, x1))), int(round(min(y0, y1))), int(round(max(y0, y1)))] self.conf.zoom_lims.append(tlims) # now apply limits: self.set_viewlimits() if callable(self.zoom_callback): self.zoom_callback(wid=self.GetId(), limits=tlims[ax])",leftup event handler for zoom mode in images "def size_history(self,size_data): """"""Return the a DataFrame, indexed by day, with columns containing story size for each issue. In addition, columns are soted by Jira Issue key. First by Project and then by id number. """""" def my_merge(df1, df2): # http://stackoverflow.com/questions/34411495/pandas-merge-several-dataframes res = pd.merge(df1, df2, how='outer', left_index=True, right_index=True) cols = sorted(res.columns) pairs = [] for col1, col2 in zip(cols[:-1], cols[1:]): if col1.endswith('_x') and col2.endswith('_y'): pairs.append((col1, col2)) for col1, col2 in pairs: res[col1[:-2]] = res[col1].combine_first(res[col2]) res = res.drop([col1, col2], axis=1) return res dfs_key = [] # Group the dataframe by regiment, and for each regiment, for name, group in size_data.groupby('key'): dfs = [] for row in group.itertuples(): # print(row.Index, row.fromDate,row.toDate, row.size) dates = pd.date_range(start=row.fromDate, end=row.toDate) sizes = [row.size] * len(dates) data = {'date': dates, 'size': sizes} df2 = pd.DataFrame(data, columns=['date', 'size']) pd.to_datetime(df2['date'], format=('%Y-%m-%d')) df2.set_index(['date'], inplace=True) dfs.append(df2) # df_final = reduce(lambda left,right: pd.merge(left,right), dfs) df_key = (reduce(my_merge, dfs)) df_key.columns = [name if x == 'size' else x for x in df_key.columns] dfs_key.append(df_key) df_all = (reduce(my_merge, dfs_key)) # Sort the columns based on Jira Project code and issue number mykeys = df_all.columns.values.tolist() mykeys.sort(key=lambda x: x.split('-')[0] + '-' + str(int(x.split('-')[1])).zfill(6)) df_all = df_all[mykeys] # Reindex to make sure we have all dates start, end = df_all.index.min(), df_all.index.max() df_all = df_all.reindex(pd.date_range(start, end, freq='D'), method='ffill') return df_all","Return the a DataFrame, indexed by day, with columns containing story size for each issue. In addition, columns are soted by Jira Issue key. First by Project and then by id number." "def find_version(filename): """""" Find package version in file. """""" import re content = read(filename) version_match = re.search( r""^__version__ = ['\""]([^'\""]*)['\""]"", content, re.M ) if version_match: return version_match.group(1) raise RuntimeError('Unable to find version string.')",Find package version in file. 
"def jsonify_log_record(self, log_record): """"""Returns a json string of the log record."""""" return self.json_serializer(log_record, default=self.json_default, cls=self.json_encoder, indent=self.json_indent, ensure_ascii=self.json_ensure_ascii)",Returns a json string of the log record. "def b(self, number): """"""A parameter to tune the amount of field length normalisation that is applied when calculating relevance scores. A value of 0 will completely disable any normalisation and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b will be clamped to the range 0 - 1. """""" if number < 0: self._b = 0 elif number > 1: self._b = 1 else: self._b = number","A parameter to tune the amount of field length normalisation that is applied when calculating relevance scores. A value of 0 will completely disable any normalisation and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b will be clamped to the range 0 - 1." "def _get_attr_value(instance, attr, default=None): """""" Simple helper to get the value of an instance's attribute if it exists. If the instance attribute is callable it will be called and the result will be returned. Optionally accepts a default value to return if the attribute is missing. Defaults to `None` >>> class Foo(object): ... bar = 'baz' ... def hi(self): ... return 'hi' >>> f = Foo() >>> _get_attr_value(f, 'bar') 'baz' >>> _get_attr_value(f, 'xyz') >>> _get_attr_value(f, 'xyz', False) False >>> _get_attr_value(f, 'hi') 'hi' """""" value = default if hasattr(instance, attr): value = getattr(instance, attr) if callable(value): value = value() return value","Simple helper to get the value of an instance's attribute if it exists. If the instance attribute is callable it will be called and the result will be returned. Optionally accepts a default value to return if the attribute is missing. Defaults to `None` >>> class Foo(object): ... bar = 'baz' ... def hi(self): ... return 'hi' >>> f = Foo() >>> _get_attr_value(f, 'bar') 'baz' >>> _get_attr_value(f, 'xyz') >>> _get_attr_value(f, 'xyz', False) False >>> _get_attr_value(f, 'hi') 'hi'" "def objects_delete(self, bucket, key): """"""Deletes the specified object. Args: bucket: the name of the bucket. key: the key of the object within the bucket. Raises: Exception if there is an error performing the operation. """""" url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key))) datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials, raw_response=True)","Deletes the specified object. Args: bucket: the name of the bucket. key: the key of the object within the bucket. Raises: Exception if there is an error performing the operation." "def append(self, data): """"""Append data to a file."""""" data_length = len(data) if self._size + data_length > self._flush_size: self.flush() if not self._exclusive and data_length > _FILE_POOL_MAX_SIZE: raise errors.Error( ""Too big input %s (%s)."" % (data_length, _FILE_POOL_MAX_SIZE)) else: self._buffer.append(data) self._size += data_length if self._size > self._flush_size: self.flush()",Append data to a file. 
"def _init_action_list(self, action_filename): """"""Parses the file and populates the data."""""" self.actions = list() self.hiid_to_action_index = dict() f = codecs.open(action_filename, 'r', encoding='latin-1') first_line = True for line in f: line = line.rstrip() if first_line: # Ignore the first line first_line = False else: self.actions.append(GenewaysAction(line)) latestInd = len(self.actions)-1 hiid = self.actions[latestInd].hiid if hiid in self.hiid_to_action_index: raise Exception('action hiid not unique: %d' % hiid) self.hiid_to_action_index[hiid] = latestInd",Parses the file and populates the data. "def clip_box(dataset, bounds=None, invert=True, factor=0.35): """"""Clips a dataset by a bounding box defined by the bounds. If no bounds are given, a corner of the dataset bounds will be removed. Parameters ---------- bounds : tuple(float) Length 6 iterable of floats: (xmin, xmax, ymin, ymax, zmin, zmax) invert : bool Flag on whether to flip/invert the clip factor : float, optional If bounds are not given this is the factor along each axis to extract the default box. """""" if bounds is None: def _get_quarter(dmin, dmax): """"""internal helper to get a section of the given range"""""" return dmax - ((dmax - dmin) * factor) xmin, xmax, ymin, ymax, zmin, zmax = dataset.bounds xmin = _get_quarter(xmin, xmax) ymin = _get_quarter(ymin, ymax) zmin = _get_quarter(zmin, zmax) bounds = [xmin, xmax, ymin, ymax, zmin, zmax] if isinstance(bounds, (float, int)): bounds = [bounds, bounds, bounds] if len(bounds) == 3: xmin, xmax, ymin, ymax, zmin, zmax = dataset.bounds bounds = (xmin,xmin+bounds[0], ymin,ymin+bounds[1], zmin,zmin+bounds[2]) if not isinstance(bounds, collections.Iterable) or len(bounds) != 6: raise AssertionError('Bounds must be a length 6 iterable of floats') xmin, xmax, ymin, ymax, zmin, zmax = bounds alg = vtk.vtkBoxClipDataSet() alg.SetInputDataObject(dataset) alg.SetBoxClip(xmin, xmax, ymin, ymax, zmin, zmax) port = 0 if invert: # invert the clip if needed port = 1 alg.GenerateClippedOutputOn() alg.Update() return _get_output(alg, oport=port)","Clips a dataset by a bounding box defined by the bounds. If no bounds are given, a corner of the dataset bounds will be removed. Parameters ---------- bounds : tuple(float) Length 6 iterable of floats: (xmin, xmax, ymin, ymax, zmin, zmax) invert : bool Flag on whether to flip/invert the clip factor : float, optional If bounds are not given this is the factor along each axis to extract the default box." "def to_dict(self): '''Save this configuration set into a dictionary.''' d = {'id': self.id} data = [] for c in self._config_data: data.append(c.to_dict()) if data: d['configurationData'] = data return d",Save this configuration set into a dictionary. "def get_colour(index): """""" get color number index. """""" colours = [ 'red', 'blue', 'green', 'pink', 'yellow', 'magenta', 'orange', 'cyan', ] default_colour = 'purple' if index < len(colours): return colours[index] else: return default_colour",get color number index. "def list_all_directories(self): """""" Utility method that yields all directories on the device's file systems. """""" def list_dirs_recursively(directory): if directory == self.filesystem: yield directory d_gen = itertools.chain( directory.directories, *tuple(list_dirs_recursively(d) for d in directory.directories)) for d in d_gen: yield d return list_dirs_recursively(self.filesystem)","Utility method that yields all directories on the device's file systems." 
"def psql(self, args): r""""""Invoke psql, passing the given command-line arguments. Typical values: ['-c', ] or ['-f', ]. Connection parameters are taken from self. STDIN, STDOUT, and STDERR are inherited from the parent. WARNING: This method uses the psql(1) program, which ignores SQL errors by default. That hides many real errors, making our software less reliable. To overcome this flaw, add this line to the head of your SQL: ""\set ON_ERROR_STOP TRUE"" @return: None. Raises an exception upon error, but *ignores SQL errors* unless ""\set ON_ERROR_STOP TRUE"" is used. """""" argv = [ PostgresFinder.find_root() / 'psql', '--quiet', '-U', self.user, '-h', self.host, '-p', self.port, ] + args + [self.db_name] subprocess.check_call(argv)","r""""""Invoke psql, passing the given command-line arguments. Typical values: ['-c', ] or ['-f', ]. Connection parameters are taken from self. STDIN, STDOUT, and STDERR are inherited from the parent. WARNING: This method uses the psql(1) program, which ignores SQL errors by default. That hides many real errors, making our software less reliable. To overcome this flaw, add this line to the head of your SQL: ""\set ON_ERROR_STOP TRUE"" @return: None. Raises an exception upon error, but *ignores SQL errors* unless ""\set ON_ERROR_STOP TRUE"" is used." "def set_bracket_matcher_color_scheme(self, color_scheme): """"""Set color scheme for matched parentheses."""""" bsh = sh.BaseSH(parent=self, color_scheme=color_scheme) mpcolor = bsh.get_matched_p_color() self._bracket_matcher.format.setBackground(mpcolor)",Set color scheme for matched parentheses. "def gcd(a, b): """""" Calculates the Greatest Common Divisor. :param a: the first number. :param b: the second number. :return: GCD(a, b) """""" while b: a, b = b, a % b return a","Calculates the Greatest Common Divisor. :param a: the first number. :param b: the second number. :return: GCD(a, b)" "def sort(self): """""" Sort triggers and their associated responses """""" # Sort triggers by word and character length first for priority, triggers in self._triggers.items(): self._log.debug('Sorting priority {priority} triggers'.format(priority=priority)) # Get and sort our atomic and wildcard patterns atomics = [trigger for trigger in triggers if trigger.pattern_is_atomic] wildcards = [trigger for trigger in triggers if not trigger.pattern_is_atomic] atomics = sorted(atomics, key=lambda trigger: (trigger.pattern_words, trigger.pattern_len), reverse=True) wildcards = sorted(wildcards, key=lambda trigger: (trigger.pattern_words, trigger.pattern_len), reverse=True) # Replace our sorted triggers self._triggers[priority] = atomics + wildcards # Finally, sort triggers by priority self._sorted_triggers = [] for triggers in [self._triggers[priority] for priority in sorted(self._triggers.keys(), reverse=True)]: for trigger in triggers: self._sorted_triggers.append(trigger) self.sorted = True",Sort triggers and their associated responses "def from_string(cls, link): """"""Return a new SheetUrl instance from parsed URL string. >>> SheetUrl.from_string('https://docs.google.com/spreadsheets/d/spam') """""" ma = cls._pattern.search(link) if ma is None: raise ValueError(link) id = ma.group('id') return cls(id)","Return a new SheetUrl instance from parsed URL string. 
>>> SheetUrl.from_string('https://docs.google.com/spreadsheets/d/spam') " "def issueJob(self, jobNode): """"""Add a job to the queue of jobs."""""" jobNode.command = ' '.join((resolveEntryPoint('_toil_worker'), jobNode.jobName, self.jobStoreLocator, jobNode.jobStoreID)) # jobBatchSystemID is an int that is an incremented counter for each job jobBatchSystemID = self.batchSystem.issueBatchJob(jobNode) self.jobBatchSystemIDToIssuedJob[jobBatchSystemID] = jobNode if jobNode.preemptable: # len(jobBatchSystemIDToIssuedJob) should always be greater than or equal to preemptableJobsIssued, # so increment this value after the job is added to the issuedJob dict self.preemptableJobsIssued += 1 cur_logger = logger.debug if jobNode.jobName.startswith(CWL_INTERNAL_JOBS) else logger.info cur_logger(""Issued job %s with job batch system ID: "" ""%s and cores: %s, disk: %s, and memory: %s"", jobNode, str(jobBatchSystemID), int(jobNode.cores), bytes2human(jobNode.disk), bytes2human(jobNode.memory)) if self.toilMetrics: self.toilMetrics.logIssuedJob(jobNode) self.toilMetrics.logQueueSize(self.getNumberOfJobsIssued())",Add a job to the queue of jobs. "def update(self): """""" Get latest data from the sensor and update own state. Returns: dict: Latest state """""" (data_format, data) = RuuviTagSensor.get_data(self._mac, self._bt_device) if data == self._data: return self._state self._data = data if self._data is None: self._state = {} else: self._state = get_decoder(data_format).decode_data(self._data) return self._state","Get latest data from the sensor and update own state. Returns: dict: Latest state" "def update_mailing_lists_in_m2m( sender=None, userprofile=None, pk_set=None, subscribe=None, unsubscribe=None, verbose=None, email_enabled=None, ): """""" m2m_model = m2m model class for 'email_notifications' or 'sms_notifications'. """""" response = None email_enabled = email_enabled or settings.EMAIL_ENABLED if email_enabled and site_notifications.loaded: if userprofile.email_notifications.through == sender: NotificationModel = django_apps.get_model(""edc_notification.Notification"") for notification_obj in NotificationModel.objects.filter( pk__in=list(pk_set), enabled=True ): notification_cls = site_notifications.get(notification_obj.name) notification = notification_cls() manager = MailingListManager( address=notification.email_to[0], display_name=notification.display_name, name=notification.name, ) response = manager.create(verbose=verbose) if subscribe: response = manager.subscribe(userprofile.user, verbose=verbose) elif unsubscribe: response = manager.unsubscribe(userprofile.user, verbose=verbose) return response","m2m_model = m2m model class for 'email_notifications' or 'sms_notifications'." "def DoxyfileParse(file_contents): """""" Parse a Doxygen source file and return a dictionary of all the values. Values will be strings and lists of strings.
"""""" data = {} import shlex lex = shlex.shlex(instream = file_contents, posix = True) lex.wordchars += ""*+./-:"" lex.whitespace = lex.whitespace.replace(""\n"", """") lex.escape = """" lineno = lex.lineno token = lex.get_token() key = token # the first token should be a key last_token = """" key_token = False next_key = False new_data = True def append_data(data, key, new_data, token): if new_data or len(data[key]) == 0: data[key].append(token) else: data[key][-1] += token while token: if token in ['\n']: if last_token not in ['\\']: key_token = True elif token in ['\\']: pass elif key_token: key = token key_token = False else: if token == ""+="": if not data.has_key(key): data[key] = list() elif token == ""="": if key == ""TAGFILES"" and data.has_key(key): append_data( data, key, False, ""="" ) new_data=False else: data[key] = list() else: append_data( data, key, new_data, token ) new_data = True last_token = token token = lex.get_token() if last_token == '\\' and token != '\n': new_data = False append_data( data, key, new_data, '\\' ) # compress lists of len 1 into single strings for (k, v) in data.items(): if len(v) == 0: data.pop(k) # items in the following list will be kept as lists and not converted to strings if k in [""INPUT"", ""FILE_PATTERNS"", ""EXCLUDE_PATTERNS"", ""TAGFILES""]: continue if len(v) == 1: data[k] = v[0] return data","Parse a Doxygen source file and return a dictionary of all the values. Values will be strings and lists of strings." "def derivLogCdfNormal(z): """""" Robust implementations of derivative of the log cdf of a standard normal. @see [[https://github.com/mseeger/apbsint/blob/master/src/eptools/potentials/SpecfunServices.h original implementation]] in C from Matthias Seeger. """""" if (abs(z) < ERF_CODY_LIMIT1): # Phi(z) approx (1 + y R_3(y^2))/2, y = z/sqrt(2) return 2.0 * np.exp(logPdfNormal(z)) / (1.0 + (z / M_SQRT2) * _erfRationalHelperR3(0.5 * z * z)) elif (z < 0.0): # Phi(z) approx N(z) Q(-z)/(-z), z<0 return -z / _erfRationalHelper(-z) else: t = np.exp(logPdfNormal(z)) return t / (1.0 - t * _erfRationalHelper(z) / z)","Robust implementations of derivative of the log cdf of a standard normal. @see [[https://github.com/mseeger/apbsint/blob/master/src/eptools/potentials/SpecfunServices.h original implementation]] in C from Matthias Seeger." "def contains_empty(features): """"""Check features data are not empty :param features: The features data to check. :type features: list of numpy arrays. :return: True if one of the array is empty, False else. """""" if not features: return True for feature in features: if feature.shape[0] == 0: return True return False","Check features data are not empty :param features: The features data to check. :type features: list of numpy arrays. :return: True if one of the array is empty, False else." "def expect(self, value, message='Failed: ""{actual} {operator} {expected}"" after step ""{step}""', operator='=='): """"""Add an 'assertion' action to the steps. This will evaluate the return value of the last 'do' step and compare it to the value passed here using the specified operator. Checks that the first function will return 2 >>> AssertionChain().do(lambda: 1 + 1, 'add 1 + 1').expect(2) This will check that your function does not return None >>> AssertionChain().do(lambda: myfunction(), 'call my function').expect(None, operator='is not') :param value: The expected value :param message: The error message to raise if the assertion fails. 
You can access the variables: {actual} -- The actual value {expected} -- The expected value {step} -- The step just performed, which did not meet the expectation {operator} -- The operator used to make the comparison """""" if operator not in self.valid_operators: raise ValueError('Illegal operator specified for ') self.items.put(ChainItem(value, self.expect, message, operator=operator)) return self","Add an 'assertion' action to the steps. This will evaluate the return value of the last 'do' step and compare it to the value passed here using the specified operator. Checks that the first function will return 2 >>> AssertionChain().do(lambda: 1 + 1, 'add 1 + 1').expect(2) This will check that your function does not return None >>> AssertionChain().do(lambda: myfunction(), 'call my function').expect(None, operator='is not') :param value: The expected value :param message: The error message to raise if the assertion fails. You can access the variables: {actual} -- The actual value {expected} -- The expected value {step} -- The step just performed, which did not meet the expectation {operator} -- The operator used to make the comparison" "def get_update_sql(self, rows): """""" Returns SQL UPDATE for rows ``rows`` .. code-block:: sql UPDATE table_name SET field1 = new_values.field1 field2 = new_values.field2 FROM ( VALUES (1, 'value1', 'value2'), (2, 'value1', 'value2') ) AS new_values (id, field1, field2) WHERE table_name.id = new_values.id; """""" field_names = self.get_field_names() pk = field_names[0] update_field_names = field_names[1:] num_columns = len(rows[0]) if num_columns < 2: raise Exception('At least 2 fields must be passed to get_update_sql') all_null_indices = [ all(row[index] is None for row in rows) for index in range(1, num_columns) ] field_names_sql = '({0})'.format(', '.join(field_names)) row_values = [] sql_args = [] for row in rows: placeholders = [] for value in row: sql_args.append(value) placeholders.append('%s') row_values.append('({0})'.format(', '.join(placeholders))) row_values_sql = ', '.join(row_values) # build field list for SET portion set_field_list = [ '{0} = NULL'.format(field_name) if all_null_indices[idx] else '{0} = new_values.{0}'.format(field_name) for idx, field_name in enumerate(update_field_names) ] set_field_list_sql = ', '.join(set_field_list) self.sql = 'UPDATE {0} SET {1} FROM (VALUES {2}) AS new_values {3} WHERE {0}.{4} = new_values.{4}'.format( self.tables[0].get_identifier(), set_field_list_sql, row_values_sql, field_names_sql, pk ) return self.sql, sql_args","Returns SQL UPDATE for rows ``rows`` .. code-block:: sql UPDATE table_name SET field1 = new_values.field1 field2 = new_values.field2 FROM ( VALUES (1, 'value1', 'value2'), (2, 'value1', 'value2') ) AS new_values (id, field1, field2) WHERE table_name.id = new_values.id;" "def discard_all(self, filterfunc=None): """"""Discard all waiting messages. :param filterfunc: A filter function to only discard the messages this filter returns. :returns: the number of messages discarded. *WARNING*: All incoming messages will be ignored and not processed. Example using filter: >>> def waiting_feeds_only(message): ... try: ... message_data = message.decode() ... except: # Should probably be more specific. ... pass ... ... if message_data.get(""type"") == ""feed"": ... return True ... else: ... 
return False """""" if not filterfunc: return self.backend.queue_purge(self.queue) if self.no_ack or self.auto_ack: raise Exception(""discard_all: Can't use filter with auto/no-ack."") discarded_count = 0 while True: message = self.fetch() if message is None: return discarded_count if filterfunc(message): message.ack() discarded_count += 1","Discard all waiting messages. :param filterfunc: A filter function to only discard the messages this filter returns. :returns: the number of messages discarded. *WARNING*: All incoming messages will be ignored and not processed. Example using filter: >>> def waiting_feeds_only(message): ... try: ... message_data = message.decode() ... except: # Should probably be more specific. ... pass ... ... if message_data.get(""type"") == ""feed"": ... return True ... else: ... return False" "def get_stats(self, request, context): """"""Returns the server statistics."""""" _log_request(request, context) m = self.listener.memory return clearly_pb2.StatsMessage( task_count=m.task_count, event_count=m.event_count, len_tasks=len(m.tasks), len_workers=len(m.workers) )",Returns the server statistics. "def get_name(default: str = 'no name set'): """""" Get the currently-configured name of the machine """""" try: with open('/etc/machine-info') as emi: contents = emi.read() except OSError: LOG.exception( ""Couldn't read /etc/machine-info"") contents = '' for line in contents.split('\n'): if line.startswith('PRETTY_HOSTNAME='): return '='.join(line.split('=')[1:]) LOG.warning(f""No PRETTY_HOSTNAME in {contents}, defaulting to {default}"") try: _update_pretty_hostname(default) except OSError: LOG.exception(""Could not write new pretty hostname!"") return default",Get the currently-configured name of the machine "def _get_error_response(self, exception): """""" Transform pyston exceptions to Is-core exceptions and raise it """""" response_exceptions = { MimerDataException: HTTPBadRequestResponseException, NotAllowedException: HTTPForbiddenResponseException, UnsupportedMediaTypeException: HTTPUnsupportedMediaTypeResponseException, Http404: Http404, ResourceNotFoundException: Http404, NotAllowedMethodException: HTTPMethodNotAllowedResponseException, DuplicateEntryException: HTTPDuplicateResponseException, ConflictException: HTTPDuplicateResponseException, } response_exception = response_exceptions.get(type(exception)) if response_exception: raise response_exception return super(RESTResourceMixin, self)._get_error_response(exception)",Transform pyston exceptions to Is-core exceptions and raise it "def _handle_request_noblock(self): """""" Handle one request, without blocking. """""" try: request, client_address = self.get_request() except socket.error: return if self.verify_request(request, client_address): try: self.process_request(request, client_address) except: self.handle_error(request, client_address) self.shutdown_request(request)","Handle one request, without blocking." "def search_configuration_files(findstr, replacestr = None): '''This function could be used to find and replace paths in the configuration files.
At present, it only finds phrases.''' F = open(configurationFilesLocation, ""r"") lines = F.readlines() F.close() allerrors = {} alloutput = {} for line in lines: line = line.strip() if line: if line.endswith(""generate_fragments.py""): # Do not parse the Python script but check that it exists if not(os.path.exists(line)): allerrors[line] = ""File/directory %s does not exist."" % line else: cmd = [""grep"", ""-n"", ""-i"", findstr, line] output = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() errors = output[1] output = output[0] if errors: errors = errors.strip() allerrors[line] = errors if output: output = output.strip() alloutput[line] = output.split(""\n"") return alloutput, allerrors","This function could be used to find and replace paths in the configuration files. At present, it only finds phrases." "def xSectionChunk(lines): """""" Parse XSEC Method """""" # Constants KEYWORDS = ('MANNINGS_N', 'BOTTOM_WIDTH', 'BANKFULL_DEPTH', 'SIDE_SLOPE', 'NPAIRS', 'NUM_INTERP', 'X1', 'ERODE', 'MAX_EROSION', 'SUBSURFACE', 'M_RIVER', 'K_RIVER') result = {'mannings_n': None, 'bottom_width': None, 'bankfull_depth': None, 'side_slope': None, 'npairs': None, 'num_interp': None, 'erode': False, 'subsurface': False, 'max_erosion': None, 'm_river': None, 'k_river': None, 'breakpoints': []} chunks = pt.chunk(KEYWORDS, lines) # Parse chunks associated with each key for key, chunkList in iteritems(chunks): # Parse each chunk in the chunk list for chunk in chunkList: # Strip and split the line (only one item in each list) schunk = chunk[0].strip().split() # Cases if key == 'X1': # Extract breakpoint XY pairs x = schunk[1] y = schunk[2] result['breakpoints'].append({'x': x, 'y': y}) if key in ('SUBSURFACE', 'ERODE'): # Set booleans result[key.lower()] = True else: # Extract value result[key.lower()] = schunk[1] return result",Parse XSEC Method "def loop(self, timeout=1.0, max_packets=1): """"""Process network events. This function must be called regularly to ensure communication with the broker is carried out. It calls select() on the network socket to wait for network events. If incoming data is present it will then be processed. Outgoing commands, from e.g. publish(), are normally sent immediately that their function is called, but this is not always possible. loop() will also attempt to send any remaining outgoing messages, which also includes commands that are part of the flow for messages with QoS>0. timeout: The time in seconds to wait for incoming/outgoing network traffic before timing out and returning. max_packets: Not currently used. Returns MQTT_ERR_SUCCESS on success. Returns >0 on error. A ValueError will be raised if timeout < 0"""""" if timeout < 0.0: raise ValueError('Invalid timeout.') self._current_out_packet_mutex.acquire() self._out_packet_mutex.acquire() if self._current_out_packet is None and len(self._out_packet) > 0: self._current_out_packet = self._out_packet.pop(0) if self._current_out_packet: wlist = [self.socket()] else: wlist = [] self._out_packet_mutex.release() self._current_out_packet_mutex.release() # sockpairR is used to break out of select() before the timeout, on a # call to publish() etc. rlist = [self.socket(), self._sockpairR] try: socklist = select.select(rlist, wlist, [], timeout) except TypeError as e: # Socket isn't correct type, in likelihood connection is lost return MQTT_ERR_CONN_LOST except ValueError: # Can occur if we just reconnected but rlist/wlist contain a -1 for # some reason. 
return MQTT_ERR_CONN_LOST except: return MQTT_ERR_UNKNOWN if self.socket() in socklist[0]: rc = self.loop_read(max_packets) if rc or (self._ssl is None and self._sock is None): return rc if self._sockpairR in socklist[0]: # Stimulate output write even though we didn't ask for it, because # at that point the publish or other command wasn't present. socklist[1].insert(0, self.socket()) # Clear sockpairR - only ever a single byte written. try: self._sockpairR.recv(1) except socket.error as err: if err.errno != EAGAIN: raise if self.socket() in socklist[1]: rc = self.loop_write(max_packets) if rc or (self._ssl is None and self._sock is None): return rc return self.loop_misc()","Process network events. This function must be called regularly to ensure communication with the broker is carried out. It calls select() on the network socket to wait for network events. If incoming data is present it will then be processed. Outgoing commands, from e.g. publish(), are normally sent immediately that their function is called, but this is not always possible. loop() will also attempt to send any remaining outgoing messages, which also includes commands that are part of the flow for messages with QoS>0. timeout: The time in seconds to wait for incoming/outgoing network traffic before timing out and returning. max_packets: Not currently used. Returns MQTT_ERR_SUCCESS on success. Returns >0 on error. A ValueError will be raised if timeout < 0" "def find_ent_endurance_tier_price(package, tier_level): """"""Find the price in the given package with the specified tier level :param package: The Enterprise (Endurance) product package :param tier_level: The endurance tier for which a price is desired :return: Returns the price for the given tier, or an error if not found """""" for item in package['items']: for attribute in item.get('attributes', []): if int(attribute['value']) == ENDURANCE_TIERS.get(tier_level): break else: continue price_id = _find_price_id(item['prices'], 'storage_tier_level') if price_id: return price_id raise ValueError(""Could not find price for endurance tier level"")","Find the price in the given package with the specified tier level :param package: The Enterprise (Endurance) product package :param tier_level: The endurance tier for which a price is desired :return: Returns the price for the given tier, or an error if not found" "def fixation_detection(samplemat, saccades, Hz=200, samples2fix=None, respect_trial_borders=False, sample_times=None): ''' Detect Fixation from saccades. Fixations are defined as intervals between saccades. This function also calcuates start and end times (in ms) for each fixation. Input: samplemat: datamat Contains the recorded samples and associated metadata. saccades: ndarray Logical vector that is True for samples that belong to a saccade. Hz: Float Number of samples per second. samples2fix: Dict There is usually metadata associated with the samples (e.g. the trial number). This dictionary can be used to specify how the metadata should be collapsed for one fixation. It contains field names from samplemat as keys and functions as values that return one value when they are called with all samples for one fixation. In addition the function can raise an 'InvalidFixation' exception to signal that the fixation should be discarded. 
''' if samples2fix is None: samples2fix = {} fixations = ~saccades acc = AccumulatorFactory() if not respect_trial_borders: borders = np.where(np.diff(fixations.astype(int)))[0] + 1 else: borders = np.where( ~(np.diff(fixations.astype(int)) == 0) | ~(np.diff(samplemat.trial.astype(int)) == 0))[0] + 1 fixations = 0 * saccades.copy() if not saccades[0]: borders = np.hstack(([0], borders)) #lasts,laste = borders[0], borders[1] for i, (start, end) in enumerate(zip(borders[0::2], borders[1::2])): current = {} for k in samplemat.fieldnames(): if k in list(samples2fix.keys()): current[k] = samples2fix[k](samplemat, k, start, end) else: current[k] = np.mean(samplemat.field(k)[start:end]) current['start_sample'] = start current['end_sample'] = end fixations[start:end] = 1 # Calculate start and end time in ms if sample_times is None: current['start'] = 1000 * start / Hz current['end'] = 1000 * end / Hz else: current['start'] = sample_times[start] current['end'] = sample_times[end] #lasts, laste = start,end acc.update(current) return acc.get_dm(params=samplemat.parameters()), fixations.astype(bool)","Detect Fixation from saccades. Fixations are defined as intervals between saccades. This function also calcuates start and end times (in ms) for each fixation. Input: samplemat: datamat Contains the recorded samples and associated metadata. saccades: ndarray Logical vector that is True for samples that belong to a saccade. Hz: Float Number of samples per second. samples2fix: Dict There is usually metadata associated with the samples (e.g. the trial number). This dictionary can be used to specify how the metadata should be collapsed for one fixation. It contains field names from samplemat as keys and functions as values that return one value when they are called with all samples for one fixation. In addition the function can raise an 'InvalidFixation' exception to signal that the fixation should be discarded." "def is_feeder(self, team_id=None): """"""Ensure ther resource has the role FEEDER."""""" if team_id is None: return self._is_feeder team_id = uuid.UUID(str(team_id)) if team_id not in self.teams_ids: return False return self.teams[team_id]['role'] == 'FEEDER'",Ensure ther resource has the role FEEDER. "def from_rational( cls, value, to_base, precision=None, method=RoundingMethods.ROUND_DOWN ): """""" Convert rational value to a base. :param Rational value: the value to convert :param int to_base: base of result, must be at least 2 :param precision: number of digits in total or None :type precision: int or NoneType :param method: rounding method :type method: element of RoundingMethods.METHODS() :returns: the conversion result and its relation to actual result :rtype: Radix * int :raises BasesValueError: if to_base is less than 2 Complexity: Uncalculated. 
"""""" # pylint: disable=too-many-locals if to_base < 2: raise BasesValueError(to_base, ""to_base"", ""must be at least 2"") if precision is not None and precision < 0: raise BasesValueError(precision, ""precision"", ""must be at least 0"") if value == 0: non_repeating_part = [] if precision is None else precision * [0] return (Radix(0, [], non_repeating_part, [], to_base), 0) if value < 0: sign = -1 else: sign = 1 div_method = method if sign == -1: value = abs(value) div_method = cls._reverse_rounding_method(method) numerator = Nats.convert_from_int(value.numerator, to_base) denominator = Nats.convert_from_int(value.denominator, to_base) (integer_part, non_repeating_part, repeating_part, relation) = \ NatDivision.division( denominator, numerator, to_base, precision, div_method ) relation = relation * sign result = Radix( sign, integer_part, non_repeating_part, repeating_part, to_base ) if precision is not None: (result, rel) = result.rounded(precision, method) relation = relation if rel == 0 else rel return (result, relation)","Convert rational value to a base. :param Rational value: the value to convert :param int to_base: base of result, must be at least 2 :param precision: number of digits in total or None :type precision: int or NoneType :param method: rounding method :type method: element of RoundingMethods.METHODS() :returns: the conversion result and its relation to actual result :rtype: Radix * int :raises BasesValueError: if to_base is less than 2 Complexity: Uncalculated." "def _find_form_xobject_images(pdf, container, contentsinfo): """"""Find any images that are in Form XObjects in the container The container may be a page, or a parent Form XObject. """""" if '/Resources' not in container: return resources = container['/Resources'] if '/XObject' not in resources: return xobjs = resources['/XObject'].as_dict() for xobj in xobjs: candidate = xobjs[xobj] if candidate['/Subtype'] != '/Form': continue form_xobject = candidate for settings in contentsinfo.xobject_settings: if settings.name != xobj: continue # Find images once for each time this Form XObject is drawn. # This could be optimized to cache the multiple drawing events # but in practice both Form XObjects and multiple drawing of the # same object are both very rare. ctm_shorthand = settings.shorthand yield from _process_content_streams( pdf=pdf, container=form_xobject, shorthand=ctm_shorthand )","Find any images that are in Form XObjects in the container The container may be a page, or a parent Form XObject." "def validate_uuid(value): """""" UUID 128-bit validator """""" if value and not isinstance(value, UUID): try: return UUID(str(value), version=4) except (AttributeError, ValueError): raise ValidationError('not a valid UUID') return value",UUID 128-bit validator "def _has_tag(version, debug=False): """""" Determine a version is a local git tag name or not. :param version: A string containing the branch/tag/sha to be determined. :param debug: An optional bool to toggle debug output. :return: bool """""" cmd = sh.git.bake('show-ref', '--verify', '--quiet', ""refs/tags/{}"".format(version)) try: util.run_command(cmd, debug=debug) return True except sh.ErrorReturnCode: return False","Determine a version is a local git tag name or not. :param version: A string containing the branch/tag/sha to be determined. :param debug: An optional bool to toggle debug output. :return: bool" "def mkdir(self, path, parents=True, mode=0o755, raise_if_exists=False): """""" Use snakebite.mkdir, if available. 
Snakebite's mkdir method allows control over full path creation, so by default, tell it to build a full path to work like ``hadoop fs -mkdir``. :param path: HDFS path to create :type path: string :param parents: create any missing parent directories :type parents: boolean, default is True :param mode: \\*nix style owner/group/other permissions :type mode: octal, default 0755 """""" result = list(self.get_bite().mkdir(self.list_path(path), create_parent=parents, mode=mode)) if raise_if_exists and ""ile exists"" in result[0].get('error', ''): raise luigi.target.FileAlreadyExists(""%s exists"" % (path, )) return result","Use snakebite.mkdir, if available. Snakebite's mkdir method allows control over full path creation, so by default, tell it to build a full path to work like ``hadoop fs -mkdir``. :param path: HDFS path to create :type path: string :param parents: create any missing parent directories :type parents: boolean, default is True :param mode: \\*nix style owner/group/other permissions :type mode: octal, default 0755" "def sparkline(data): ''' Return a spark line for the given data set. :value data: sequence of numeric values >>> print sparkline([1, 2, 3, 4, 5, 6, 5, 4, 3, 1, 5, 6]) # doctest: +SKIP ▁▂▃▄▅▆▅▄▃▁▅▆ ''' min_value = float(min(data)) max_value = float(max(data)) steps = (max_value - min_value) / float(len(SPARKCHAR) - 1) return ''.join([ SPARKCHAR[int((float(value) - min_value) / steps)] for value in data ])","Return a spark line for the given data set. :value data: sequence of numeric values >>> print sparkline([1, 2, 3, 4, 5, 6, 5, 4, 3, 1, 5, 6]) # doctest: +SKIP ▁▂▃▄▅▆▅▄▃▁▅▆" "def transform_literals(rdf, literalmap): """"""Transform literal properties of Concepts, as defined by config file."""""" affected_types = (SKOS.Concept, SKOS.Collection, SKOSEXT.DeprecatedConcept) props = set() for t in affected_types: for conc in rdf.subjects(RDF.type, t): for p, o in rdf.predicate_objects(conc): if isinstance(o, Literal) \ and (p in literalmap or not in_general_ns(p)): props.add(p) for p in props: if mapping_match(p, literalmap): newval = mapping_get(p, literalmap) newuris = [v[0] for v in newval] logging.debug(""transform literal %s -> %s"", p, str(newuris)) replace_predicate( rdf, p, newuris, subjecttypes=affected_types) else: logging.info(""Don't know what to do with literal %s"", p)","Transform literal properties of Concepts, as defined by config file." "def project_sequence(s, permutation=None): """""" Projects a point or sequence of points using `project_point` to lists xs, ys for plotting with Matplotlib. Parameters ---------- s, Sequence-like The sequence of points (3-tuples) to be projected. Returns ------- xs, ys: The sequence of projected points in coordinates as two lists """""" xs, ys = unzip([project_point(p, permutation=permutation) for p in s]) return xs, ys","Projects a point or sequence of points using `project_point` to lists xs, ys for plotting with Matplotlib. Parameters ---------- s, Sequence-like The sequence of points (3-tuples) to be projected. Returns ------- xs, ys: The sequence of projected points in coordinates as two lists" "def has_signature(body, sender): '''Checks if the body has signature. 
Returns True or False.''' non_empty = [line for line in body.splitlines() if line.strip()] candidate = non_empty[-SIGNATURE_MAX_LINES:] upvotes = 0 for line in candidate: # we check lines for sender's name, phone, email and url, # those signature lines don't take more then 27 lines if len(line.strip()) > 27: continue elif contains_sender_names(sender)(line): return True elif (binary_regex_search(RE_RELAX_PHONE)(line) + binary_regex_search(RE_EMAIL)(line) + binary_regex_search(RE_URL)(line) == 1): upvotes += 1 if upvotes > 1: return True",Checks if the body has signature. Returns True or False. "def _get_inherited_field_types(class_to_field_type_overrides, schema_graph): """"""Return a dictionary describing the field type overrides in subclasses."""""" inherited_field_type_overrides = dict() for superclass_name, field_type_overrides in class_to_field_type_overrides.items(): for subclass_name in schema_graph.get_subclass_set(superclass_name): inherited_field_type_overrides.setdefault(subclass_name, dict()) inherited_field_type_overrides[subclass_name].update(field_type_overrides) return inherited_field_type_overrides",Return a dictionary describing the field type overrides in subclasses. "def parse_masked_phone_number(html, parser=None): """"""Get masked phone number from security check html :param html: str: raw html text :param parser: bs4.BeautifulSoup: html parser :return: tuple of phone prefix and suffix, for example: ('+1234', '89') :rtype : tuple """""" if parser is None: parser = bs4.BeautifulSoup(html, 'html.parser') fields = parser.find_all('span', {'class': 'field_prefix'}) if not fields: raise VkParseError( 'No ... in the \n%s' % html) result = [] for f in fields: value = f.get_text().replace(six.u('\xa0'), '') result.append(value) return tuple(result)","Get masked phone number from security check html :param html: str: raw html text :param parser: bs4.BeautifulSoup: html parser :return: tuple of phone prefix and suffix, for example: ('+1234', '89') :rtype : tuple" "def emojificate_filter(content, autoescape=True): ""Convert any emoji in a string into accessible content."" # return mark_safe(emojificate(content)) if autoescape: esc = conditional_escape else: esc = lambda x: x return mark_safe(emojificate(esc(content)))",Convert any emoji in a string into accessible content. "def spacelist(listtospace, spacechar="" ""): """""" Convert a list to a string with all of the list's items spaced out. :type listtospace: list :param listtospace: The list to space out. :type spacechar: string :param spacechar: The characters to insert between each list item. Default is: "" "". """""" output = '' space = '' output += str(listtospace[0]) space += spacechar for listnum in range(1, len(listtospace)): output += space output += str(listtospace[listnum]) return output","Convert a list to a string with all of the list's items spaced out. :type listtospace: list :param listtospace: The list to space out. :type spacechar: string :param spacechar: The characters to insert between each list item. Default is: "" ""." "def get_params(self, tid): """""" Returns the parameters found in the stack when the hooked function was last called by this thread. @type tid: int @param tid: Thread global ID. @rtype: tuple( arg, arg, arg... ) @return: Tuple of arguments. 
"""""" try: params = self.get_params_stack(tid)[-1] except IndexError: msg = ""Hooked function called from thread %d already returned"" raise IndexError(msg % tid) return params","Returns the parameters found in the stack when the hooked function was last called by this thread. @type tid: int @param tid: Thread global ID. @rtype: tuple( arg, arg, arg... ) @return: Tuple of arguments." "def make_heading_abstracts(self, heading_div): """""" An article may contain data for various kinds of abstracts. This method works on those that are included in the Heading. This is displayed after the Authors and Affiliations. Metadata element, content derived from FrontMatter """""" for abstract in self.article.root.xpath('./front/article-meta/abstract'): #Make a copy of the abstract abstract_copy = deepcopy(abstract) abstract_copy.tag = 'div' #Abstracts are a rather diverse bunch, keep an eye on them! title_text = abstract_copy.xpath('./title[1]/text()') for title in abstract_copy.findall('.//title'): remove(title) #Create a header for the abstract abstract_header = etree.Element('h2') remove_all_attributes(abstract_copy) #Set the header text and abstract id according to abstract type abstract_type = abstract.attrib.get('abstract-type') log.debug('Handling Abstrace of with abstract-type=""{0}""'.format(abstract_type)) if abstract_type == 'summary': abstract_header.text = 'Author Summary' abstract_copy.attrib['id'] = 'author-summary' elif abstract_type == 'editors-summary': abstract_header.text = 'Editors\' Summary' abstract_copy.attrib['id'] = 'editor-summary' elif abstract_type == 'synopsis': abstract_header.text = 'Synopsis' abstract_copy.attrib['id'] = 'synopsis' elif abstract_type == 'alternate': #Right now, these will only be included if there is a title to #give it if title_text: abstract_header.text= title_text[0] abstract_copy.attrib['id'] = 'alternate' else: continue elif abstract_type is None: abstract_header.text = 'Abstract' abstract_copy.attrib['id'] = 'abstract' elif abstract_type == 'toc': # We don't include these continue else: # Warn about these, then skip log.warning('No handling for abstract-type=""{0}""'.format(abstract_type)) continue #abstract_header.text = abstract_type #abstract_copy.attrib['id'] = abstract_type heading_div.append(abstract_header) heading_div.append(abstract_copy)","An article may contain data for various kinds of abstracts. This method works on those that are included in the Heading. This is displayed after the Authors and Affiliations. Metadata element, content derived from FrontMatter" "def clean(image, mask=None, iterations = 1): '''Remove isolated pixels 0 0 0 0 0 0 0 1 0 -> 0 0 0 0 0 0 0 0 0 Border pixels and pixels adjoining masks are removed unless one valid neighbor is true. ''' global clean_table if mask is None: masked_image = image else: masked_image = image.astype(bool).copy() masked_image[~mask] = False result = table_lookup(masked_image, clean_table, False, iterations) if not mask is None: result[~mask] = image[~mask] return result","Remove isolated pixels 0 0 0 0 0 0 0 1 0 -> 0 0 0 0 0 0 0 0 0 Border pixels and pixels adjoining masks are removed unless one valid neighbor is true." "def _skip_frame(self): """"""Skip the next time frame"""""" for line in self._f: if line == 'ITEM: ATOMS\n': break for i in range(self.num_atoms): next(self._f)",Skip the next time frame "def to_cfn_resource_name(name): # type: (str) -> str """"""Transform a name to a valid cfn name. This will convert the provided name to a CamelCase name. 
It's possible that the conversion to a CFN resource name can result in name collisions. It's up to the caller to handle name collisions appropriately. """""" if not name: raise ValueError(""Invalid name: %r"" % name) word_separators = ['-', '_'] for word_separator in word_separators: word_parts = [p for p in name.split(word_separator) if p] name = ''.join([w[0].upper() + w[1:] for w in word_parts]) return re.sub(r'[^A-Za-z0-9]+', '', name)","Transform a name to a valid cfn name. This will convert the provided name to a CamelCase name. It's possible that the conversion to a CFN resource name can result in name collisions. It's up to the caller to handle name collisions appropriately." "def rank(keys, axis=semantics.axis_default): """"""where each item is in the pecking order. Parameters ---------- keys : indexable object Returns ------- ndarray, [keys.size], int unique integers, ranking the sorting order Notes ----- we should have that index.sorted[index.rank] == keys """""" index = as_index(keys, axis) return index.rank","where each item is in the pecking order. Parameters ---------- keys : indexable object Returns ------- ndarray, [keys.size], int unique integers, ranking the sorting order Notes ----- we should have that index.sorted[index.rank] == keys" "def _do_serialize(struct, fmt, encoding): """"""Actually serialize input. Args: struct: structure to serialize to fmt: format to serialize to encoding: encoding to use while serializing Returns: encoded serialized structure Raises: various sorts of errors raised by libraries while serializing """""" res = None _check_lib_installed(fmt, 'serialize') if fmt == 'ini': config = configobj.ConfigObj(encoding=encoding) for k, v in struct.items(): config[k] = v res = b'\n'.join(config.write()) elif fmt in ['json', 'json5']: # specify separators to get rid of trailing whitespace # specify ensure_ascii to make sure unicode is serialized in \x... sequences, # not in \u sequences res = (json if fmt == 'json' else json5).dumps(struct, indent=2, separators=(',', ': '), ensure_ascii=False).encode(encoding) elif fmt == 'toml': if not _is_utf8(encoding): raise AnyMarkupError('toml must always be utf-8 encoded according to specification') res = toml.dumps(struct).encode(encoding) elif fmt == 'xml': # passing encoding argument doesn't encode, just sets the xml property res = xmltodict.unparse(struct, pretty=True, encoding='utf-8').encode('utf-8') elif fmt == 'yaml': res = yaml.safe_dump(struct, encoding='utf-8', default_flow_style=False) else: raise # unknown format return res","Actually serialize input. Args: struct: structure to serialize to fmt: format to serialize to encoding: encoding to use while serializing Returns: encoded serialized structure Raises: various sorts of errors raised by libraries while serializing" "def update_reading_list(self, reading_list): """"""Generic behaviors for reading lists before being rendered."""""" # remove the current piece of content from the query. reading_list = reading_list.filter( ~es_filter.Ids(values=[self.id]) ) # remove excluded document types from the query. reading_list_config = getattr(settings, ""READING_LIST_CONFIG"", {}) excluded_doc_types = reading_list_config.get(""excluded_doc_types"", []) for obj in excluded_doc_types: reading_list = reading_list.filter(~es_filter.Type(value=obj)) return reading_list",Generic behaviors for reading lists before being rendered. "def wsgi_handler(event, context, app, logger): """"""lambda handler function. 
This function runs the WSGI app with it and collects its response, then translates the response back into the format expected by the API Gateway proxy integration. """""" environ = build_wsgi_environ_from_event(event) wsgi_status = [] wsgi_headers = [] logger.info('Processing {} request'.format(environ['REQUEST_METHOD'])) def start_response(status, headers): if len(wsgi_status) or len(wsgi_headers): raise RuntimeError('start_response called more than once!') wsgi_status.append(status) wsgi_headers.append(headers) resp = list(app(environ, start_response)) proxy = {'statusCode': int(wsgi_status[0].split()[0]), 'headers': {h[0]: h[1] for h in wsgi_headers[0]}, 'body': b''.join(resp).decode('utf-8')} logger.info(""Returning {}"".format(proxy['statusCode']), http_status=proxy['statusCode']) return proxy","lambda handler function. This function runs the WSGI app with it and collects its response, then translates the response back into the format expected by the API Gateway proxy integration." "def set_keyvault_secret(access_token, vault_uri, secret_name, secret_value): '''Adds a secret to a key vault using the key vault URI. Creates a new version if the secret already exists. Args: access_token (str): A valid Azure authentication token. vault_uri (str): Vault URI e.g. https://myvault.vault.azure.net. secret_name (str): Name of the secret to add. secret_value (str): Value of the secret. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([vault_uri, '/secrets/', secret_name, '?api-version=', '7.0']) current_time = datetime.datetime.now().isoformat() attributes = {'created': current_time, 'enabled': True, 'exp': None, 'nbf': None, 'recoveryLevel': 'Purgeable', 'updated': current_time} secret_body = {'attributes': attributes, 'contentType': None, 'kid': None, 'managed': None, 'tags': {'file-encoding': 'utf-8'}, 'value': secret_value} body = json.dumps(secret_body) print(body) return do_put(endpoint, body, access_token)","Adds a secret to a key vault using the key vault URI. Creates a new version if the secret already exists. Args: access_token (str): A valid Azure authentication token. vault_uri (str): Vault URI e.g. https://myvault.vault.azure.net. secret_name (str): Name of the secret to add. secret_value (str): Value of the secret. Returns: HTTP response. 200 OK." "def cmd(binary, subcommand, *args, **kwargs): """""" Construct a command line for a ""modern UNIX"" command. Modern UNIX commands do a closely-related-set-of-things and do it well. Examples include :code:`apt-get` or :code:`git`. :param binary: the name of the command :param subcommand: the subcommand used :param args: positional arguments (put last) :param kwargs: options :returns: list of arguments that is suitable to be passed to :code:`subprocess.Popen` and friends. When specifying options, the following assumptions are made: * Option names begin with :code:`--` and any :code:`_` is assumed to be a :code:`-` * If the value is :code:`NO_VALUE`, this is a ""naked"" option. * If the value is a string or an int, these are presented as the value of the option. * If the value is a list, the option will be repeated multiple times. * If the value is a dict, the option will be repeated multiple times, and its values will be :code:`=`. """""" ret = [binary, subcommand] for key, value in kwargs.items(): key = '--' + key.replace('_', '-') ret.extend(_keyword_arguments(value, key)) ret.extend(args) return ret","Construct a command line for a ""modern UNIX"" command. Modern UNIX commands do a closely-related-set-of-things and do it well.
Examples include :code:`apt-get` or :code:`git`. :param binary: the name of the command :param subcommand: the subcommand used :param args: positional arguments (put last) :param kwargs: options :returns: list of arguments that is suitable to be passed to :code:`subprocess.Popen` and friends. When specifying options, the following assumptions are made: * Option names begin with :code:`--` and any :code:`_` is assumed to be a :code:`-` * If the value is :code:`NO_VALUE`, this is a ""naked"" option. * If the value is a string or an int, these are presented as the value of the option. * If the value is a list, the option will be repeated multiple times. * If the value is a dict, the option will be repeated multiple times, and its values will be :code:`=`." "def _openai_logging(self, epoch_result): """""" Use OpenAI logging facilities for the same type of logging """""" for key in sorted(epoch_result.keys()): if key == 'fps': # Not super elegant, but I like nicer display of FPS openai_logger.record_tabular(key, int(epoch_result[key])) else: openai_logger.record_tabular(key, epoch_result[key]) openai_logger.dump_tabular()",Use OpenAI logging facilities for the same type of logging "def api2_formula(req): """""" A simple `GET`-, URL-based API to OpenFisca, making the assumption of computing formulas for a single person. Combination ----------- You can compute several formulas at once by combining the paths and joining them with `+`. Example: ``` /salaire_super_brut+salaire_net_a_payer?salaire_de_base=1440 ``` This will compute both `salaire_super_brut` and `salaire_net_a_payer` in a single request. Reforms ----------- Reforms can be requested to patch the simulation system. To keep this endpoint URL simple, they are requested as a list in a custom HTTP header. ``` X-OpenFisca-Extensions: de_net_a_brut, landais_piketty_saez ``` This header is of course optional. URL size limit -------------- Using combination with a lot of parameters may lead to long URLs. If used within the browser, make sure the resulting URL is kept [under 2047 characters](http://stackoverflow.com/questions/417142) for cross-browser compatibility, by splitting combined requests. On a server, just test what your library handles. """""" API_VERSION = '2.1.0' wsgihelpers.track(req.url.decode('utf-8')) params = dict(req.GET) data = dict() try: extensions_header = req.headers.get('X-Openfisca-Extensions') tax_benefit_system = model.get_cached_composed_reform( reform_keys = extensions_header.split(','), tax_benefit_system = model.tax_benefit_system, ) if extensions_header is not None else model.tax_benefit_system params = normalize(params, tax_benefit_system) formula_names = req.urlvars.get('names').split('+') data['values'] = dict() data['period'] = parse_period(req.urlvars.get('period')) simulation = create_simulation(params, data['period'], tax_benefit_system) for formula_name in formula_names: column = get_column_from_formula_name(formula_name, tax_benefit_system) data['values'][formula_name] = compute(column.name, simulation) except Exception as error: if isinstance(error.args[0], dict): # we raised it ourselves, in this controller error = error.args[0] else: error = dict( message = unicode(error), code = 500 ) data['error'] = error finally: return respond(req, API_VERSION, data, params)","A simple `GET`-, URL-based API to OpenFisca, making the assumption of computing formulas for a single person. Combination ----------- You can compute several formulas at once by combining the paths and joining them with `+`. 
Example: ``` /salaire_super_brut+salaire_net_a_payer?salaire_de_base=1440 ``` This will compute both `salaire_super_brut` and `salaire_net_a_payer` in a single request. Reforms ----------- Reforms can be requested to patch the simulation system. To keep this endpoint URL simple, they are requested as a list in a custom HTTP header. ``` X-OpenFisca-Extensions: de_net_a_brut, landais_piketty_saez ``` This header is of course optional. URL size limit -------------- Using combination with a lot of parameters may lead to long URLs. If used within the browser, make sure the resulting URL is kept [under 2047 characters](http://stackoverflow.com/questions/417142) for cross-browser compatibility, by splitting combined requests. On a server, just test what your library handles." "def _skip_source(source): ''' Decide to skip source or not. :param source: :return: ''' if source.invalid: if source.uri and source.type and source.type in (""deb"", ""deb-src"", ""rpm"", ""rpm-src""): pieces = source.mysplit(source.line) if pieces[1].strip()[0] == ""["": options = pieces.pop(1).strip(""[]"").split() if options: log.debug(""Source %s will be included although is marked invalid"", source.uri) return False return True else: return True return False","Decide to skip source or not. :param source: :return:" "def source_attr(attr_name): """""" Creates a getter that will drop the current value and retrieve the source's attribute with specified name. @param attr_name: the name of an attribute belonging to the source. @type attr_name: str """""" def source_attr(_value, context, **_params): value = getattr(context[""model""].source, attr_name) return _attr(value) return source_attr","Creates a getter that will drop the current value and retrieve the source's attribute with specified name. @param attr_name: the name of an attribute belonging to the source. @type attr_name: str" "def add_upsert(self, value, criteria): """"""Add a tag or populator to the batch by value and criteria"""""" value = value.strip() v = value.lower() self.lower_val_to_val[v] = value criteria_array = self.upserts.get(v) if criteria_array is None: criteria_array = [] # start with # '{""value"": ""some_value"", ""criteria"": []}, ' self.upserts_size[v] = 31 + len(value) criteria_array.append(criteria.to_dict()) self.upserts[v] = criteria_array self.upserts_size[v] += criteria.json_size()",Add a tag or populator to the batch by value and criteria "def watchpoint_info(self, handle=0, index=-1): """"""Returns information about the specified watchpoint. Note: Either ``handle`` or ``index`` can be specified. If the ``index`` is not provided, the ``handle`` must be set, and vice-versa. If both ``index`` and ``handle`` are provided, the ``index`` overrides the provided ``handle``. Args: self (JLink): the ``JLink`` instance handle (int): optional handle of a valid watchpoint. index (int): optional index of a watchpoint. Returns: An instance of ``JLinkWatchpointInfo`` specifying information about the watchpoint if the watchpoint was found, otherwise ``None``. Raises: JLinkException: on error. ValueError: if both handle and index are invalid. 
"""""" if index < 0 and handle == 0: raise ValueError('Handle must be provided if index is not set.') wp = structs.JLinkWatchpointInfo() res = self._dll.JLINKARM_GetWPInfoEx(index, ctypes.byref(wp)) if res < 0: raise errors.JLinkException('Failed to get watchpoint info.') for i in range(res): res = self._dll.JLINKARM_GetWPInfoEx(i, ctypes.byref(wp)) if res < 0: raise errors.JLinkException('Failed to get watchpoint info.') elif wp.Handle == handle or wp.WPUnit == index: return wp return None","Returns information about the specified watchpoint. Note: Either ``handle`` or ``index`` can be specified. If the ``index`` is not provided, the ``handle`` must be set, and vice-versa. If both ``index`` and ``handle`` are provided, the ``index`` overrides the provided ``handle``. Args: self (JLink): the ``JLink`` instance handle (int): optional handle of a valid watchpoint. index (int): optional index of a watchpoint. Returns: An instance of ``JLinkWatchpointInfo`` specifying information about the watchpoint if the watchpoint was found, otherwise ``None``. Raises: JLinkException: on error. ValueError: if both handle and index are invalid." "def call_historic(self, result_callback=None, kwargs=None, proc=None): """"""Call the hook with given ``kwargs`` for all registered plugins and for all plugins which will be registered afterwards. If ``result_callback`` is not ``None`` it will be called for for each non-None result obtained from a hook implementation. .. note:: The ``proc`` argument is now deprecated. """""" if proc is not None: warnings.warn( ""Support for `proc` argument is now deprecated and will be"" ""removed in an upcoming release."", DeprecationWarning, ) result_callback = proc self._call_history.append((kwargs or {}, result_callback)) # historizing hooks don't return results res = self._hookexec(self, self.get_hookimpls(), kwargs) if result_callback is None: return # XXX: remember firstresult isn't compat with historic for x in res or []: result_callback(x)","Call the hook with given ``kwargs`` for all registered plugins and for all plugins which will be registered afterwards. If ``result_callback`` is not ``None`` it will be called for for each non-None result obtained from a hook implementation. .. note:: The ``proc`` argument is now deprecated." "def indent(self, node, dirty=True): """"""Indent an item. Does nothing if the target has subitems. Args: node (gkeepapi.node.ListItem): Item to indent. dirty (bool): Whether this node should be marked dirty. """""" if node.subitems: return self._subitems[node.id] = node node.super_list_item_id = self.id node.parent_item = self if dirty: node.touch(True)","Indent an item. Does nothing if the target has subitems. Args: node (gkeepapi.node.ListItem): Item to indent. dirty (bool): Whether this node should be marked dirty." "def html_error_template(): """"""Provides a template that renders a stack trace in an HTML format, providing an excerpt of code as well as substituting source template filenames, line numbers and code for that of the originating source template, as applicable. The template's default ``encoding_errors`` value is ``'htmlentityreplace'``. The template has two options. With the ``full`` option disabled, only a section of an HTML document is returned. With the ``css`` option disabled, the default stylesheet won't be included. """""" import mako.template return mako.template.Template(r"""""" <%! 
from mako.exceptions import RichTraceback, syntax_highlight,\ pygments_html_formatter %> <%page args=""full=True, css=True, error=None, traceback=None""/> % if full: Mako Runtime Error % endif % if css: % endif % if full: % endif

Error !

<% tback = RichTraceback(error=error, traceback=traceback) src = tback.source line = tback.lineno if src: lines = src.split('\n') else: lines = None %>

${tback.errorname}: ${tback.message|h}

% if lines:
% for index in range(max(0, line-4),min(len(lines), line+5)): <% if pygments_html_formatter: pygments_html_formatter.linenostart = index + 1 %> % if index + 1 == line: <% if pygments_html_formatter: old_cssclass = pygments_html_formatter.cssclass pygments_html_formatter.cssclass = 'error ' + old_cssclass %> ${lines[index] | syntax_highlight(language='mako')} <% if pygments_html_formatter: pygments_html_formatter.cssclass = old_cssclass %> % else: ${lines[index] | syntax_highlight(language='mako')} % endif % endfor
% endif
% for (filename, lineno, function, line) in tback.reverse_traceback:
${filename}, line ${lineno}:
<% if pygments_html_formatter: pygments_html_formatter.linenostart = lineno %>
${line | syntax_highlight(filename)}
% endfor
% if full: % endif """""", output_encoding=sys.getdefaultencoding(), encoding_errors='htmlentityreplace')","Provides a template that renders a stack trace in an HTML format, providing an excerpt of code as well as substituting source template filenames, line numbers and code for that of the originating source template, as applicable. The template's default ``encoding_errors`` value is ``'htmlentityreplace'``. The template has two options. With the ``full`` option disabled, only a section of an HTML document is returned. With the ``css`` option disabled, the default stylesheet won't be included." "def _clean_item(self, item): ''' Cleans the item to be logged ''' item_copy = dict(item) del item_copy['body'] del item_copy['links'] del item_copy['response_headers'] del item_copy['request_headers'] del item_copy['status_code'] del item_copy['status_msg'] item_copy['action'] = 'ack' item_copy['logger'] = self.logger.name item_copy return item_copy",Cleans the item to be logged "def launch(self, callback_function=None): """""" If set, launches app related to the controller. """""" self._check_registered() self._socket_client.receiver_controller.launch_app( self.supporting_app_id, callback_function=callback_function)","If set, launches app related to the controller." "def numericise_all(input, empty2zero=False, default_blank="""", allow_underscores_in_numeric_literals=False): """"""Returns a list of numericised values from strings"""""" return [numericise(s, empty2zero, default_blank, allow_underscores_in_numeric_literals) for s in input]",Returns a list of numericised values from strings "def name(self): """"""AppProfile name used in requests. .. note:: This property will not change if ``app_profile_id`` does not, but the return value is not cached. The AppProfile name is of the form ``""projects/../instances/../app_profile/{app_profile_id}""`` :rtype: str :returns: The AppProfile name. """""" return self.instance_admin_client.app_profile_path( self._instance._client.project, self._instance.instance_id, self.app_profile_id, )","AppProfile name used in requests. .. note:: This property will not change if ``app_profile_id`` does not, but the return value is not cached. The AppProfile name is of the form ``""projects/../instances/../app_profile/{app_profile_id}""`` :rtype: str :returns: The AppProfile name." "def flux_production(F): r""""""Returns the net flux production for all states Parameters ---------- F : (n, n) ndarray Matrix of flux values between pairs of states. Returns ------- prod : (n) ndarray array with flux production (positive) or consumption (negative) at each state """""" influxes = np.array(np.sum(F, axis=0)).flatten() # all that flows in outfluxes = np.array(np.sum(F, axis=1)).flatten() # all that flows out prod = outfluxes - influxes # net flux into nodes return prod","r""""""Returns the net flux production for all states Parameters ---------- F : (n, n) ndarray Matrix of flux values between pairs of states. Returns ------- prod : (n) ndarray array with flux production (positive) or consumption (negative) at each state" "def load(self, addr, length=1): """""" Load one or more addresses. :param addr: byte address of load location :param length: All address from addr until addr+length (exclusive) are loaded (default: 1) """""" if addr is None: return elif not isinstance(addr, Iterable): self.first_level.load(addr, length=length) else: self.first_level.iterload(addr, length=length)","Load one or more addresses. 
:param addr: byte address of load location :param length: All address from addr until addr+length (exclusive) are loaded (default: 1)" "def spkcov(spk, idcode, cover=None): """""" Find the coverage window for a specified ephemeris object in a specified SPK file. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkcov_c.html :param spk: Name of SPK file. :type spk: str :param idcode: ID code of ephemeris object. :type idcode: int :param cover: Optional SPICE Window giving coverage in ""spk"" for ""idcode"". :type cover: spiceypy.utils.support_types.SpiceCell """""" spk = stypes.stringToCharP(spk) idcode = ctypes.c_int(idcode) if cover is None: cover = stypes.SPICEDOUBLE_CELL(2000) else: assert isinstance(cover, stypes.SpiceCell) assert cover.is_double() libspice.spkcov_c(spk, idcode, ctypes.byref(cover)) return cover","Find the coverage window for a specified ephemeris object in a specified SPK file. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkcov_c.html :param spk: Name of SPK file. :type spk: str :param idcode: ID code of ephemeris object. :type idcode: int :param cover: Optional SPICE Window giving coverage in ""spk"" for ""idcode"". :type cover: spiceypy.utils.support_types.SpiceCell" "def display(self): """"""Updates the widgets, especially based on length of recordings."""""" lg.debug('GraphicsScene is between {}s and {}s'.format(self.minimum, self.maximum)) x_scale = 1 / self.parent.value('overview_scale') lg.debug('Set scene x-scaling to {}'.format(x_scale)) self.scale(1 / self.transform().m11(), 1) # reset to 1 self.scale(x_scale, 1) self.scene = QGraphicsScene(self.minimum, 0, self.maximum, TOTAL_HEIGHT) self.setScene(self.scene) # reset annotations self.idx_markers = [] self.idx_annot = [] self.display_current() for name, pos in BARS.items(): item = QGraphicsRectItem(self.minimum, pos['pos0'], self.maximum, pos['pos1']) item.setToolTip(pos['tip']) self.scene.addItem(item) self.add_timestamps()","Updates the widgets, especially based on length of recordings." "def get_commit_tree(profile, sha): """"""Get the SHA of a commit's tree. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of a commit. Returns: The SHA of the commit's tree. """""" data = commits.get_commit(profile, sha) tree = data.get(""tree"") sha = tree.get(""sha"") return sha","Get the SHA of a commit's tree. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of a commit. Returns: The SHA of the commit's tree." "def annotate_and_average(self, gpl, expression_column, group_by_column, rename=True, force=False, merge_on_column=None, gsm_on=None, gpl_on=None): """"""Annotate GSM table with provided GPL. Args: gpl (:obj:`GEOTypes.GPL`): Platform for annotations expression_column (:obj:`str`): Column name which ""expressions"" are represented group_by_column (:obj:`str`): The data will be grouped and averaged over this column and only this column will be kept rename (:obj:`bool`): Rename output column to the self.name. Defaults to True. force (:obj:`bool`): If the name of the GPL does not match the platform name in GSM proceed anyway. Defaults to False. merge_on_column (:obj:`str`): Column to merge the data on. Defaults to None. 
gsm_on (:obj:`str`): In the case columns to merge are different in GSM and GPL use this column in GSM. Defaults to None. gpl_on (:obj:`str`): In the case columns to merge are different in GSM and GPL use this column in GPL. Defaults to None. Returns: :obj:`pandas.DataFrame`: Annotated data """""" if gpl.name != self.metadata['platform_id'][0] and not force: raise KeyError(""Platforms from GSM (%s) and from GPL (%s)"" % ( gpl.name, self.metadata['platform_id']) + "" are incompatible. Use force=True to use this GPL."") if merge_on_column is None and gpl_on is None and gsm_on is None: raise Exception(""You have to provide one of the two: "" ""merge_on_column or gpl_on and gsm_on parameters"") if merge_on_column: logger.info(""merge_on_column is not None. Using this option."") tmp_data = self.table.merge(gpl.table, on=merge_on_column, how='outer') tmp_data = tmp_data.groupby(group_by_column).mean()[ [expression_column]] else: if gpl_on is None or gsm_on is None: raise Exception(""Please provide both gpl_on and gsm_on or "" ""provide merge_on_column only"") tmp_data = self.table.merge(gpl.table, left_on=gsm_on, right_on=gpl_on, how='outer') tmp_data = tmp_data.groupby(group_by_column).mean()[ [expression_column]] if rename: tmp_data.columns = [self.name] return tmp_data","Annotate GSM table with provided GPL. Args: gpl (:obj:`GEOTypes.GPL`): Platform for annotations expression_column (:obj:`str`): Column name which ""expressions"" are represented group_by_column (:obj:`str`): The data will be grouped and averaged over this column and only this column will be kept rename (:obj:`bool`): Rename output column to the self.name. Defaults to True. force (:obj:`bool`): If the name of the GPL does not match the platform name in GSM proceed anyway. Defaults to False. merge_on_column (:obj:`str`): Column to merge the data on. Defaults to None. gsm_on (:obj:`str`): In the case columns to merge are different in GSM and GPL use this column in GSM. Defaults to None. gpl_on (:obj:`str`): In the case columns to merge are different in GSM and GPL use this column in GPL. Defaults to None. Returns: :obj:`pandas.DataFrame`: Annotated data" "def request( minion_id, dns_name=None, zone='default', request_id=None, country='US', state='California', loc='Palo Alto', org='Beta Organization', org_unit='Beta Group', password=None, zone_id=None, ): ''' Request a new certificate Uses the following command: .. code-block:: bash VCert enroll -z -k -cn CLI Example: .. code-block:: bash salt-run venafi.request ''' if password is not None: if password.startswith('sdb://'): password = __salt__['sdb.get'](password) if zone_id is None: zone_id = __opts__.get('venafi', {}).get('zone_id') if zone_id is None and zone is not None: zone_id = get_zone_id(zone) if zone_id is None: raise CommandExecutionError( 'Either a zone or a zone_id must be passed in or ' 'configured in the master file. 
This id can be retreived using ' 'venafi.show_company ' ) private_key = gen_key(minion_id, dns_name, zone, password) csr = gen_csr( minion_id, dns_name, zone=zone, country=country, state=state, loc=loc, org=org, org_unit=org_unit, password=password, ) pdata = salt.utils.json.dumps({ 'zoneId': zone_id, 'certificateSigningRequest': csr, }) qdata = __utils__['http.query']( '{0}/certificaterequests'.format(_base_url()), method='POST', data=pdata, decode=True, decode_type='json', header_dict={ 'tppl-api-key': _api_key(), 'Content-Type': 'application/json', }, ) request_id = qdata['dict']['certificateRequests'][0]['id'] ret = { 'request_id': request_id, 'private_key': private_key, 'csr': csr, 'zone': zone, } bank = 'venafi/domains' cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR) data = cache.fetch(bank, dns_name) if data is None: data = {} data.update({ 'minion_id': minion_id, 'request_id': request_id, 'private_key': private_key, 'zone': zone, 'csr': csr, }) cache.store(bank, dns_name, data) _id_map(minion_id, dns_name) return ret","Request a new certificate Uses the following command: .. code-block:: bash VCert enroll -z -k -cn CLI Example: .. code-block:: bash salt-run venafi.request " "def match_source(self, src): """"""Look for source or sources in the model that match the given source. Sources are matched by name and any association columns defined in the assoc_xmatch_columns parameter. """""" srcs = [] names = [src.name] for col in self.config['assoc_xmatch_columns']: if col in src.assoc and src.assoc[col]: names += [src.assoc[col]] for name in names: name = name.replace(' ', '').lower() if name not in self._src_dict: continue srcs += [s for s in self._src_dict[name] if s not in srcs] return srcs","Look for source or sources in the model that match the given source. Sources are matched by name and any association columns defined in the assoc_xmatch_columns parameter." "def on_make_toast(self, ref): """""" Using Toast.makeToast returns async so we have to initialize it later. """""" d = self.declaration self.toast = Toast(__id__=ref) self.init_widget()","Using Toast.makeToast returns async so we have to initialize it later." "def validate_key(self, activation_key): """""" Verify that the activation key is valid and within the permitted activation time window, returning the username if valid or raising ``ActivationError`` if not. """""" try: username = signing.loads( activation_key, salt=REGISTRATION_SALT, max_age=settings.ACCOUNT_ACTIVATION_DAYS * 86400 ) return username except signing.SignatureExpired: raise ActivationError( self.EXPIRED_MESSAGE, code='expired' ) except signing.BadSignature: raise ActivationError( self.INVALID_KEY_MESSAGE, code='invalid_key', params={'activation_key': activation_key} )","Verify that the activation key is valid and within the permitted activation time window, returning the username if valid or raising ``ActivationError`` if not." "def clean_title(title): """""" Clean title -> remove dates, remove duplicated spaces and strip title. Args: title (str): Title. Returns: str: Clean title without dates, duplicated, trailing and leading spaces. """""" date_pattern = re.compile(r'\W*' r'\d{1,2}' r'[/\-.]' r'\d{1,2}' r'[/\-.]' r'(?=\d*)(?:.{4}|.{2})' r'\W*') title = date_pattern.sub(' ', title) title = re.sub(r'\s{2,}', ' ', title) title = title.strip() return title","Clean title -> remove dates, remove duplicated spaces and strip title. Args: title (str): Title. Returns: str: Clean title without dates, duplicated, trailing and leading spaces." 
"def merge(obj_a, obj_b, strategy='smart', renderer='yaml', merge_lists=False): ''' Merge a data structure into another by choosing a merge strategy Strategies: * aggregate * list * overwrite * recurse * smart CLI Example: .. code-block:: shell salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}' ''' return salt.utils.dictupdate.merge(obj_a, obj_b, strategy, renderer, merge_lists)","Merge a data structure into another by choosing a merge strategy Strategies: * aggregate * list * overwrite * recurse * smart CLI Example: .. code-block:: shell salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}'" "def _getsender(): """"""Return local sender. Don't use the getpass module, it looks at various environment variables and is unreliable. """""" import os import pwd import socket host = socket.gethostname() user = pwd.getpwuid(os.getuid())[0] return ""%s@%s"" % (user, host)","Return local sender. Don't use the getpass module, it looks at various environment variables and is unreliable." "def getParams(options=None): """"""return a string containing script parameters. Parameters are all variables that start with ``param_``. """""" result = [] if options: members = options.__dict__ for k, v in sorted(members.items()): result.append(""# %-40s: %s"" % (k, str(v))) else: vars = inspect.currentframe().f_back.f_locals for var in filter(lambda x: re.match(""param_"", x), vars.keys()): result.append(""# %-40s: %s"" % (var, str(vars[var]))) if result: return ""\n"".join(result) else: return ""# no parameters.""","return a string containing script parameters. Parameters are all variables that start with ``param_``." "def is_public(addr): # type: (Union[str, unicode]) -> bool """""" Check if the passed email registered at a free pubic mail server :param addr: email address to check :return: bool >>> is_public(""john@cmu.edu"") False >>> is_public(""john@gmail.com"") True """""" addr_domain = domain(addr) if not addr_domain: # anybody can use invalid email return True chunks = addr_domain.rsplit(""."", 1) return len(chunks) < 2 \ or addr_domain.endswith(""local"") \ or addr_domain in public_domains()","Check if the passed email registered at a free pubic mail server :param addr: email address to check :return: bool >>> is_public(""john@cmu.edu"") False >>> is_public(""john@gmail.com"") True" "def generate_bqm(graph, table, decision, linear_energy_ranges=None, quadratic_energy_ranges=None, min_classical_gap=2, precision=7, max_decision=8, max_variables=10, return_auxiliary=False): """"""Get a binary quadratic model with specific ground states. Args: graph (:obj:`~networkx.Graph`): Defines the structure of the generated binary quadratic model. table (iterable): Iterable of valid configurations (of spin-values). Each configuration is a tuple of variable assignments ordered by `decision`. decision (list/tuple): The variables in the binary quadratic model which have specified configurations. linear_energy_ranges (dict, optional): Dict of the form {v: (min, max, ...} where min and max are the range of values allowed to v. The default range is [-2, 2]. quadratic_energy_ranges (dict, optional): Dict of the form {(u, v): (min, max), ...} where min and max are the range of values allowed to (u, v). The default range is [-1, 1]. min_classical_gap (float): The minimum energy gap between the highest feasible state and the lowest infeasible state. precision (int, optional, default=7): Values returned by the optimization solver are rounded to `precision` digits of precision. 
max_decision (int, optional, default=4): Maximum number of decision variables allowed. The algorithm is valid for arbitrary sizes of problem but can be extremely slow. max_variables (int, optional, default=4): Maximum number of variables allowed. The algorithm is valid for arbitrary sizes of problem but can be extremely slow. return_auxiliary (bool, optional, False): If True, the auxiliary configurations are returned for each configuration in table. Returns: If return_auxiliary is False: :obj:`dimod.BinaryQuadraticModel`: The binary quadratic model. float: The classical gap. If return_auxiliary is True: :obj:`dimod.BinaryQuadraticModel`: The binary quadratic model. float: The classical gap. dict: The auxiliary configurations, keyed on the configurations in table. Raises: ImpossiblePenaltyModel: If the penalty model cannot be built. Normally due to a non-zero infeasible gap. """""" # Developer note: This function is input checking and output formatting. The logic is # in _generate_ising if not isinstance(graph, nx.Graph): raise TypeError(""expected input graph to be a NetworkX Graph."") if not set().union(*table).issubset({-1, 1}): raise ValueError(""expected table to be spin-valued"") if not isinstance(decision, list): decision = list(decision) # handle iterables if not all(v in graph for v in decision): raise ValueError(""given graph does not match the variable labels in decision variables"") num_var = len(decision) if any(len(config) != num_var for config in table): raise ValueError(""number of decision variables does not match all of the entires in the table"") if len(decision) > max_decision: raise ValueError((""The table is too large. Note that larger models can be built by setting "" ""max_decision to a higher number, but generation could be extremely slow."")) if len(graph) > max_variables: raise ValueError((""The graph is too large. Note that larger models can be built by setting "" ""max_variables to a higher number, but generation could be extremely slow."")) if linear_energy_ranges is None: linear_energy_ranges = defaultdict(lambda: (-2, 2)) if quadratic_energy_ranges is None: quadratic_energy_ranges = defaultdict(lambda: (-1, 1)) if not isinstance(table, Mapping): table = {config: 0. for config in table} h, J, offset, gap, aux = _generate_ising(graph, table, decision, min_classical_gap, linear_energy_ranges, quadratic_energy_ranges) bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN) bqm.add_variables_from((v, round(bias, precision)) for v, bias in h.items()) bqm.add_interactions_from((u, v, round(bias, precision)) for (u, v), bias in J.items()) bqm.add_offset(round(offset, precision)) if return_auxiliary: return bqm, round(gap, precision), aux else: return bqm, round(gap, precision)","Get a binary quadratic model with specific ground states. Args: graph (:obj:`~networkx.Graph`): Defines the structure of the generated binary quadratic model. table (iterable): Iterable of valid configurations (of spin-values). Each configuration is a tuple of variable assignments ordered by `decision`. decision (list/tuple): The variables in the binary quadratic model which have specified configurations. linear_energy_ranges (dict, optional): Dict of the form {v: (min, max, ...} where min and max are the range of values allowed to v. The default range is [-2, 2]. quadratic_energy_ranges (dict, optional): Dict of the form {(u, v): (min, max), ...} where min and max are the range of values allowed to (u, v). The default range is [-1, 1]. 
min_classical_gap (float): The minimum energy gap between the highest feasible state and the lowest infeasible state. precision (int, optional, default=7): Values returned by the optimization solver are rounded to `precision` digits of precision. max_decision (int, optional, default=4): Maximum number of decision variables allowed. The algorithm is valid for arbitrary sizes of problem but can be extremely slow. max_variables (int, optional, default=4): Maximum number of variables allowed. The algorithm is valid for arbitrary sizes of problem but can be extremely slow. return_auxiliary (bool, optional, False): If True, the auxiliary configurations are returned for each configuration in table. Returns: If return_auxiliary is False: :obj:`dimod.BinaryQuadraticModel`: The binary quadratic model. float: The classical gap. If return_auxiliary is True: :obj:`dimod.BinaryQuadraticModel`: The binary quadratic model. float: The classical gap. dict: The auxiliary configurations, keyed on the configurations in table. Raises: ImpossiblePenaltyModel: If the penalty model cannot be built. Normally due to a non-zero infeasible gap." "def single_val(self): """"""return relative error of worst point that might make the data none symmetric. """""" sv_t = self._sv(self._tdsphere) sv_p = self._sv(self._tdsphere) return (sv_t, sv_p)","return relative error of worst point that might make the data none symmetric." "def labels(self): """""" Provide labels without the need of dockerd. Instead skopeo is being used. :return: dict """""" if self._labels is None: cmd = [""skopeo"", ""inspect"", self.skopeo_target] self._labels = json.loads(subprocess.check_output(cmd))[""Labels""] return self._labels","Provide labels without the need of dockerd. Instead skopeo is being used. :return: dict" "def rename_pipe(self, old_name, new_name): """"""Rename a pipeline component. old_name (unicode): Name of the component to rename. new_name (unicode): New name of the component. DOCS: https://spacy.io/api/language#rename_pipe """""" if old_name not in self.pipe_names: raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names)) if new_name in self.pipe_names: raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names)) i = self.pipe_names.index(old_name) self.pipeline[i] = (new_name, self.pipeline[i][1])","Rename a pipeline component. old_name (unicode): Name of the component to rename. new_name (unicode): New name of the component. DOCS: https://spacy.io/api/language#rename_pipe" "def get_teb_address(self): """""" Returns a remote pointer to the TEB. @rtype: int @return: Remote pointer to the L{TEB} structure. @raise WindowsError: An exception is raised on error. """""" try: return self._teb_ptr except AttributeError: try: hThread = self.get_handle(win32.THREAD_QUERY_INFORMATION) tbi = win32.NtQueryInformationThread( hThread, win32.ThreadBasicInformation) address = tbi.TebBaseAddress except WindowsError: address = self.get_linear_address('SegFs', 0) # fs:[0] if not address: raise self._teb_ptr = address return address","Returns a remote pointer to the TEB. @rtype: int @return: Remote pointer to the L{TEB} structure. @raise WindowsError: An exception is raised on error." 
"def month(self, value=None): """"""Corresponds to IDD Field `month` Args: value (int): value for IDD Field `month` value >= 1 value <= 12 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """""" if value is not None: try: value = int(value) except ValueError: raise ValueError('value {} need to be of type int ' 'for field `month`'.format(value)) if value < 1: raise ValueError('value need to be greater or equal 1 ' 'for field `month`') if value > 12: raise ValueError('value need to be smaller 12 ' 'for field `month`') self._month = value","Corresponds to IDD Field `month` Args: value (int): value for IDD Field `month` value >= 1 value <= 12 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value" "def get_intent_filters(self, itemtype, name): """""" Find intent filters for a given item and name. Intent filter are attached to activities, services or receivers. You can search for the intent filters of such items and get a dictionary of all attached actions and intent categories. :param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver` :param name: the `android:name` of the parent item, e.g. activity name :returns: a dictionary with the keys `action` and `category` containing the `android:name` of those items """""" d = {""action"": [], ""category"": []} for i in self.xml: # TODO: this can probably be solved using a single xpath for item in self.xml[i].findall("".//"" + itemtype): if self._format_value(item.get(self._ns(""name""))) == name: for sitem in item.findall("".//intent-filter""): for ssitem in sitem.findall(""action""): if ssitem.get(self._ns(""name"")) not in d[""action""]: d[""action""].append(ssitem.get(self._ns(""name""))) for ssitem in sitem.findall(""category""): if ssitem.get(self._ns(""name"")) not in d[""category""]: d[""category""].append(ssitem.get(self._ns(""name""))) if not d[""action""]: del d[""action""] if not d[""category""]: del d[""category""] return d","Find intent filters for a given item and name. Intent filter are attached to activities, services or receivers. You can search for the intent filters of such items and get a dictionary of all attached actions and intent categories. :param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver` :param name: the `android:name` of the parent item, e.g. activity name :returns: a dictionary with the keys `action` and `category` containing the `android:name` of those items" "def swap(tokens, maxdist=2): """"""Perform a swap operation on a sequence of tokens, exhaustively swapping all tokens up to the maximum specified distance. This is a subset of all permutations."""""" assert maxdist >= 2 tokens = list(tokens) if maxdist > len(tokens): maxdist = len(tokens) l = len(tokens) for i in range(0,l - 1): for permutation in permutations(tokens[i:i+maxdist]): if permutation != tuple(tokens[i:i+maxdist]): newtokens = tokens[:i] newtokens += permutation newtokens += tokens[i+maxdist:] yield newtokens if maxdist == len(tokens): break","Perform a swap operation on a sequence of tokens, exhaustively swapping all tokens up to the maximum specified distance. This is a subset of all permutations." 
"def switch_onoff(self, device, status): """"""Switch a Socket"""""" if status == 1 or status == True or status == '1': return self.switch_on(device) else: return self.switch_off(device)",Switch a Socket "def json_dumps(self, data, **options): """""" Wrapper around `json.dumps` that uses a special JSON encoder. """""" params = {'sort_keys': True, 'indent': 2} params.update(options) # This code is based off django's built in JSON serializer if json.__version__.split('.') >= ['2', '1', '3']: # Use JS strings to represent Python Decimal instances (ticket #16850) params.update({'use_decimal': False}) return json.dumps(data, cls=DjangoJSONEncoder, **params)",Wrapper around `json.dumps` that uses a special JSON encoder. "def eqCoords(self, zerolat=False): """""" Returns the Equatorial Coordinates of this object. Receives a boolean parameter to consider a zero latitude. """""" lat = 0.0 if zerolat else self.lat return utils.eqCoords(self.lon, lat)","Returns the Equatorial Coordinates of this object. Receives a boolean parameter to consider a zero latitude." "def _get_xk(self, yk): '''Compute approximate solution from initial guess and approximate solution of the preconditioned linear system.''' if yk is not None: return self.x0 + self.linear_system.Mr * yk return self.x0","Compute approximate solution from initial guess and approximate solution of the preconditioned linear system." "def execute(self, input_data): ''' Okay this worker is going build graphs from PCAP Bro output logs ''' # Grab the Bro log handles from the input bro_logs = input_data['pcap_bro'] # Weird log if 'weird_log' in bro_logs: stream = self.workbench.stream_sample(bro_logs['weird_log']) self.weird_log_graph(stream) # HTTP log gsleep() stream = self.workbench.stream_sample(bro_logs['http_log']) self.http_log_graph(stream) # Files log gsleep() stream = self.workbench.stream_sample(bro_logs['files_log']) self.files_log_graph(stream) return {'output':'go to http://localhost:7474/browser and execute this query ""match (s:origin), (t:file), p=allShortestPaths((s)--(t)) return p""'}",Okay this worker is going build graphs from PCAP Bro output logs "def attach_binary(self, content, filename): """""" Attaches given binary data. :param bytes content: Binary data to be attached. :param str filename: :return: None. """""" content_type = guess_content_type(filename) payload = {""Name"": filename, ""Content"": b64encode(content).decode(""utf-8""), ""ContentType"": content_type} self.attach(payload)","Attaches given binary data. :param bytes content: Binary data to be attached. :param str filename: :return: None." "def promoter(self, up=2000, down=0): """""" Return a start, end tuple of positions for the promoter region of this gene Parameters ---------- up : int this distance upstream that is considered the promoter down : int the strand is used to add this many downstream bases into the gene. """""" if not self.is_gene_pred: return None return self.tss(up=up, down=down)","Return a start, end tuple of positions for the promoter region of this gene Parameters ---------- up : int this distance upstream that is considered the promoter down : int the strand is used to add this many downstream bases into the gene." "def from_phononwfkq_work(cls, phononwfkq_work, nscf_vars={}, remove_wfkq=True, with_ddk=True, manager=None): """""" Construct a `GKKPWork` from a `PhononWfkqWork` object. 
The WFQ are the ones used for PhononWfkqWork so in principle have only valence bands """""" # Get list of qpoints from the the phonon tasks in this work qpoints = [] qpoints_deps = [] for task in phononwfkq_work: if isinstance(task,PhononTask): # Store qpoints qpt = task.input.get(""qpt"", [0,0,0]) qpoints.append(qpt) # Store dependencies qpoints_deps.append(task.deps) # Create file nodes ddb_path = phononwfkq_work.outdir.has_abiext(""DDB"") dvdb_path = phononwfkq_work.outdir.has_abiext(""DVDB"") ddb_file = FileNode(ddb_path) dvdb_file = FileNode(dvdb_path) # Get scf_task from first q-point for dep in qpoints_deps[0]: if isinstance(dep.node,ScfTask) and dep.exts[0] == 'WFK': scf_task = dep.node # Create new work new = cls(manager=manager) new.remove_wfkq = remove_wfkq new.wfkq_tasks = [] new.wfk_task = [] # Add one eph task per qpoint for qpt,qpoint_deps in zip(qpoints,qpoints_deps): # Create eph task eph_input = scf_task.input.new_with_vars(optdriver=7, prtphdos=0, eph_task=-2, ddb_ngqpt=[1,1,1], nqpt=1, qpt=qpt) deps = {ddb_file: ""DDB"", dvdb_file: ""DVDB"" } for dep in qpoint_deps: deps[dep.node] = dep.exts[0] # If no WFQ in deps link the WFK with WFQ extension if 'WFQ' not in deps.values(): inv_deps = dict((v, k) for k, v in deps.items()) wfk_task = inv_deps['WFK'] wfk_path = wfk_task.outdir.has_abiext(""WFK"") # Check if netcdf filename, extension = os.path.splitext(wfk_path) infile = 'out_WFQ' + extension wfq_path = os.path.join(os.path.dirname(wfk_path), infile) if not os.path.isfile(wfq_path): os.symlink(wfk_path, wfq_path) deps[FileNode(wfq_path)] = 'WFQ' new.register_eph_task(eph_input, deps=deps) return new","Construct a `GKKPWork` from a `PhononWfkqWork` object. The WFQ are the ones used for PhononWfkqWork so in principle have only valence bands" "def maybe(value): """"""Wraps an object with a Maybe instance. >>> maybe(""I'm a value"") Something(""I'm a value"") >>> maybe(None); Nothing Testing for value: >>> maybe(""I'm a value"").is_some() True >>> maybe(""I'm a value"").is_none() False >>> maybe(None).is_some() False >>> maybe(None).is_none() True Simplifying IF statements: >>> maybe(""I'm a value"").get() ""I'm a value"" >>> maybe(""I'm a value"").or_else(lambda: ""No value"") ""I'm a value"" >>> maybe(None).get() Traceback (most recent call last): ... NothingValueError: No such element >>> maybe(None).or_else(lambda: ""value"") 'value' >>> maybe(None).or_else(""value"") 'value' Wrap around values from object's attributes: class Person(object): def __init__(name): self.eran = name eran = maybe(Person('eran')) >>> eran.name Something('eran') >>> eran.phone_number Nothing >>> eran.phone_number.or_else('no phone number') 'no phone number' >>> maybe(4) + 8 Something(12) >>> maybe(4) - 2 Something(2) >>> maybe(4) * 2 Something(8) And methods: >>> maybe('VALUE').lower().get() 'value' >>> maybe(None).invalid().method().or_else('unknwon') 'unknwon' Enabled easily using NestedDictionaries without having to worry if a value is missing. 
For example lets assume we want to load some value from the following dictionary: nested_dict = maybe({ 'store': { 'name': 'MyStore', 'departments': { 'sales': { 'head_count': '10' } } } }) >>> nested_dict['store']['name'].get() 'MyStore' >>> nested_dict['store']['address'] Nothing >>> nested_dict['store']['address']['street'].or_else('No Address Specified') 'No Address Specified' >>> nested_dict['store']['address']['street'].or_none() is None True >>> nested_dict['store']['address']['street'].or_empty_list() [] >>> nested_dict['store']['departments']['sales']['head_count'].or_else('0') '10' >>> nested_dict['store']['departments']['marketing']['head_count'].or_else('0') '0' """""" if isinstance(value, Maybe): return value if value is not None: return Something(value) return Nothing()","Wraps an object with a Maybe instance. >>> maybe(""I'm a value"") Something(""I'm a value"") >>> maybe(None); Nothing Testing for value: >>> maybe(""I'm a value"").is_some() True >>> maybe(""I'm a value"").is_none() False >>> maybe(None).is_some() False >>> maybe(None).is_none() True Simplifying IF statements: >>> maybe(""I'm a value"").get() ""I'm a value"" >>> maybe(""I'm a value"").or_else(lambda: ""No value"") ""I'm a value"" >>> maybe(None).get() Traceback (most recent call last): ... NothingValueError: No such element >>> maybe(None).or_else(lambda: ""value"") 'value' >>> maybe(None).or_else(""value"") 'value' Wrap around values from object's attributes: class Person(object): def __init__(name): self.eran = name eran = maybe(Person('eran')) >>> eran.name Something('eran') >>> eran.phone_number Nothing >>> eran.phone_number.or_else('no phone number') 'no phone number' >>> maybe(4) + 8 Something(12) >>> maybe(4) - 2 Something(2) >>> maybe(4) * 2 Something(8) And methods: >>> maybe('VALUE').lower().get() 'value' >>> maybe(None).invalid().method().or_else('unknwon') 'unknwon' Enabled easily using NestedDictionaries without having to worry if a value is missing. For example lets assume we want to load some value from the following dictionary: nested_dict = maybe({ 'store': { 'name': 'MyStore', 'departments': { 'sales': { 'head_count': '10' } } } }) >>> nested_dict['store']['name'].get() 'MyStore' >>> nested_dict['store']['address'] Nothing >>> nested_dict['store']['address']['street'].or_else('No Address Specified') 'No Address Specified' >>> nested_dict['store']['address']['street'].or_none() is None True >>> nested_dict['store']['address']['street'].or_empty_list() [] >>> nested_dict['store']['departments']['sales']['head_count'].or_else('0') '10' >>> nested_dict['store']['departments']['marketing']['head_count'].or_else('0') '0'" "def _get_info_dir(): """"""Get path to directory in which to store info files. The directory returned by this function is ""owned"" by this module. If the contents of the directory are modified other than via the public functions of this module, subsequent behavior is undefined. The directory will be created if it does not exist. """""" path = os.path.join(tempfile.gettempdir(), "".tensorboard-info"") try: os.makedirs(path) except OSError as e: if e.errno == errno.EEXIST and os.path.isdir(path): pass else: raise else: os.chmod(path, 0o777) return path","Get path to directory in which to store info files. The directory returned by this function is ""owned"" by this module. If the contents of the directory are modified other than via the public functions of this module, subsequent behavior is undefined. The directory will be created if it does not exist." 
"def logline_timestamp_comparator(t1, t2): """"""Comparator for timestamps in logline format. Args: t1: Timestamp in logline format. t2: Timestamp in logline format. Returns: -1 if t1 < t2; 1 if t1 > t2; 0 if t1 == t2. """""" dt1 = _parse_logline_timestamp(t1) dt2 = _parse_logline_timestamp(t2) for u1, u2 in zip(dt1, dt2): if u1 < u2: return -1 elif u1 > u2: return 1 return 0","Comparator for timestamps in logline format. Args: t1: Timestamp in logline format. t2: Timestamp in logline format. Returns: -1 if t1 < t2; 1 if t1 > t2; 0 if t1 == t2." "def get_config_dir(): """""" Return tmuxp configuration directory. ``TMUXP_CONFIGDIR`` environmental variable has precedence if set. We also evaluate XDG default directory from XDG_CONFIG_HOME environmental variable if set or its default. Then the old default ~/.tmuxp is returned for compatibility. Returns ------- str : absolute path to tmuxp config directory """""" paths = [] if 'TMUXP_CONFIGDIR' in os.environ: paths.append(os.environ['TMUXP_CONFIGDIR']) if 'XDG_CONFIG_HOME' in os.environ: paths.append(os.environ['XDG_CONFIG_HOME']) else: paths.append('~/.config/tmuxp/') paths.append('~/.tmuxp') for path in paths: path = os.path.expanduser(path) if os.path.isdir(path): return path # Return last path as default if none of the previous ones matched return path","Return tmuxp configuration directory. ``TMUXP_CONFIGDIR`` environmental variable has precedence if set. We also evaluate XDG default directory from XDG_CONFIG_HOME environmental variable if set or its default. Then the old default ~/.tmuxp is returned for compatibility. Returns ------- str : absolute path to tmuxp config directory" "def get(cls, mjd: float, dbname: str = None) -> Eop: """"""Retrieve Earth Orientation Parameters and timescales differences for a given date Args: mjd: Date expressed as Mean Julian Date dbname: Name of the database to use Return: Eop: Interpolated data for this particuliar MJD """""" try: value = cls.db(dbname)[mjd] except (EopError, KeyError) as e: if isinstance(e, KeyError): msg = ""Missing EOP data for mjd = '%s'"" % e else: msg = str(e) if cls.policy() == cls.WARN: log.warning(msg) elif cls.policy() == cls.ERROR: raise value = Eop(x=0, y=0, dx=0, dy=0, deps=0, dpsi=0, lod=0, ut1_utc=0, tai_utc=0) return value","Retrieve Earth Orientation Parameters and timescales differences for a given date Args: mjd: Date expressed as Mean Julian Date dbname: Name of the database to use Return: Eop: Interpolated data for this particuliar MJD" "def is_descendant_of_bin(self, id_, bin_id): """"""Tests if an ``Id`` is a descendant of a bin. arg: id (osid.id.Id): an ``Id`` arg: bin_id (osid.id.Id): the ``Id`` of a bin return: (boolean) - ``true`` if the ``id`` is a descendant of the ``bin_id,`` ``false`` otherwise raise: NotFound - ``bin_id`` is not found raise: NullArgument - ``id`` or ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` is not found return ``false``. """""" # Implemented from template for # osid.resource.BinHierarchySession.is_descendant_of_bin if self._catalog_session is not None: return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=bin_id) return self._hierarchy_session.is_descendant(id_=id_, descendant_id=bin_id)","Tests if an ``Id`` is a descendant of a bin. 
arg: id (osid.id.Id): an ``Id`` arg: bin_id (osid.id.Id): the ``Id`` of a bin return: (boolean) - ``true`` if the ``id`` is a descendant of the ``bin_id,`` ``false`` otherwise raise: NotFound - ``bin_id`` is not found raise: NullArgument - ``id`` or ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` is not found return ``false``." "def graph_from_alphabet(alphabet, base): """"""Creates a graph that connects the base with the target through alphabets If every target is connected to any inputs, create the independent paths"""""" if not isinstance(alphabet, Choice): raise TypeError(alphabet.__class__.__name__) if not isinstance(base, Choice): raise TypeError(base.__class__.__name__) import networkx result = networkx.DiGraph() current_alphabet = alphabet pending_stack = set(current_alphabet) while pending_stack: current_alphabet = pending_stack.pop() if current_alphabet == base: continue if current_alphabet in base: result.add_edge(current_alphabet, base) elif isinstance(current_alphabet, Choice): for element in current_alphabet: if element in base: result.add_edge(current_alphabet, base) else: result.add_edge(current_alphabet, element) pending_stack.add(element) elif current_alphabet.alphabet: result.add_edge(current_alphabet, current_alphabet.alphabet) pending_stack.add(current_alphabet.alphabet) return result","Creates a graph that connects the base with the target through alphabets If every target is connected to any inputs, create the independent paths" "def set_acls(path, acls, version=-1, profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None): ''' Set acls on a znode path path to znode acls list of acl dictionaries to set on the znode version only set acls if version matches (Default: -1 (always matches)) profile Configured Zookeeper profile to authenticate with (Default: None) hosts Lists of Zookeeper Hosts (Default: '127.0.0.1:2181) scheme Scheme to authenticate with (Default: 'digest') username Username to authenticate (Default: None) password Password to authenticate (Default: None) default_acl Default acls to assign if a node is created in this connection (Default: None) CLI Example: .. code-block:: bash salt minion1 zookeeper.set_acls /test/name acls='[{""username"": ""gtmanfred"", ""password"": ""test"", ""all"": True}]' profile=prod ''' conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme, username=username, password=password, default_acl=default_acl) if acls is None: acls = [] acls = [make_digest_acl(**acl) for acl in acls] conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme, username=username, password=password, default_acl=default_acl) return conn.set_acls(path, acls, version)","Set acls on a znode path path to znode acls list of acl dictionaries to set on the znode version only set acls if version matches (Default: -1 (always matches)) profile Configured Zookeeper profile to authenticate with (Default: None) hosts Lists of Zookeeper Hosts (Default: '127.0.0.1:2181) scheme Scheme to authenticate with (Default: 'digest') username Username to authenticate (Default: None) password Password to authenticate (Default: None) default_acl Default acls to assign if a node is created in this connection (Default: None) CLI Example: .. 
code-block:: bash salt minion1 zookeeper.set_acls /test/name acls='[{""username"": ""gtmanfred"", ""password"": ""test"", ""all"": True}]' profile=prod" "def get_current_desktop(self): """""" Get the current desktop. Uses ``_NET_CURRENT_DESKTOP`` of the EWMH spec. """""" desktop = ctypes.c_long(0) _libxdo.xdo_get_current_desktop(self._xdo, ctypes.byref(desktop)) return desktop.value","Get the current desktop. Uses ``_NET_CURRENT_DESKTOP`` of the EWMH spec." "def bbox_vert_aligned_center(box1, box2): """""" Returns true if the center of both boxes is within 5 pts """""" if not (box1 and box2): return False return abs(((box1.right + box1.left) / 2.0) - ((box2.right + box2.left) / 2.0)) <= 5",Returns true if the center of both boxes is within 5 pts "def serialized_task(self, task: Task) -> Tuple[str, str]: """""" Returns the name of the task definition file and its contents. """""" return f""{task.hash}.json"", task.json",Returns the name of the task definition file and its contents. "def create_legacy_pad(scope, input_name, output_name, H_in, W_in, k_h, k_w, s_h, s_w, p_h, p_w, padded_value, container): ''' This function adds one Pad operator into its last argument, which is a Container object. By feeding the output of the created Pad operator into Pool operator under valid padding mode, we can achieve the same functionality of CoreML' pooling under IncludeLastPixel padding mode. :param scope: :param input_name: :param output_name: :param H_in: input dimension along H-axis :param W_in: input dimension along W-axis :param k_h: kernel's H-axis dimension :param k_w: kernel's W-axis dimension :param s_h: stride along H-axis :param s_w: stride along W-axis :param p_h: padding amount at the beginning and the end of H-axis :param p_w: padding amount at the beginning and the end of W-axis :param padded_value: value used to fill padded area :param container: Container object ''' # Add a Pad operator to pre-process 4-D tensor pad_t, pad_b = calculate_legacy_pad_amount(H_in, p_h, k_h, s_h) pad_l, pad_r = calculate_legacy_pad_amount(W_in, p_w, k_w, s_w) # CoreML pooling operator pads only their H- and W-axes. Here we assume the shape of the tensor to be padded # is [N, C, H, W], so we have 8 padding amounts # pads = [N_begin_index, C_begin_index, H_begin_index, W_begin_index, # N_end_index, C_end_index, H_end_index, W_end_index] # Because only H- and W-axes are padded in CoreML, we leave padding amounts of N- and C-axes zeros. pads = [0, 0, pad_t, pad_l, 0, 0, pad_b, pad_r] apply_pad(scope, input_name, output_name, container, pads=pads, value=padded_value)","This function adds one Pad operator into its last argument, which is a Container object. By feeding the output of the created Pad operator into Pool operator under valid padding mode, we can achieve the same functionality of CoreML' pooling under IncludeLastPixel padding mode. 
:param scope: :param input_name: :param output_name: :param H_in: input dimension along H-axis :param W_in: input dimension along W-axis :param k_h: kernel's H-axis dimension :param k_w: kernel's W-axis dimension :param s_h: stride along H-axis :param s_w: stride along W-axis :param p_h: padding amount at the beginning and the end of H-axis :param p_w: padding amount at the beginning and the end of W-axis :param padded_value: value used to fill padded area :param container: Container object" "def _read_reference_information(self): """"""Reads the reference names and lengths"""""" ref_lengths = {} ref_names = [] for n in range(self._n_ref): l_name = struct.unpack(' 0: name = request.forms.filename if name is not None: filename = ""{0}.rst"".format(name) file_handle = open(filename, 'w') content = request.body.read() content = content.decode('utf-8') file_handle.write(content.encode('utf-8')) file_handle.close() return ""OK"" else: return abort(404)","Quick save a page. .. note:: this is a bottle view * this view must be called with the PUT method write the new page content to the file, and not not commit or redirect Keyword Arguments: :name: (str) -- name of the rest file (without the .rst extension) Returns: bottle response object (200 OK)" "def model_to_dict(instance, **options): ""Takes a model instance and converts it into a dict."" options = _defaults(options) attrs = {} if options['prehook']: if isinstance(options['prehook'], collections.Callable): instance = options['prehook'](instance) if instance is None: return attrs # Items in the `fields` list are the output aliases, not the raw # accessors (field, method, property names) for alias in options['fields']: # Get the accessor for the object accessor = options['aliases'].get(alias, alias) # Create the key that will be used in the output dict key = options['prefix'] + alias # Optionally camelcase the key if options['camelcase']: key = convert_to_camel(key) # Get the field value. Use the mapped value to the actually property or # method name. `value` may be a number of things, so the various types # are checked below. value = get_field_value(instance, accessor, allow_missing=options['allow_missing']) # Related objects, perform some checks on their options if isinstance(value, (models.Model, QuerySet)): _options = _defaults(options['related'].get(accessor, {})) # If the `prefix` follows the below template, generate the # `prefix` for the related object if '%(accessor)s' in _options['prefix']: _options['prefix'] = _options['prefix'] % {'accessor': alias} if isinstance(value, models.Model): if len(_options['fields']) == 1 and _options['flat'] \ and not _options['merge']: value = list(serialize(value, **_options).values())[0] else: # Recurse, get the dict representation _attrs = serialize(value, **_options) # Check if this object should be merged into the parent, # otherwise nest it under the accessor name if _options['merge']: attrs.update(_attrs) continue value = _attrs else: value = serialize(value, **_options) attrs[key] = value # Apply post-hook to serialized attributes if options['posthook']: attrs = options['posthook'](instance, attrs) return attrs",Takes a model instance and converts it into a dict. "def is_parent_of_family(self, id_, family_id): """"""Tests if an ``Id`` is a direct parent of a family. 
arg: id (osid.id.Id): an ``Id`` arg: family_id (osid.id.Id): the ``Id`` of a family return: (boolean) - ``true`` if this ``id`` is a parent of ``family_id,`` ``false`` otherwise raise: NotFound - ``family_id`` is not found raise: NullArgument - ``id`` or ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. """""" # Implemented from template for # osid.resource.BinHierarchySession.is_parent_of_bin if self._catalog_session is not None: return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=family_id) return self._hierarchy_session.is_parent(id_=family_id, parent_id=id_)","Tests if an ``Id`` is a direct parent of a family. arg: id (osid.id.Id): an ``Id`` arg: family_id (osid.id.Id): the ``Id`` of a family return: (boolean) - ``true`` if this ``id`` is a parent of ``family_id,`` ``false`` otherwise raise: NotFound - ``family_id`` is not found raise: NullArgument - ``id`` or ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``." "def square_root_mod_prime( a, p ): """"""Modular square root of a, mod p, p prime."""""" # Based on the Handbook of Applied Cryptography, algorithms 3.34 to 3.39. # This module has been tested for all values in [0,p-1] for # every prime p from 3 to 1229. assert 0 <= a < p assert 1 < p if a == 0: return 0 if p == 2: return a jac = jacobi( a, p ) if jac == -1: raise SquareRootError( ""%d has no square root modulo %d"" \ % ( a, p ) ) if p % 4 == 3: return modular_exp( a, (p+1)//4, p ) if p % 8 == 5: d = modular_exp( a, (p-1)//4, p ) if d == 1: return modular_exp( a, (p+3)//8, p ) if d == p-1: return ( 2 * a * modular_exp( 4*a, (p-5)//8, p ) ) % p raise RuntimeError(""Shouldn't get here."") for b in range( 2, p ): if jacobi( b*b-4*a, p ) == -1: f = ( a, -b, 1 ) ff = polynomial_exp_mod( ( 0, 1 ), (p+1)//2, f, p ) assert ff[1] == 0 return ff[0] raise RuntimeError(""No b found."")","Modular square root of a, mod p, p prime." "def oindex(a, selection): """"""Implementation of orthogonal indexing with slices and ints."""""" selection = replace_ellipsis(selection, a.shape) drop_axes = tuple([i for i, s in enumerate(selection) if is_integer(s)]) selection = ix_(selection, a.shape) result = a[selection] if drop_axes: result = result.squeeze(axis=drop_axes) return result",Implementation of orthogonal indexing with slices and ints. "def _aggregation_op(cls, op: Callable[[tf.Tensor, Optional[Sequence[int]]], tf.Tensor], x: 'TensorFluent', vars_list: List[str]) -> 'TensorFluent': '''Returns a TensorFluent for the aggregation `op` applied to fluent `x`. Args: op: The aggregation operation. x: The input fluent. vars_list: The list of variables to be aggregated over. Returns: A TensorFluent wrapping the aggregation operator's output. ''' axis = cls._varslist2axis(x, vars_list) t = op(x.tensor, axis) scope = [] for var in x.scope.as_list(): if var not in vars_list: scope.append(var) batch = x.batch return TensorFluent(t, scope, batch=batch)","Returns a TensorFluent for the aggregation `op` applied to fluent `x`. Args: op: The aggregation operation. x: The input fluent. vars_list: The list of variables to be aggregated over. Returns: A TensorFluent wrapping the aggregation operator's output." 
"def _fields_list_to_dict(fields, option_name): """"""Takes a sequence of field names and returns a matching dictionary. [""a"", ""b""] becomes {""a"": 1, ""b"": 1} and [""a.b.c"", ""d"", ""a.c""] becomes {""a.b.c"": 1, ""d"": 1, ""a.c"": 1} """""" if isinstance(fields, collections.Mapping): return fields if isinstance(fields, collections.Sequence): if not all(isinstance(field, string_type) for field in fields): raise TypeError(""%s must be a list of key names, each an "" ""instance of %s"" % (option_name, string_type.__name__)) return dict.fromkeys(fields, 1) raise TypeError(""%s must be a mapping or "" ""list of key names"" % (option_name,))","Takes a sequence of field names and returns a matching dictionary. [""a"", ""b""] becomes {""a"": 1, ""b"": 1} and [""a.b.c"", ""d"", ""a.c""] becomes {""a.b.c"": 1, ""d"": 1, ""a.c"": 1}" "def _save_nb(nb_name): """""" Attempts to save notebook. If unsuccessful, shows a warning. """""" display(Javascript('IPython.notebook.save_checkpoint();')) display(Javascript('IPython.notebook.save_notebook();')) print('Saving notebook...', end=' ') if _wait_for_save(nb_name): print(""Saved '{}'."".format(nb_name)) else: logging.warning( ""Could not save your notebook (timed out waiting for "" ""IPython save). Make sure your notebook is saved "" ""and export again."" )","Attempts to save notebook. If unsuccessful, shows a warning." "def cli(ctx, packages, all, list, force, platform): """"""Install packages."""""" if packages: for package in packages: Installer(package, platform, force).install() elif all: # pragma: no cover packages = Resources(platform).packages for package in packages: Installer(package, platform, force).install() elif list: Resources(platform).list_packages(installed=True, notinstalled=True) else: click.secho(ctx.get_help())",Install packages. "def get_selected_object(self): """""" Gets the selected object in the treeview :return: """""" model, paths = self.tree_view.get_selection().get_selected_rows() if len(paths) == 1: return self.tree_store.get_iter(paths[0]), paths[0] else: return None, paths","Gets the selected object in the treeview :return:" "def get_next_line(self): """"""If we reach the end of the file, we simply open the next, until we \ run out of archives to process"""""" line = self.freq_file.readline().strip().split() if len(line) < 1: self.load_genotypes() line = self.freq_file.readline().strip().split() info_line = self.info_file.readline().strip().split() info = float(info_line[4]) exp_freq = float(info_line[3]) return line, info, exp_freq","If we reach the end of the file, we simply open the next, until we \ run out of archives to process" "def get_task_subtask_positions_objs(client, task_id): ''' Gets a list of the positions of a single task's subtasks Each task should (will?) only have one positions object defining how its subtasks are laid out ''' params = { 'task_id' : int(task_id) } response = client.authenticated_request(client.api.Endpoints.SUBTASK_POSITIONS, params=params) return response.json()","Gets a list of the positions of a single task's subtasks Each task should (will?) only have one positions object defining how its subtasks are laid out" "def _GetMemberForOffset(self, offset): """"""Finds the member whose data includes the provided offset. Args: offset (int): offset in the uncompressed data to find the containing member for. Returns: gzipfile.GzipMember: gzip file member or None if not available. Raises: ValueError: if the provided offset is outside of the bounds of the uncompressed data. 
"""""" if offset < 0 or offset >= self.uncompressed_data_size: raise ValueError('Offset {0:d} is larger than file size {1:d}.'.format( offset, self.uncompressed_data_size)) for end_offset, member in iter(self._members_by_end_offset.items()): if offset < end_offset: return member return None","Finds the member whose data includes the provided offset. Args: offset (int): offset in the uncompressed data to find the containing member for. Returns: gzipfile.GzipMember: gzip file member or None if not available. Raises: ValueError: if the provided offset is outside of the bounds of the uncompressed data." "def wrap_args_with_ssh_agent(self, args, ssh_key_path, ssh_auth_sock=None, silence_ssh_add=False): """""" Given an existing command line and parameterization this will return the same command line wrapped with the necessary calls to ``ssh-agent`` """""" if ssh_key_path: ssh_add_command = args2cmdline('ssh-add', ssh_key_path) if silence_ssh_add: ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null']) cmd = ' && '.join([ssh_add_command, args2cmdline('rm', '-f', ssh_key_path), args2cmdline(*args)]) args = ['ssh-agent'] if ssh_auth_sock: args.extend(['-a', ssh_auth_sock]) args.extend(['sh', '-c', cmd]) return args","Given an existing command line and parameterization this will return the same command line wrapped with the necessary calls to ``ssh-agent``" "async def connect(self, req: 'ClientRequest', traces: List['Trace'], timeout: 'ClientTimeout') -> Connection: """"""Get from pool or create new connection."""""" key = req.connection_key available = self._available_connections(key) # Wait if there are no available connections. if available <= 0: fut = self._loop.create_future() # This connection will now count towards the limit. waiters = self._waiters[key] waiters.append(fut) if traces: for trace in traces: await trace.send_connection_queued_start() try: await fut except BaseException as e: # remove a waiter even if it was cancelled, normally it's # removed when it's notified try: waiters.remove(fut) except ValueError: # fut may no longer be in list pass raise e finally: if not waiters: try: del self._waiters[key] except KeyError: # the key was evicted before. pass if traces: for trace in traces: await trace.send_connection_queued_end() proto = self._get(key) if proto is None: placeholder = cast(ResponseHandler, _TransportPlaceholder()) self._acquired.add(placeholder) self._acquired_per_host[key].add(placeholder) if traces: for trace in traces: await trace.send_connection_create_start() try: proto = await self._create_connection(req, traces, timeout) if self._closed: proto.close() raise ClientConnectionError(""Connector is closed."") except BaseException: if not self._closed: self._acquired.remove(placeholder) self._drop_acquired_per_host(key, placeholder) self._release_waiter() raise else: if not self._closed: self._acquired.remove(placeholder) self._drop_acquired_per_host(key, placeholder) if traces: for trace in traces: await trace.send_connection_create_end() else: if traces: for trace in traces: await trace.send_connection_reuseconn() self._acquired.add(proto) self._acquired_per_host[key].add(proto) return Connection(self, key, proto, self._loop)",Get from pool or create new connection. 
"def matches_glob_list(path, glob_list): """""" Given a list of glob patterns, returns a boolean indicating if a path matches any glob in the list """""" for glob in glob_list: try: if PurePath(path).match(glob): return True except TypeError: pass return False","Given a list of glob patterns, returns a boolean indicating if a path matches any glob in the list" "def cli(ctx, ftdi_enable, ftdi_disable, serial_enable, serial_disable): """"""Manage FPGA boards drivers."""""" exit_code = 0 if ftdi_enable: # pragma: no cover exit_code = Drivers().ftdi_enable() elif ftdi_disable: # pragma: no cover exit_code = Drivers().ftdi_disable() elif serial_enable: # pragma: no cover exit_code = Drivers().serial_enable() elif serial_disable: # pragma: no cover exit_code = Drivers().serial_disable() else: click.secho(ctx.get_help()) ctx.exit(exit_code)",Manage FPGA boards drivers. "def text2html_table(items:Collection[Collection[str]])->str: ""Put the texts in `items` in an HTML table, `widths` are the widths of the columns in %."" html_code = f"""""""""""" html_code += f"""""" \n \n"""""" for i in items[0]: html_code += f"" "" html_code += f"" \n \n "" html_code += "" "" for line in items[1:]: html_code += "" "" for i in line: html_code += f"" "" html_code += "" "" html_code += "" \n
{_treat_html(i)}
{_treat_html(i)}
"" return html_code","Put the texts in `items` in an HTML table, `widths` are the widths of the columns in %." "def get_value_matched_by_regex(field_name, regex_matches, string): """"""Ensure value stored in regex group exists."""""" try: value = regex_matches.group(field_name) if value is not None: return value except IndexError: pass raise MissingFieldError(string, field_name)",Ensure value stored in regex group exists. "def create_groups(self, *names, **kwargs): """"""Convenience method to create multiple groups in a single call."""""" return tuple(self.create_group(name, **kwargs) for name in names)",Convenience method to create multiple groups in a single call. "def get_values(self, dtype=None): """""" return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations """""" if is_object_dtype(dtype): return self.values.astype(object) return self.values","return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations" "def encode_list(key, list_): # type: (str, Iterable) -> Dict[str, str] """""" Converts a list into a space-separated string and puts it in a dictionary :param key: Dictionary key to store the list :param list_: A list of objects :return: A dictionary key->string or an empty dictionary """""" if not list_: return {} return {key: "" "".join(str(i) for i in list_)}","Converts a list into a space-separated string and puts it in a dictionary :param key: Dictionary key to store the list :param list_: A list of objects :return: A dictionary key->string or an empty dictionary" "def load_stl_ascii(file_obj): """""" Load an ASCII STL file from a file object. Parameters ---------- file_obj: open file- like object Returns ---------- loaded: kwargs for a Trimesh constructor with keys: vertices: (n,3) float, vertices faces: (m,3) int, indexes of vertices face_normals: (m,3) float, normal vector of each face """""" # the first line is the header header = file_obj.readline() # make sure header is a string, not bytes if hasattr(header, 'decode'): try: header = header.decode('utf-8') except BaseException: header = '' # save header to metadata metadata = {'header': header} # read all text into one string text = file_obj.read() # convert bytes to string if hasattr(text, 'decode'): text = text.decode('utf-8') # split by endsolid keyword text = text.lower().split('endsolid')[0] # create array of splits blob = np.array(text.strip().split()) # there are 21 'words' in each face face_len = 21 # length of blob should be multiple of face_len if (len(blob) % face_len) != 0: raise HeaderError('Incorrect length STL file!') face_count = int(len(blob) / face_len) # this offset is to be added to a fixed set of tiled indices offset = face_len * np.arange(face_count).reshape((-1, 1)) normal_index = np.tile([2, 3, 4], (face_count, 1)) + offset vertex_index = np.tile([8, 9, 10, 12, 13, 14, 16, 17, 18], (face_count, 1)) + offset # faces are groups of three sequential vertices faces = np.arange(face_count * 3).reshape((-1, 3)) face_normals = blob[normal_index].astype(' salt '*' pkg.remove SUNWgit salt '*' pkg.remove ,, salt '*' pkg.remove pkgs='[""foo"", ""bar""]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} try: if 'admin_source' in kwargs: adminfile = __salt__['cp.cache_file'](kwargs['admin_source'], saltenv) else: # Make tempfile to 
hold the adminfile contents. adminfile = _write_adminfile(kwargs) # Remove the package cmd = ['/usr/sbin/pkgrm', '-n', '-a', adminfile] + targets out = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='trace') if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) finally: # Remove the temp adminfile if 'admin_source' not in kwargs: try: os.remove(adminfile) except (NameError, OSError): pass return ret","Remove packages with pkgrm name The name of the package to be deleted By default salt automatically provides an adminfile, to automate package removal, with these options set:: email= instance=quit partial=nocheck runlevel=nocheck idepend=nocheck rdepend=nocheck space=nocheck setuid=nocheck conflict=nocheck action=nocheck basedir=default You can override any of these options in two ways. First you can optionally pass any of the options as a kwarg to the module/state to override the default value or you can optionally pass the 'admin_source' option providing your own adminfile to the minions. Note: You can find all of the possible options to provide to the adminfile by reading the admin man page: .. code-block:: bash man -s 4 admin Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove salt '*' pkg.remove SUNWgit salt '*' pkg.remove ,, salt '*' pkg.remove pkgs='[""foo"", ""bar""]'" "def on_about_menu_open(self, widget): """"""Open About menu"""""" self.original_widget = urwid.Overlay(self.about_menu.main_window, self.original_widget, ('relative', self.left_margin), self.about_menu.get_size()[1], ('relative', self.top_margin), self.about_menu.get_size()[0])",Open About menu "def hash(self): """""" Returns a hash of this render configuration from the variable, renderer, and time_index parameters. Used for caching the full-extent, native projection render so that subsequent requests can be served by a warp operation only. """""" renderer_str = ""{}|{}|{}|{}"".format( self.renderer.__class__.__name__, self.renderer.colormap, self.renderer.fill_value, self.renderer.background_color ) if isinstance(self.renderer, StretchedRenderer): renderer_str = ""{}|{}|{}"".format(renderer_str, self.renderer.method, self.renderer.colorspace) elif isinstance(self.renderer, UniqueValuesRenderer): renderer_str = ""{}|{}"".format(renderer_str, self.renderer.labels) return hash(""{}/{}/{}"".format(self.variable.pk, renderer_str, self.time_index))","Returns a hash of this render configuration from the variable, renderer, and time_index parameters. Used for caching the full-extent, native projection render so that subsequent requests can be served by a warp operation only." "def _parse_plugin_id(self, plugin_id): """"""Determine repository from plugin id."""""" m = re.match(""(.+)(_\d{4}_\d{2}_\d{2}_)(.+)"", plugin_id) if m: return m.group(1) m = re.match(""(.+)(_release_)(.+)"", plugin_id) if m: return m.group(1) raise RuntimeError(""Repository could not be determined from "" ""the upgrade identifier: %s."" % plugin_id)",Determine repository from plugin id. "def cache_entry(self): """""" Returns a CacheEntry instance for File. 
"""""" if self.storage_path is None: raise ValueError('This file is temporary and so a lal ' 'cache entry cannot be made') file_url = urlparse.urlunparse(['file', 'localhost', self.storage_path, None, None, None]) cache_entry = lal.utils.CacheEntry(self.ifo_string, self.tagged_description, self.segment_list.extent(), file_url) cache_entry.workflow_file = self return cache_entry",Returns a CacheEntry instance for File. "def load(stream, container=dict, comment_markers=_COMMENT_MARKERS): """""" Load and parse Java properties file given as a fiel or file-like object 'stream'. :param stream: A file or file like object of Java properties files :param container: Factory function to create a dict-like object to store properties :param comment_markers: Comment markers, e.g. '#' (hash) :return: Dict-like object holding properties >>> to_strm = anyconfig.compat.StringIO >>> s0 = ""calendar.japanese.type: LocalGregorianCalendar"" >>> load(to_strm('')) {} >>> load(to_strm(""# "" + s0)) {} >>> load(to_strm(""! "" + s0)) {} >>> load(to_strm(""calendar.japanese.type:"")) {'calendar.japanese.type': ''} >>> load(to_strm(s0)) {'calendar.japanese.type': 'LocalGregorianCalendar'} >>> load(to_strm(s0 + ""# ..."")) {'calendar.japanese.type': 'LocalGregorianCalendar# ...'} >>> s1 = r""key=a\\:b"" >>> load(to_strm(s1)) {'key': 'a:b'} >>> s2 = '''application/postscript: \\ ... x=Postscript File;y=.eps,.ps ... ''' >>> load(to_strm(s2)) {'application/postscript': 'x=Postscript File;y=.eps,.ps'} """""" ret = container() prev = """" for line in stream.readlines(): line = _pre_process_line(prev + line.strip().rstrip(), comment_markers) # I don't think later case may happen but just in case. if line is None or not line: continue prev = """" # re-initialize for later use. if line.endswith(""\\""): prev += line.rstrip("" \\"") continue (key, val) = _parseline(line) if key is None: LOGGER.warning(""Failed to parse the line: %s"", line) continue ret[key] = unescape(val) return ret","Load and parse Java properties file given as a fiel or file-like object 'stream'. :param stream: A file or file like object of Java properties files :param container: Factory function to create a dict-like object to store properties :param comment_markers: Comment markers, e.g. '#' (hash) :return: Dict-like object holding properties >>> to_strm = anyconfig.compat.StringIO >>> s0 = ""calendar.japanese.type: LocalGregorianCalendar"" >>> load(to_strm('')) {} >>> load(to_strm(""# "" + s0)) {} >>> load(to_strm(""! "" + s0)) {} >>> load(to_strm(""calendar.japanese.type:"")) {'calendar.japanese.type': ''} >>> load(to_strm(s0)) {'calendar.japanese.type': 'LocalGregorianCalendar'} >>> load(to_strm(s0 + ""# ..."")) {'calendar.japanese.type': 'LocalGregorianCalendar# ...'} >>> s1 = r""key=a\\:b"" >>> load(to_strm(s1)) {'key': 'a:b'} >>> s2 = '''application/postscript: \\ ... x=Postscript File;y=.eps,.ps ... ''' >>> load(to_strm(s2)) {'application/postscript': 'x=Postscript File;y=.eps,.ps'}" "def getImportFromObjects(node): '''Returns a list of objects referenced by import from node''' somenames = [x.asname for x in node.names if x.asname] othernames = [x.name for x in node.names if not x.asname] return somenames+othernames",Returns a list of objects referenced by import from node "def _create_gates(self, inputs, memory): """"""Create input and forget gates for this step using `inputs` and `memory`. Args: inputs: Tensor input. memory: The current state of memory. Returns: input_gate: A LSTM-like insert gate. forget_gate: A LSTM-like forget gate. 
"""""" # We'll create the input and forget gates at once. Hence, calculate double # the gate size. num_gates = 2 * self._calculate_gate_size() memory = tf.tanh(memory) inputs = basic.BatchFlatten()(inputs) gate_inputs = basic.BatchApply(basic.Linear(num_gates), n_dims=1)(inputs) gate_inputs = tf.expand_dims(gate_inputs, axis=1) gate_memory = basic.BatchApply(basic.Linear(num_gates))(memory) gates = tf.split(gate_memory + gate_inputs, num_or_size_splits=2, axis=2) input_gate, forget_gate = gates input_gate = tf.sigmoid(input_gate + self._input_bias) forget_gate = tf.sigmoid(forget_gate + self._forget_bias) return input_gate, forget_gate","Create input and forget gates for this step using `inputs` and `memory`. Args: inputs: Tensor input. memory: The current state of memory. Returns: input_gate: A LSTM-like insert gate. forget_gate: A LSTM-like forget gate." "def _operator_symbol_handler(c, ctx): """"""Handles operator symbol values within s-expressions."""""" assert c in _OPERATORS ctx.set_unicode() val = ctx.value val.append(c) c, self = yield trans = ctx.immediate_transition(self) while c in _OPERATORS: val.append(c) c, _ = yield trans yield ctx.event_transition(IonEvent, IonEventType.SCALAR, IonType.SYMBOL, val.as_symbol())",Handles operator symbol values within s-expressions. "def parse_singular_float(t, tag_name): '''Parses the sole floating point value with name tag_name in tag t. Heavy-handed with the asserts.''' pos = t.getElementsByTagName(tag_name) assert(len(pos) == 1) pos = pos[0] assert(len(pos.childNodes) == 1) return float(pos.childNodes[0].data)",Parses the sole floating point value with name tag_name in tag t. Heavy-handed with the asserts. "def info(name): ''' Get information about a service on the system Args: name (str): The name of the service. This is not the display name. Use ``get_service_name`` to find the service name. Returns: dict: A dictionary containing information about the service. CLI Example: .. 
code-block:: bash salt '*' service.info spooler ''' try: handle_scm = win32service.OpenSCManager( None, None, win32service.SC_MANAGER_CONNECT) except pywintypes.error as exc: raise CommandExecutionError( 'Failed to connect to the SCM: {0}'.format(exc.strerror)) try: handle_svc = win32service.OpenService( handle_scm, name, win32service.SERVICE_ENUMERATE_DEPENDENTS | win32service.SERVICE_INTERROGATE | win32service.SERVICE_QUERY_CONFIG | win32service.SERVICE_QUERY_STATUS) except pywintypes.error as exc: raise CommandExecutionError( 'Failed To Open {0}: {1}'.format(name, exc.strerror)) try: config_info = win32service.QueryServiceConfig(handle_svc) status_info = win32service.QueryServiceStatusEx(handle_svc) try: description = win32service.QueryServiceConfig2( handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION) except pywintypes.error: description = 'Failed to get description' delayed_start = win32service.QueryServiceConfig2( handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO) finally: win32service.CloseServiceHandle(handle_scm) win32service.CloseServiceHandle(handle_svc) ret = dict() try: sid = win32security.LookupAccountName( '', 'NT Service\\{0}'.format(name))[0] ret['sid'] = win32security.ConvertSidToStringSid(sid) except pywintypes.error: ret['sid'] = 'Failed to get SID' ret['BinaryPath'] = config_info[3] ret['LoadOrderGroup'] = config_info[4] ret['TagID'] = config_info[5] ret['Dependencies'] = config_info[6] ret['ServiceAccount'] = config_info[7] ret['DisplayName'] = config_info[8] ret['Description'] = description ret['Status_ServiceCode'] = status_info['ServiceSpecificExitCode'] ret['Status_CheckPoint'] = status_info['CheckPoint'] ret['Status_WaitHint'] = status_info['WaitHint'] ret['StartTypeDelayed'] = delayed_start flags = list() for bit in SERVICE_TYPE: if isinstance(bit, int): if config_info[0] & bit: flags.append(SERVICE_TYPE[bit]) ret['ServiceType'] = flags if flags else config_info[0] flags = list() for bit in SERVICE_CONTROLS: if status_info['ControlsAccepted'] & bit: flags.append(SERVICE_CONTROLS[bit]) ret['ControlsAccepted'] = flags if flags else status_info['ControlsAccepted'] try: ret['Status_ExitCode'] = SERVICE_ERRORS[status_info['Win32ExitCode']] except KeyError: ret['Status_ExitCode'] = status_info['Win32ExitCode'] try: ret['StartType'] = SERVICE_START_TYPE[config_info[1]] except KeyError: ret['StartType'] = config_info[1] try: ret['ErrorControl'] = SERVICE_ERROR_CONTROL[config_info[2]] except KeyError: ret['ErrorControl'] = config_info[2] try: ret['Status'] = SERVICE_STATE[status_info['CurrentState']] except KeyError: ret['Status'] = status_info['CurrentState'] return ret","Get information about a service on the system Args: name (str): The name of the service. This is not the display name. Use ``get_service_name`` to find the service name. Returns: dict: A dictionary containing information about the service. CLI Example: .. code-block:: bash salt '*' service.info spooler" "def _login(**kwargs): ''' Log in to the API and generate the authentication token. .. versionadded:: 2016.3.0 :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: On success connargs dictionary with auth token and frontend url, False on failure. 
''' connargs = dict() def _connarg(name, key=None): ''' Add key to connargs, only if name exists in our kwargs or, as zabbix. in __opts__ or __pillar__ Evaluate in said order - kwargs, opts, then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'connection_' (i.e. '_connection_user', etc.). Inspired by mysql salt module. ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = '_connection_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('zabbix.{0}'.format(name), None) if val is not None: connargs[key] = val _connarg('_connection_user', 'user') _connarg('_connection_password', 'password') _connarg('_connection_url', 'url') if 'url' not in connargs: connargs['url'] = _frontend_url() try: if connargs['user'] and connargs['password'] and connargs['url']: params = {'user': connargs['user'], 'password': connargs['password']} method = 'user.login' ret = _query(method, params, connargs['url']) auth = ret['result'] connargs['auth'] = auth connargs.pop('user', None) connargs.pop('password', None) return connargs else: raise KeyError except KeyError as err: raise SaltException('URL is probably not correct! ({})'.format(err))","Log in to the API and generate the authentication token. .. versionadded:: 2016.3.0 :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: On success connargs dictionary with auth token and frontend url, False on failure." "def get_friendly_name(cert, password): ''' Get the friendly name of the given certificate cert The certificate to install password The password for the certificate being installed formatted in the way described for openssl command in the PASS PHRASE ARGUMENTS section Note: The password given here will show up as plaintext in the returned job info. CLI Example: .. code-block:: bash salt '*' keychain.get_friendly_name /tmp/test.p12 test123 ''' cmd = 'openssl pkcs12 -in {0} -passin pass:{1} -info -nodes -nokeys 2> /dev/null | ' \ 'grep friendlyName:'.format(_quote(cert), _quote(password)) out = __salt__['cmd.run'](cmd, python_shell=True) return out.replace(""friendlyName: "", """").strip()","Get the friendly name of the given certificate cert The certificate to install password The password for the certificate being installed formatted in the way described for openssl command in the PASS PHRASE ARGUMENTS section Note: The password given here will show up as plaintext in the returned job info. CLI Example: .. code-block:: bash salt '*' keychain.get_friendly_name /tmp/test.p12 test123" "def merge(dest, src, merge_lists=False, in_place=True): ''' defaults.merge Allows deep merging of dicts in formulas. merge_lists : False If True, it will also merge lists instead of replace their items. in_place : True If True, it will merge into dest dict, if not it will make a new copy from that dict and return it. CLI Example: .. code-block:: bash salt '*' default.merge a=b d=e It is more typical to use this in a templating language in formulas, instead of directly on the command-line. 
''' if in_place: merged = dest else: merged = copy.deepcopy(dest) return dictupdate.update(merged, src, merge_lists=merge_lists)","defaults.merge Allows deep merging of dicts in formulas. merge_lists : False If True, it will also merge lists instead of replace their items. in_place : True If True, it will merge into dest dict, if not it will make a new copy from that dict and return it. CLI Example: .. code-block:: bash salt '*' default.merge a=b d=e It is more typical to use this in a templating language in formulas, instead of directly on the command-line." "def _process_path_prefix(path_prefix): """"""Validate and process a Google Cloud Stoarge path prefix. Args: path_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix' or '/bucket/' or '/bucket'. Raises: ValueError: if path is invalid. Returns: a tuple of /bucket and prefix. prefix can be None. """""" _validate_path(path_prefix) if not _GCS_PATH_PREFIX_REGEX.match(path_prefix): raise ValueError('Path prefix should have format /bucket, /bucket/, ' 'or /bucket/prefix but got %s.' % path_prefix) bucket_name_end = path_prefix.find('/', 1) bucket = path_prefix prefix = None if bucket_name_end != -1: bucket = path_prefix[:bucket_name_end] prefix = path_prefix[bucket_name_end + 1:] or None return bucket, prefix","Validate and process a Google Cloud Stoarge path prefix. Args: path_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix' or '/bucket/' or '/bucket'. Raises: ValueError: if path is invalid. Returns: a tuple of /bucket and prefix. prefix can be None." "def update_from_json(self, json_device): """"""Set all attributes based on API response."""""" self.identifier = json_device['Id'] self.license_plate = json_device['EquipmentHeader']['SerialNumber'] self.make = json_device['EquipmentHeader']['Make'] self.model = json_device['EquipmentHeader']['Model'] self.equipment_id = json_device['EquipmentHeader']['EquipmentID'] self.active = json_device['EngineRunning'] self.odo = json_device['Odometer'] self.latitude = json_device['Location']['Latitude'] self.longitude = json_device['Location']['Longitude'] self.altitude = json_device['Location']['Altitude'] self.speed = json_device['Speed'] self.last_seen = json_device['Location']['DateTime']",Set all attributes based on API response. "def fit(self, X, ranks, replicates=1, verbose=True): """""" Fits CP tensor decompositions for different choices of rank. Parameters ---------- X : array_like Real tensor ranks : int, or iterable iterable specifying number of components in each model replicates: int number of models to fit at each rank verbose : bool If True, prints summaries and optimization progress. """""" # Make ranks iterable if necessary. if not isinstance(ranks, collections.Iterable): ranks = (ranks,) # Iterate over model ranks, optimize multiple replicates at each rank. for r in ranks: # Initialize storage if r not in self.results: self.results[r] = [] # Display fitting progress. if verbose: itr = trange(replicates, desc='Fitting rank-{} models'.format(r), leave=False) else: itr = range(replicates) # Fit replicates. for i in itr: model_fit = self._fit_method(X, r, **self._fit_options) self.results[r].append(model_fit) # Print summary of results. 
if verbose: min_obj = min([res.obj for res in self.results[r]]) max_obj = max([res.obj for res in self.results[r]]) elapsed = sum([res.total_time for res in self.results[r]]) print('Rank-{} models: min obj, {:.2f}; ' 'max obj, {:.2f}; time to fit, ' '{:.1f}s'.format(r, min_obj, max_obj, elapsed)) # Sort results from lowest to largest loss. for r in ranks: idx = np.argsort([result.obj for result in self.results[r]]) self.results[r] = [self.results[r][i] for i in idx] # Align best model within each rank to best model of next larger rank. # Here r0 is the rank of the lower-dimensional model and r1 is the rank # of the high-dimensional model. for i in reversed(range(1, len(ranks))): r0, r1 = ranks[i-1], ranks[i] U = self.results[r0][0].factors V = self.results[r1][0].factors kruskal_align(U, V, permute_U=True) # For each rank, align everything to the best model for r in ranks: # store best factors U = self.results[r][0].factors # best model factors self.results[r][0].similarity = 1.0 # similarity to itself # align lesser fit models to best models for res in self.results[r][1:]: res.similarity = kruskal_align(U, res.factors, permute_V=True)","Fits CP tensor decompositions for different choices of rank. Parameters ---------- X : array_like Real tensor ranks : int, or iterable iterable specifying number of components in each model replicates: int number of models to fit at each rank verbose : bool If True, prints summaries and optimization progress." "def __fetch_heatmap_data_from_profile(self): """"""Method to create heatmap data from profile information."""""" # Read lines from file. with open(self.pyfile.path, ""r"") as file_to_read: for line in file_to_read: # Remove return char from the end of the line and add a # space in the beginning for better visibility. self.pyfile.lines.append("" "" + line.strip(""\n"")) # Total number of lines in file. self.pyfile.length = len(self.pyfile.lines) # Fetch line profiles. line_profiles = self.__get_line_profile_data() # Creating an array of data points. As the profile keys are 1 indexed # we should range from 1 to line_count + 1 and not 0 to line_count. arr = [] for line_num in range(1, self.pyfile.length + 1): if line_num in line_profiles: # line_profiles[i] will have multiple entries if line i is # invoked from multiple places in the code. Here we sum over # each invocation to get the total time spent on that line. line_times = [ ltime for _, ltime in line_profiles[line_num].values() ] arr.append([sum(line_times)]) else: arr.append([0.0]) # Create nd-array from list of data points. self.pyfile.data = np.array(arr)",Method to create heatmap data from profile information. "async def _process_2auth_form(self, html: str) -> (str, str): """""" Parsing two-factor authorization page and filling the code :param html: html page :return: url and html from redirected page """""" # Parse page p = TwoFactorCodePageParser() p.feed(html) p.close() # Prepare request data form_url = p.url form_data = dict(p.inputs) form_data['remember'] = 0 if p.message: raise VkAuthError('invalid_data', p.message, form_url, form_data) form_data['code'] = await self.enter_confirmation_code() # Send request url, html = await self.driver.post_text(form_url, form_data) return url, html","Parsing two-factor authorization page and filling the code :param html: html page :return: url and html from redirected page" "def extents(self): """""" The length, width, and height of the bounding box of the mesh. 
Returns ----------- extents : (3,) float Array containing axis aligned [length, width, height] """""" extents = self.bounds.ptp(axis=0) extents.flags.writeable = False return extents","The length, width, and height of the bounding box of the mesh. Returns ----------- extents : (3,) float Array containing axis aligned [length, width, height]" "def _create_font_size_combo(self): """"""Creates font size combo box"""""" self.std_font_sizes = config[""font_default_sizes""] font_size = str(get_default_font().GetPointSize()) self.font_size_combo = \ wx.ComboBox(self, -1, value=font_size, size=(60, -1), choices=map(unicode, self.std_font_sizes), style=wx.CB_DROPDOWN | wx.TE_PROCESS_ENTER) self.font_size_combo.SetToolTipString(_(u""Text size\n(points)"")) self.AddControl(self.font_size_combo) self.Bind(wx.EVT_COMBOBOX, self.OnTextSize, self.font_size_combo) self.Bind(wx.EVT_TEXT_ENTER, self.OnTextSize, self.font_size_combo)",Creates font size combo box "def url(self): """"""Gets the url for the resource this model represents. It will just use the 'href' passed in to the constructor if that exists. Otherwise, it will generated it based on the collection's url and the model's identifier. """""" if self._href is not None: return self._href if self.identifier: # for some reason atlas does not use classifications here in the path when considering one classification path = '/'.join([self.parent.url.replace('classifications/', 'classficiation/'), self.identifier]) return path raise exceptions.ClientError(""Not able to determine object URL"")","Gets the url for the resource this model represents. It will just use the 'href' passed in to the constructor if that exists. Otherwise, it will generated it based on the collection's url and the model's identifier." "def read_stats(self): """""" Read current ports statistics from chassis. :return: dictionary {port name {group name, {stat name: stat value}}} """""" self.statistics = TgnObjectsDict() for port in self.session.ports.values(): self.statistics[port] = port.read_port_stats() return self.statistics","Read current ports statistics from chassis. :return: dictionary {port name {group name, {stat name: stat value}}}" "def load_kb_mappings_file(kbname, kbfile, separator): """"""Add KB values from file to given KB returning rows added."""""" num_added = 0 with open(kbfile) as kb_fd: for line in kb_fd: if not line.strip(): continue try: key, value = line.split(separator) except ValueError: # bad split, pass current_app.logger.error(""Error splitting: {0}"".format(line)) continue add_kb_mapping(kbname, key, value) num_added += 1 return num_added",Add KB values from file to given KB returning rows added. "def NCBISequenceLinkURL(title, default=None): """""" Given a sequence title, like ""gi|42768646|gb|AY516849.1| Homo sapiens"", return the URL of a link to the info page at NCBI. title: the sequence title to produce a link URL for. default: the value to return if the title cannot be parsed. """""" try: ref = title.split('|')[3].split('.')[0] except IndexError: return default else: return 'http://www.ncbi.nlm.nih.gov/nuccore/%s' % (ref,)","Given a sequence title, like ""gi|42768646|gb|AY516849.1| Homo sapiens"", return the URL of a link to the info page at NCBI. title: the sequence title to produce a link URL for. default: the value to return if the title cannot be parsed." 
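NCBISequenceLinkURL above does nothing more than slice the accession out of a pipe-delimited sequence title and drop its version suffix; here is a minimal standalone sketch of that behaviour (the lowercase helper name is illustrative and not part of the dataset):

def ncbi_nuccore_url(title, default=None):
    # Mirrors NCBISequenceLinkURL above: take the 4th '|'-separated field,
    # strip the '.1'-style version suffix, and build the nuccore URL.
    try:
        ref = title.split('|')[3].split('.')[0]
    except IndexError:
        return default
    return 'http://www.ncbi.nlm.nih.gov/nuccore/%s' % (ref,)

print(ncbi_nuccore_url('gi|42768646|gb|AY516849.1| Homo sapiens'))
# http://www.ncbi.nlm.nih.gov/nuccore/AY516849
print(ncbi_nuccore_url('no pipes here'))
# None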
"def rle_encode(img:NPArrayMask)->str: ""Return run-length encoding string from `img`."" pixels = np.concatenate([[0], img.flatten() , [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return ' '.join(str(x) for x in runs)",Return run-length encoding string from `img`. "def _find_dot_net_versions(self, bits): """""" Find Microsoft .NET Framework versions. Parameters ---------- bits: int Platform number of bits: 32 or 64. """""" # Find actual .NET version in registry reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) dot_net_dir = getattr(self, 'FrameworkDir%d' % bits) ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or '' # Set .NET versions for specified MSVC++ version if self.vc_ver >= 12.0: frameworkver = (ver, 'v4.0') elif self.vc_ver >= 10.0: frameworkver = ('v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5') elif self.vc_ver == 9.0: frameworkver = ('v3.5', 'v2.0.50727') if self.vc_ver == 8.0: frameworkver = ('v3.0', 'v2.0.50727') return frameworkver","Find Microsoft .NET Framework versions. Parameters ---------- bits: int Platform number of bits: 32 or 64." "def _get_redirect_url(self, request): """""" Next gathered from session, then GET, then POST, then users absolute url. """""" if 'next' in request.session: next_url = request.session['next'] del request.session['next'] elif 'next' in request.GET: next_url = request.GET.get('next') elif 'next' in request.POST: next_url = request.POST.get('next') else: next_url = request.user.get_absolute_url() if not next_url: next_url = '/' return next_url","Next gathered from session, then GET, then POST, then users absolute url." "def set_query_latlon(self): ''' Sets the NCSS query location latitude and longitude. ''' if (isinstance(self.longitude, list) and isinstance(self.latitude, list)): self.lbox = True # west, east, south, north self.query.lonlat_box(self.longitude[0], self.longitude[1], self.latitude[0], self.latitude[1]) else: self.lbox = False self.query.lonlat_point(self.longitude, self.latitude)",Sets the NCSS query location latitude and longitude. "def load_credential_file(self, path): """"""Load a credential file as is setup like the Java utilities"""""" c_data = StringIO.StringIO() c_data.write(""[Credentials]\n"") for line in open(path, ""r"").readlines(): c_data.write(line.replace(""AWSAccessKeyId"", ""aws_access_key_id"").replace(""AWSSecretKey"", ""aws_secret_access_key"")) c_data.seek(0) self.readfp(c_data)",Load a credential file as is setup like the Java utilities "def close(self): """"""Close the _Stream object. No operation should be done on it afterwards. """""" if self.closed: return if self.mode == ""w"" and self.comptype != ""tar"": self.buf += self.cmp.flush() if self.mode == ""w"" and self.buf: self.fileobj.write(self.buf) self.buf = b"""" if self.comptype == ""gz"": # The native zlib crc is an unsigned 32-bit integer, but # the Python wrapper implicitly casts that to a signed C # long. So, on a 32-bit box self.crc may ""look negative"", # while the same crc on a 64-bit box may ""look positive"". # To avoid irksome warnings from the `struct` module, force # it to look positive on all boxes. self.fileobj.write(struct.pack("">> from pgmpy.factors.distributions import CustomDistribution >>> from scipy.stats import multivariate_normal >>> normal_pdf = lambda x1, x2: multivariate_normal.pdf( ... x=[x1, x2], mean=[0, 0], cov=[[1, 0], [0, 1]]) >>> normal_dist = CustomDistribution(variables=['x1', 'x2'], ... 
distribution=normal_pdf) >>> normal_dist.variables ['x1', 'x2'] >>> normal_dist.assignment(1, 1) 0.058549831524319168 >>> normal_dist.marginalize(['x2']) >>> normal_dist.variables ['x1'] >>> normal_dist.assignment(1) 0.24197072451914328 """""" if len(variables) == 0: raise ValueError(""Shouldn't be calling marginalize over no variable."") if not isinstance(variables, (list, tuple, np.ndarray)): raise TypeError(""variables: Expected type iterable, "" ""got: {var_type}"".format(var_type=type(variables))) for var in variables: if var not in self.variables: raise ValueError(""{var} not in scope."".format(var=var)) phi = self if inplace else self.copy() all_var = [var for var in self.variables] var_to_keep = [var for var in self.variables if var not in variables] reordered_var_index = [all_var.index(var) for var in variables + var_to_keep] pdf = phi._pdf # The arguments need to be reordered because integrate.nquad # integrates the first n-arguments of the function passed. def reordered_pdf(*args): # ordered_args restores the original order as it was in self.variables ordered_args = [args[reordered_var_index.index(index_id)] for index_id in range(len(all_var))] return pdf(*ordered_args) def marginalized_pdf(*args): return integrate.nquad(reordered_pdf, [[-np.inf, np.inf] for i in range(len(variables))], args=args)[0] phi._pdf = marginalized_pdf phi.variables = var_to_keep if not inplace: return phi","Marginalize the distribution with respect to the given variables. Parameters ---------- variables: list, array-like List of variables to be removed from the marginalized distribution. inplace: boolean If inplace=True it will modify the factor itself, else would return a new CustomDistribution instance. Returns ------- Marginalized distribution or None: if inplace=True (default) returns None if inplace=False returns a new CustomDistribution instance. Examples -------- >>> from pgmpy.factors.distributions import CustomDistribution >>> from scipy.stats import multivariate_normal >>> normal_pdf = lambda x1, x2: multivariate_normal.pdf( ... x=[x1, x2], mean=[0, 0], cov=[[1, 0], [0, 1]]) >>> normal_dist = CustomDistribution(variables=['x1', 'x2'], ... distribution=normal_pdf) >>> normal_dist.variables ['x1', 'x2'] >>> normal_dist.assignment(1, 1) 0.058549831524319168 >>> normal_dist.marginalize(['x2']) >>> normal_dist.variables ['x1'] >>> normal_dist.assignment(1) 0.24197072451914328" "def decrypt_dynamodb_item(item, crypto_config): # type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM """"""Decrypt a DynamoDB item. >>> from dynamodb_encryption_sdk.encrypted.item import decrypt_python_item >>> encrypted_item = { ... 'some': {'B': b'ENCRYPTED_DATA'}, ... 'more': {'B': b'ENCRYPTED_DATA'} ... } >>> decrypted_item = decrypt_python_item( ... item=encrypted_item, ... crypto_config=my_crypto_config ... ) .. note:: This handles DynamoDB-formatted items and is for use with the boto3 DynamoDB client. :param dict item: Encrypted and signed DynamoDB item :param CryptoConfig crypto_config: Cryptographic configuration :returns: Plaintext DynamoDB item :rtype: dict """""" unique_actions = set([crypto_config.attribute_actions.default_action.name]) unique_actions.update(set([action.name for action in crypto_config.attribute_actions.attribute_actions.values()])) if crypto_config.attribute_actions.take_no_actions: # If we explicitly have been told not to do anything to this item, just copy it. 
return item.copy() try: signature_attribute = item.pop(ReservedAttributes.SIGNATURE.value) except KeyError: # The signature is always written, so if no signature is found then the item was not # encrypted or signed. raise DecryptionError(""No signature attribute found in item"") inner_crypto_config = crypto_config.copy() # Retrieve the material description from the item if found. try: material_description_attribute = item.pop(ReservedAttributes.MATERIAL_DESCRIPTION.value) except KeyError: # If no material description is found, we use inner_crypto_config as-is. pass else: # If material description is found, override the material description in inner_crypto_config. material_description = deserialize_material_description(material_description_attribute) inner_crypto_config.encryption_context.material_description = material_description decryption_materials = inner_crypto_config.decryption_materials() verify_item_signature(signature_attribute, item, decryption_materials.verification_key, inner_crypto_config) try: decryption_key = decryption_materials.decryption_key except AttributeError: if inner_crypto_config.attribute_actions.contains_action(CryptoAction.ENCRYPT_AND_SIGN): raise DecryptionError( ""Attribute actions ask for some attributes to be decrypted but no decryption key is available"" ) return item.copy() decryption_mode = inner_crypto_config.encryption_context.material_description.get( MaterialDescriptionKeys.ATTRIBUTE_ENCRYPTION_MODE.value ) algorithm_descriptor = decryption_key.algorithm + decryption_mode # Once the signature has been verified, actually decrypt the item attributes. decrypted_item = {} for name, attribute in item.items(): if inner_crypto_config.attribute_actions.action(name) is CryptoAction.ENCRYPT_AND_SIGN: decrypted_item[name] = decrypt_attribute( attribute_name=name, attribute=attribute, decryption_key=decryption_key, algorithm=algorithm_descriptor ) else: decrypted_item[name] = attribute.copy() return decrypted_item","Decrypt a DynamoDB item. >>> from dynamodb_encryption_sdk.encrypted.item import decrypt_python_item >>> encrypted_item = { ... 'some': {'B': b'ENCRYPTED_DATA'}, ... 'more': {'B': b'ENCRYPTED_DATA'} ... } >>> decrypted_item = decrypt_python_item( ... item=encrypted_item, ... crypto_config=my_crypto_config ... ) .. note:: This handles DynamoDB-formatted items and is for use with the boto3 DynamoDB client. :param dict item: Encrypted and signed DynamoDB item :param CryptoConfig crypto_config: Cryptographic configuration :returns: Plaintext DynamoDB item :rtype: dict" "def mmcif(self): """""" Filepath for mmcif file associated with code. Notes ----- Downloads mmcif file if not already present. Returns ------- mmcif_file : str Filepath for the mmcif file. """""" mmcif_dir = os.path.join(self.parent_dir, 'mmcif') if not os.path.exists(mmcif_dir): os.makedirs(mmcif_dir) mmcif_file_name = '{0}.cif'.format(self.code) mmcif_file = os.path.join(mmcif_dir, mmcif_file_name) if not os.path.exists(mmcif_file): get_mmcif(code=self.code, outfile=mmcif_file) return mmcif_file","Filepath for mmcif file associated with code. Notes ----- Downloads mmcif file if not already present. Returns ------- mmcif_file : str Filepath for the mmcif file." 
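The mmcif property just above is a lazy download-and-cache pattern: derive the target path, create the directory, and fetch only when the file is missing. A standalone sketch of that pattern under assumed names (ensure_local_cif and the fetch argument are ours; the original calls its own get_mmcif helper):

import os

def ensure_local_cif(parent_dir, code, fetch):
    # Same shape as the mmcif property: build the path, make sure the
    # directory exists, and download only if the file is absent.
    mmcif_dir = os.path.join(parent_dir, 'mmcif')
    if not os.path.exists(mmcif_dir):
        os.makedirs(mmcif_dir)
    mmcif_file = os.path.join(mmcif_dir, '{0}.cif'.format(code))
    if not os.path.exists(mmcif_file):
        fetch(code=code, outfile=mmcif_file)
    return mmcif_file

# e.g. ensure_local_cif('/tmp/structures', '1abc', get_mmcif), where get_mmcif
# is whatever downloader the surrounding package provides.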
"def blit(self, surface, pos=(0, 0)): """""" Blits a surface on the screen at pos :param surface: Surface to blit :param pos: Top left corner to start blitting :type surface: Surface :type pos: tuple """""" for x in range(surface.width): for y in range(surface.height): point = (x + pos[0], y + pos[1]) if self.point_on_screen(point): self.matrix[point[0]][point[1]] = surface.matrix[x][y]","Blits a surface on the screen at pos :param surface: Surface to blit :param pos: Top left corner to start blitting :type surface: Surface :type pos: tuple" "def get_actor_by_ain(self, ain): """""" Return a actor identified by it's ain or return None """""" for actor in self.get_actors(): if actor.actor_id == ain: return actor",Return a actor identified by it's ain or return None "def _build_query(self, uri, params=None, action_token_type=None): """"""Prepare query string"""""" if params is None: params = QueryParams() params['response_format'] = 'json' session_token = None if action_token_type in self._action_tokens: # Favor action token using_action_token = True session_token = self._action_tokens[action_token_type] else: using_action_token = False if self._session: session_token = self._session['session_token'] if session_token: params['session_token'] = session_token # make order of parameters predictable for testing keys = list(params.keys()) keys.sort() query = urlencode([tuple([key, params[key]]) for key in keys]) if not using_action_token and self._session: secret_key_mod = int(self._session['secret_key']) % 256 signature_base = (str(secret_key_mod) + self._session['time'] + uri + '?' + query).encode('ascii') query += '&signature=' + hashlib.md5(signature_base).hexdigest() return query",Prepare query string "def calibrate_cameras(self): """"""Calibrate cameras based on found chessboard corners."""""" criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5) flags = (cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST + cv2.CALIB_SAME_FOCAL_LENGTH) calib = StereoCalibration() (calib.cam_mats[""left""], calib.dist_coefs[""left""], calib.cam_mats[""right""], calib.dist_coefs[""right""], calib.rot_mat, calib.trans_vec, calib.e_mat, calib.f_mat) = cv2.stereoCalibrate(self.object_points, self.image_points[""left""], self.image_points[""right""], self.image_size, calib.cam_mats[""left""], calib.dist_coefs[""left""], calib.cam_mats[""right""], calib.dist_coefs[""right""], calib.rot_mat, calib.trans_vec, calib.e_mat, calib.f_mat, criteria=criteria, flags=flags)[1:] (calib.rect_trans[""left""], calib.rect_trans[""right""], calib.proj_mats[""left""], calib.proj_mats[""right""], calib.disp_to_depth_mat, calib.valid_boxes[""left""], calib.valid_boxes[""right""]) = cv2.stereoRectify(calib.cam_mats[""left""], calib.dist_coefs[""left""], calib.cam_mats[""right""], calib.dist_coefs[""right""], self.image_size, calib.rot_mat, calib.trans_vec, flags=0) for side in (""left"", ""right""): (calib.undistortion_map[side], calib.rectification_map[side]) = cv2.initUndistortRectifyMap( calib.cam_mats[side], calib.dist_coefs[side], calib.rect_trans[side], calib.proj_mats[side], self.image_size, cv2.CV_32FC1) # This is replaced because my results were always bad. Estimates are # taken from the OpenCV samples. width, height = self.image_size focal_length = 0.8 * width calib.disp_to_depth_mat = np.float32([[1, 0, 0, -0.5 * width], [0, -1, 0, 0.5 * height], [0, 0, 0, -focal_length], [0, 0, 1, 0]]) return calib",Calibrate cameras based on found chessboard corners. 
"def check_enabled(self): """"""This method will be used to verify that a plugin should execute given the condition of the underlying environment. The default implementation will return True if none of class.files, class.packages, nor class.commands is specified. If any of these is specified the plugin will check for the existence of any of the corresponding paths, packages or commands and return True if any are present. For SCLPlugin subclasses, it will check whether the plugin can be run for any of installed SCLs. If so, it will store names of these SCLs on the plugin class in addition to returning True. For plugins with more complex enablement checks this method may be overridden. """""" # some files or packages have been specified for this package if any([self.files, self.packages, self.commands, self.kernel_mods, self.services]): if isinstance(self.files, six.string_types): self.files = [self.files] if isinstance(self.packages, six.string_types): self.packages = [self.packages] if isinstance(self.commands, six.string_types): self.commands = [self.commands] if isinstance(self.kernel_mods, six.string_types): self.kernel_mods = [self.kernel_mods] if isinstance(self.services, six.string_types): self.services = [self.services] if isinstance(self, SCLPlugin): # save SCLs that match files or packages type(self)._scls_matched = [] for scl in self._get_scls(): files = [f % {""scl_name"": scl} for f in self.files] packages = [p % {""scl_name"": scl} for p in self.packages] commands = [c % {""scl_name"": scl} for c in self.commands] services = [s % {""scl_name"": scl} for s in self.services] if self._check_plugin_triggers(files, packages, commands, services): type(self)._scls_matched.append(scl) return len(type(self)._scls_matched) > 0 return self._check_plugin_triggers(self.files, self.packages, self.commands, self.services) if isinstance(self, SCLPlugin): # if files and packages weren't specified, we take all SCLs type(self)._scls_matched = self._get_scls() return True","This method will be used to verify that a plugin should execute given the condition of the underlying environment. The default implementation will return True if none of class.files, class.packages, nor class.commands is specified. If any of these is specified the plugin will check for the existence of any of the corresponding paths, packages or commands and return True if any are present. For SCLPlugin subclasses, it will check whether the plugin can be run for any of installed SCLs. If so, it will store names of these SCLs on the plugin class in addition to returning True. For plugins with more complex enablement checks this method may be overridden." "def address(self) -> str: """"""Generate a random full address. :return: Full address. """""" fmt = self._data['address_fmt'] st_num = self.street_number() st_name = self.street_name() if self.locale in SHORTENED_ADDRESS_FMT: return fmt.format( st_num=st_num, st_name=st_name, ) if self.locale == 'ja': return fmt.format( self.random.choice(self._data['city']), # Generate list of random integers # in amount of 3, from 1 to 100. *self.random.randints(amount=3, a=1, b=100), ) return fmt.format( st_num=st_num, st_name=st_name, st_sfx=self.street_suffix(), )","Generate a random full address. :return: Full address." 
"def _add_compounds(self, variant_obj, info_dict): """"""Check if there are any compounds and add them to the variant The compounds that are added should be sorted on rank score """""" compound_list = [] compound_entry = info_dict.get('Compounds') if compound_entry: for family_annotation in compound_entry.split(','): compounds = family_annotation.split(':')[-1].split('|') for compound in compounds: splitted_compound = compound.split('>') compound_score = None if len(splitted_compound) > 1: compound_id = splitted_compound[0] compound_score = int(splitted_compound[-1]) compound_list.append(Compound( variant_id=compound_id, combined_score=compound_score ) ) #Sort the compounds based on rank score compound_list.sort(key = operator.attrgetter('combined_score'), reverse=True) for compound in compound_list: variant_obj.add_compound(compound)","Check if there are any compounds and add them to the variant The compounds that are added should be sorted on rank score" "def clean_single_dict(indict, prepend_to_keys=None, remove_keys_containing=None): """"""Clean a dict with values that contain single item iterators to single items Args: indict (dict): Dictionary to be cleaned prepend_to_keys (str): String to prepend to all keys remove_keys_containing (str): Text to check for in keys to ignore Returns: dict: Cleaned dictionary Examples: >>> clean_single_dict(indict={'test1': [1], 'test2': ['H']}) {'test1': 1, 'test2': 'H'} >>> clean_single_dict(indict={'test1': [1], 'test2': ['H']}, prepend_to_keys='struct_') {'struct_test1': 1, 'struct_test2': 'H'} >>> clean_single_dict(indict={'test1': [1], 'ignore': ['H']}, prepend_to_keys='struct_', remove_keys_containing='ignore') {'struct_test1': 1} """""" if not prepend_to_keys: prepend_to_keys = '' outdict = {} for k, v in indict.items(): if remove_keys_containing: if remove_keys_containing in k: continue outdict[prepend_to_keys + k] = v[0] return outdict","Clean a dict with values that contain single item iterators to single items Args: indict (dict): Dictionary to be cleaned prepend_to_keys (str): String to prepend to all keys remove_keys_containing (str): Text to check for in keys to ignore Returns: dict: Cleaned dictionary Examples: >>> clean_single_dict(indict={'test1': [1], 'test2': ['H']}) {'test1': 1, 'test2': 'H'} >>> clean_single_dict(indict={'test1': [1], 'test2': ['H']}, prepend_to_keys='struct_') {'struct_test1': 1, 'struct_test2': 'H'} >>> clean_single_dict(indict={'test1': [1], 'ignore': ['H']}, prepend_to_keys='struct_', remove_keys_containing='ignore') {'struct_test1': 1}" "def pyephem_earthsun_distance(time): """""" Calculates the distance from the earth to the sun using pyephem. Parameters ---------- time : pd.DatetimeIndex Returns ------- pd.Series. Earth-sun distance in AU. """""" import ephem sun = ephem.Sun() earthsun = [] for thetime in time: sun.compute(ephem.Date(thetime)) earthsun.append(sun.earth_distance) return pd.Series(earthsun, index=time)","Calculates the distance from the earth to the sun using pyephem. Parameters ---------- time : pd.DatetimeIndex Returns ------- pd.Series. Earth-sun distance in AU." 
"def temporal_from_rdf(period_of_time): '''Failsafe parsing of a temporal coverage''' try: if isinstance(period_of_time, Literal): return temporal_from_literal(str(period_of_time)) elif isinstance(period_of_time, RdfResource): return temporal_from_resource(period_of_time) except Exception: # There are a lot of cases where parsing could/should fail # but we never want to break the whole dataset parsing # so we log the error for future investigation and improvement log.warning('Unable to parse temporal coverage', exc_info=True)",Failsafe parsing of a temporal coverage "def asgray(im): """""" Takes an image and returns its grayscale version by averaging the color channels. if an alpha channel is present, it will simply be ignored. If a grayscale image is given, the original image is returned. Parameters ---------- image : ndarray, ndim 2 or 3 RGB or grayscale image. Returns ------- gray_image : ndarray, ndim 2 Grayscale version of image. """""" if im.ndim == 2: return im elif im.ndim == 3 and im.shape[2] in (3, 4): return im[..., :3].mean(axis=-1) else: raise ValueError('Invalid image format')","Takes an image and returns its grayscale version by averaging the color channels. if an alpha channel is present, it will simply be ignored. If a grayscale image is given, the original image is returned. Parameters ---------- image : ndarray, ndim 2 or 3 RGB or grayscale image. Returns ------- gray_image : ndarray, ndim 2 Grayscale version of image." "def provision_system_config(items, database_name, overwrite=False, clear=False, skip_user_check=False): """"""Provision a basic system configuration"""""" from hfos.provisions.base import provisionList from hfos.database import objectmodels default_system_config_count = objectmodels['systemconfig'].count({ 'name': 'Default System Configuration'}) if default_system_config_count == 0 or (clear or overwrite): provisionList([SystemConfiguration], 'systemconfig', overwrite, clear, skip_user_check) hfoslog('Provisioning: System: Done.', emitter='PROVISIONS') else: hfoslog('Default system configuration already present.', lvl=warn, emitter='PROVISIONS')",Provision a basic system configuration "def parse_info(response): ""Parse the result of Redis's INFO command into a Python dict"" info = {} response = nativestr(response) def get_value(value): if ',' not in value or '=' not in value: try: if '.' 
in value: return float(value) else: return int(value) except ValueError: return value else: sub_dict = {} for item in value.split(','): k, v = item.rsplit('=', 1) sub_dict[k] = get_value(v) return sub_dict for line in response.splitlines(): if line and not line.startswith('#'): if line.find(':') != -1: key, value = line.split(':', 1) info[key] = get_value(value) else: # if the line isn't splittable, append it to the ""__raw__"" key info.setdefault('__raw__', []).append(line) return info",Parse the result of Redis's INFO command into a Python dict "def get_background_sids(self, src_filter): """""" We can apply the filtering of the background sites as a pre-processing step - this is done here rather than in the sampling of the ruptures themselves """""" branch_key = self.idx_set[""grid_key""] idist = src_filter.integration_distance(DEFAULT_TRT) with h5py.File(self.source_file, 'r') as hdf5: bg_locations = hdf5[""Grid/Locations""].value distances = min_geodetic_distance( src_filter.sitecol.xyz, (bg_locations[:, 0], bg_locations[:, 1])) # Add buffer equal to half of length of median area from Mmax mmax_areas = self.msr.get_median_area( hdf5[""/"".join([""Grid"", branch_key, ""MMax""])].value, 0.0) # for instance hdf5['Grid/FM0_0_MEANFS_MEANMSR/MMax'] mmax_lengths = numpy.sqrt(mmax_areas / self.aspect) ok = distances <= (0.5 * mmax_lengths + idist) # get list of indices from array of booleans return numpy.where(ok)[0].tolist()","We can apply the filtering of the background sites as a pre-processing step - this is done here rather than in the sampling of the ruptures themselves" "def get_segments(self): """"""Get segments for analysis. Creates instance of trans.Segments."""""" # Chunking chunk = {k: v.isChecked() for k, v in self.chunk.items()} lock_to_staging = self.lock_to_staging.get_value() epoch_dur = self.epoch_param['dur'].get_value() epoch_overlap = self.epoch_param['overlap_val'].value() epoch_step = None epoch = None if chunk['epoch']: if lock_to_staging: epoch = 'locked' else: epoch = 'unlocked' if self.epoch_param['step'].isChecked(): epoch_step = self.epoch_param['step_val'].get_value() if epoch_step <= 0: epoch_step = 0.1 # Which channel(s) self.chan = self.get_channels() # chan name without group if not self.chan: return # Which event type(s) chan_full = None evt_type = None if chunk['event']: if self.evt_chan_only.get_value(): chan_full = [i + ' (' + self.idx_group.currentText() + '' ')' for i in self.chan] evt_type = self.idx_evt_type.selectedItems() if not evt_type: return else: evt_type = [x.text() for x in evt_type] # Which cycle(s) cycle = self.cycle = self.get_cycles() # Which stage(s) stage = self.idx_stage.selectedItems() if not stage: stage = self.stage = None else: stage = self.stage = [ x.text() for x in self.idx_stage.selectedItems()] # Concatenation cat = {k: v.get_value() for k, v in self.cat.items()} cat = (int(cat['cycle']), int(cat['stage']), int(cat['discontinuous']), int(cat['evt_type'])) # Artefact event rejection reject_event = self.reject_event.get_value() if reject_event == 'channel-specific': chan_full = [i + ' (' + self.idx_group.currentText() + '' ')' for i in self.chan] reject_artf = True elif reject_event == 'from any channel': reject_artf = True else: reject_artf = False # Other options min_dur = self.min_dur.get_value() reject_epoch = self.reject_epoch.get_value() # Generate title for summary plot self.title = self.make_title(chan_full, cycle, stage, evt_type) segments = fetch(self.parent.info.dataset, self.parent.notes.annot, cat=cat, 
evt_type=evt_type, stage=stage, cycle=cycle, chan_full=chan_full, epoch=epoch, epoch_dur=epoch_dur, epoch_overlap=epoch_overlap, epoch_step=epoch_step, reject_epoch=reject_epoch, reject_artf=reject_artf, min_dur=min_dur) return segments",Get segments for analysis. Creates instance of trans.Segments. "def _get(self, i): """""" Retrieve data for the ith file in the dataset. """""" with open(os.path.join(self.path, self.elem, self.files[i]), 'r') as f: # JSTOR hasn't always produced valid XML. contents = re.sub('(&)(?!amp;)', lambda match: '&', f.read()) root = ET.fromstring(contents) doi = root.attrib['id'] if self.K: # Keys only. return doi grams = [] for gram in root.findall(self.elem_xml): text = unidecode(unicode(gram.text.strip())) if ( not self.ignore_hash or '#' not in list(text) ): c = ( text, number(gram.attrib['weight']) ) grams.append(c) if self.V: # Values only. return grams return doi, grams",Retrieve data for the ith file in the dataset. "def get_flight_rules(vis: Number, ceiling: Cloud) -> int: """""" Returns int based on current flight rules from parsed METAR data 0=VFR, 1=MVFR, 2=IFR, 3=LIFR Note: Common practice is to report IFR if visibility unavailable """""" # Parse visibility if not vis: return 2 if vis.repr == 'CAVOK' or vis.repr.startswith('P6'): vis = 10 # type: ignore elif vis.repr.startswith('M'): vis = 0 # type: ignore # Convert meters to miles elif len(vis.repr) == 4: vis = vis.value * 0.000621371 # type: ignore else: vis = vis.value # type: ignore # Parse ceiling cld = ceiling.altitude if ceiling else 99 # Determine flight rules if (vis <= 5) or (cld <= 30): # type: ignore if (vis < 3) or (cld < 10): # type: ignore if (vis < 1) or (cld < 5): # type: ignore return 3 # LIFR return 2 # IFR return 1 # MVFR return 0","Returns int based on current flight rules from parsed METAR data 0=VFR, 1=MVFR, 2=IFR, 3=LIFR Note: Common practice is to report IFR if visibility unavailable" "def not_as_alias_handler(names_list): """"""Returns a list of names ignoring any aliases."""""" list_ = list() for alias in names_list: list_.append(alias.name) return list_",Returns a list of names ignoring any aliases. "def _contains_line(self, line): """"""Test if a chain of qubits is completely contained in ``self``. In particular, test if all qubits are present and the couplers connecting those qubits are also connected. NOTE: this function assumes that ``line`` is a list or tuple of qubits which satisfies the precondition that ``(line[i],line[i+1])`` is supposed to be a coupler for all ``i``. INPUTS: line: a list of qubits satisfying the above precondition OUTPUT: boolean """""" return all(v in self for v in line) and all(u in self[v] for u, v in zip(line, line[1::]))","Test if a chain of qubits is completely contained in ``self``. In particular, test if all qubits are present and the couplers connecting those qubits are also connected. NOTE: this function assumes that ``line`` is a list or tuple of qubits which satisfies the precondition that ``(line[i],line[i+1])`` is supposed to be a coupler for all ``i``. INPUTS: line: a list of qubits satisfying the above precondition OUTPUT: boolean" "def run(align_bams, items, ref_file, assoc_files, region, out_file): """"""Run octopus variant calling, handling both somatic and germline calling. 
"""""" if not utils.file_exists(out_file): paired = vcfutils.get_paired_bams(align_bams, items) vrs = bedutils.population_variant_regions(items) target = shared.subset_variant_regions(vrs, region, out_file, items=items, do_merge=True) if paired: return _run_somatic(paired, ref_file, target, out_file) else: return _run_germline(align_bams, items, ref_file, target, out_file) return out_file","Run octopus variant calling, handling both somatic and germline calling." "def values(self, **kwargs): """"""Get the view's values"""""" result = yield self.get(**kwargs) if not result['rows']: raise Return([]) raise Return([x['value'] for x in result['rows']])",Get the view's values "def get_match_info(template, match, state): """""" Given a template and a regex match within said template, return a dictionary of information about the match to be used to help parse the template. """""" info = match.groupdict() # Put special delimiter cases in terms of normal ones if info['change']: info.update({ 'tag_type' : '=', 'tag_key' : info['delims'], }) elif info['raw']: info.update({ 'tag_type' : '&', 'tag_key' : info['raw_key'], }) # Rename the important match variables for convenience tag_start = match.start() tag_end = match.end() tag_type = info['tag_type'] tag_key = info['tag_key'] lead_wsp = info['lead_wsp'] end_wsp = info['end_wsp'] begins_line = (tag_start == 0) or (template[tag_start-1] in state.eol_chars) ends_line = (tag_end == len(template) or template[tag_end] in state.eol_chars) interpolating = (tag_type in ('', '&')) standalone = (not interpolating) and begins_line and ends_line if end_wsp: tag_end -= len(end_wsp) if standalone: template_length = len(template) # Standalone tags strip exactly one occurence of '\r', '\n', or '\r\n' # from the end of the line. if tag_end < len(template) and template[tag_end] == '\r': tag_end += 1 if tag_end < len(template) and template[tag_end] == '\n': tag_end += 1 elif lead_wsp: tag_start += len(lead_wsp) lead_wsp = '' info.update({ 'tag_start' : tag_start, 'tag_end' : tag_end, 'tag_type' : tag_type, 'tag_key' : tag_key, 'lead_wsp' : lead_wsp, 'end_wsp' : end_wsp, 'begins_line' : begins_line, 'ends_line' : ends_line, 'interpolating' : interpolating, 'standalone' : standalone, }) return info","Given a template and a regex match within said template, return a dictionary of information about the match to be used to help parse the template." "def get_bond_length(sp1, sp2, bond_order=1): """""" Get the bond length between two species. Args: sp1 (Specie): First specie. sp2 (Specie): Second specie. bond_order: For species with different possible bond orders, this allows one to obtain the bond length for a particular bond order. For example, to get the C=C bond length instead of the C-C bond length, this should be set to 2. Defaults to 1. Returns: Bond length in Angstrom. If no data is available, the sum of the atomic radius is used. """""" sp1 = Element(sp1) if isinstance(sp1, str) else sp1 sp2 = Element(sp2) if isinstance(sp2, str) else sp2 try: all_lengths = obtain_all_bond_lengths(sp1, sp2) return all_lengths[bond_order] # The ValueError is raised in `obtain_all_bond_lengths` where no bond # data for both elements is found. The KeyError is raised in # `__getitem__` method of `dict` builtin class where although bond data # for both elements is found, the data for specified bond order does # not exist. In both cases, sum of atomic radius is returned. except (ValueError, KeyError): warnings.warn(""No order %d bond lengths between %s and %s found in "" ""database. 
Returning sum of atomic radius."" % (bond_order, sp1, sp2)) return sp1.atomic_radius + sp2.atomic_radius","Get the bond length between two species. Args: sp1 (Specie): First specie. sp2 (Specie): Second specie. bond_order: For species with different possible bond orders, this allows one to obtain the bond length for a particular bond order. For example, to get the C=C bond length instead of the C-C bond length, this should be set to 2. Defaults to 1. Returns: Bond length in Angstrom. If no data is available, the sum of the atomic radius is used." "def get_basedir(path): """"""Returns the base directory of a path. Examples: get_basedir('foo/bar/baz') --> 'foo' get_basedir('/foo/bar/baz') --> '' get_basedir('foo') --> 'foo' """""" return path[:path.index(os.sep)] if os.sep in path else path","Returns the base directory of a path. Examples: get_basedir('foo/bar/baz') --> 'foo' get_basedir('/foo/bar/baz') --> '' get_basedir('foo') --> 'foo'" "def _lob_end_handler_factory(ion_type, action, validate=lambda c, ctx, action_res: None): """"""Generates handlers for the end of blob or clob values. Args: ion_type (IonType): The type of this lob (either blob or clob). action (callable): Called for each non-whitespace, non-closing brace character encountered before the end of the lob. Accepts the current character's ordinal, the current context, the previous character's ordinal, the result of the previous call to ``action`` (if any), and True if this is the first call to ``action``. Returns any state that will be needed by subsequent calls to ``action``. For blobs, this should validate the character is valid base64; for clobs, this should ensure there are no illegal characters (e.g. comments) between the end of the data and the end of the clob. validate (Optional[callable]): Called once the second closing brace has been found. Accepts the current character's ordinal, the current context, and the result of the last call to ``action``; raises an error if this is not a valid lob value. """""" assert ion_type is IonType.BLOB or ion_type is IonType.CLOB @coroutine def lob_end_handler(c, ctx): val = ctx.value prev = c action_res = None if c != _CLOSE_BRACE and c not in _WHITESPACE: action_res = action(c, ctx, prev, action_res, True) c, self = yield trans = ctx.immediate_transition(self) while True: if c in _WHITESPACE: if prev == _CLOSE_BRACE: _illegal_character(c, ctx.set_ion_type(ion_type), 'Expected }.') elif c == _CLOSE_BRACE: if prev == _CLOSE_BRACE: validate(c, ctx, action_res) break else: action_res = action(c, ctx, prev, action_res, False) prev = c c, _ = yield trans ctx.set_self_delimiting(True) # Lob values are self-delimiting (they are terminated by '}}'). yield ctx.event_transition(IonThunkEvent, IonEventType.SCALAR, ion_type, _parse_lob(ion_type, val)) return lob_end_handler","Generates handlers for the end of blob or clob values. Args: ion_type (IonType): The type of this lob (either blob or clob). action (callable): Called for each non-whitespace, non-closing brace character encountered before the end of the lob. Accepts the current character's ordinal, the current context, the previous character's ordinal, the result of the previous call to ``action`` (if any), and True if this is the first call to ``action``. Returns any state that will be needed by subsequent calls to ``action``. For blobs, this should validate the character is valid base64; for clobs, this should ensure there are no illegal characters (e.g. comments) between the end of the data and the end of the clob. 
validate (Optional[callable]): Called once the second closing brace has been found. Accepts the current character's ordinal, the current context, and the result of the last call to ``action``; raises an error if this is not a valid lob value." "async def init(app, loop): """"""Sends a message to the webhook channel when server starts."""""" app.session = aiohttp.ClientSession(loop=loop) # to make web requests app.webhook = Webhook.Async(webhook_url, session=app.session) em = Embed(color=0x2ecc71) em.set_author('[INFO] Starting Worker') em.description = 'Host: {}'.format(socket.gethostname()) await app.webhook.send(embed=em)",Sends a message to the webhook channel when server starts. "def get_slot_positions(self, type='a'): """"""Returns a list with the slots occupied for the type passed in. Allowed types of analyses are: 'a' (routine analysis) 'b' (blank analysis) 'c' (control) 'd' (duplicate) 'all' (all analyses) :param type: type of the analysis :return: list of slot positions """""" if type not in ALLOWED_ANALYSES_TYPES and type != ALL_ANALYSES_TYPES: return list() layout = self.getLayout() slots = list() for pos in layout: if type != ALL_ANALYSES_TYPES and pos['type'] != type: continue slots.append(to_int(pos['position'])) # return a unique list of sorted slot positions return sorted(set(slots))","Returns a list with the slots occupied for the type passed in. Allowed types of analyses are: 'a' (routine analysis) 'b' (blank analysis) 'c' (control) 'd' (duplicate) 'all' (all analyses) :param type: type of the analysis :return: list of slot positions" "def _write_var_data_sparse(self, f, zVar, var, dataType, numElems, recVary, oneblock): ''' Writes a VVR and a VXR for this block of sparse data Parameters: f : file The open CDF file zVar : bool True if this is for a z variable var : int The variable number dataType : int The CDF data type of this variable numElems : str The number of elements in each record recVary : bool True if the value varies across records oneblock: list A list of data in the form [startrec, endrec, [data]] Returns: recend : int Just the ""endrec"" value input by the user in ""oneblock"" ''' rec_start = oneblock[0] rec_end = oneblock[1] indata = oneblock[2] numValues = self._num_values(zVar, var) # Convert oneblock[2] into a byte stream _, data = self._convert_data(dataType, numElems, numValues, indata) # Gather dimension information if zVar: vdr_offset = self.zvarsinfo[var][1] else: vdr_offset = self.rvarsinfo[var][1] # Write one VVR offset = self._write_vvr(f, data) f.seek(vdr_offset+28, 0) # Get first VXR vxrOne = int.from_bytes(f.read(8), 'big', signed=True) foundSpot = 0 usedEntries = 0 currentVXR = 0 # Search through VXRs to find an open one while foundSpot == 0 and vxrOne > 0: # have a VXR f.seek(vxrOne, 0) currentVXR = f.tell() f.seek(vxrOne+12, 0) vxrNext = int.from_bytes(f.read(8), 'big', signed=True) nEntries = int.from_bytes(f.read(4), 'big', signed=True) usedEntries = int.from_bytes(f.read(4), 'big', signed=True) if (usedEntries == nEntries): # all entries are used -- check the next vxr in link vxrOne = vxrNext else: # found a vxr with an available entry spot foundSpot = 1 # vxrOne == 0 from vdr's vxrhead vxrOne == -1 from a vxr's vxrnext if (vxrOne == 0 or vxrOne == -1): # no available vxr... 
create a new one currentVXR = self._create_vxr(f, rec_start, rec_end, vdr_offset, currentVXR, offset) else: self._use_vxrentry(f, currentVXR, rec_start, rec_end, offset) # Modify the VDR's MaxRec if needed f.seek(vdr_offset+24, 0) recNumc = int.from_bytes(f.read(4), 'big', signed=True) if (rec_end > recNumc): self._update_offset_value(f, vdr_offset+24, 4, rec_end) return rec_end","Writes a VVR and a VXR for this block of sparse data Parameters: f : file The open CDF file zVar : bool True if this is for a z variable var : int The variable number dataType : int The CDF data type of this variable numElems : str The number of elements in each record recVary : bool True if the value varies across records oneblock: list A list of data in the form [startrec, endrec, [data]] Returns: recend : int Just the ""endrec"" value input by the user in ""oneblock""" "def ot_find_tree(arg_dict, exact=True, verbose=False, oti_wrapper=None): """"""Uses a peyotl wrapper around an Open Tree web service to get a list of trees including values `value` for a given property to be searched on `property`. The oti_wrapper can be None (in which case the default wrapper from peyotl.sugar will be used). All other arguments correspond to the arguments of the web-service call. """""" if oti_wrapper is None: from peyotl.sugar import oti oti_wrapper = oti return oti_wrapper.find_trees(arg_dict, exact=exact, verbose=verbose, wrap_response=True)","Uses a peyotl wrapper around an Open Tree web service to get a list of trees including values `value` for a given property to be searched on `property`. The oti_wrapper can be None (in which case the default wrapper from peyotl.sugar will be used). All other arguments correspond to the arguments of the web-service call." "def getPhotosets(self): """"""Returns a list of Photosets."""""" method = 'flickr.photosets.getList' data = _doget(method, user_id=self.id) sets = [] if isinstance(data.rsp.photosets.photoset, list): for photoset in data.rsp.photosets.photoset: sets.append(Photoset(photoset.id, photoset.title.text,\ Photo(photoset.primary),\ secret=photoset.secret, \ server=photoset.server, \ description=photoset.description.text, photos=photoset.photos)) else: photoset = data.rsp.photosets.photoset sets.append(Photoset(photoset.id, photoset.title.text,\ Photo(photoset.primary),\ secret=photoset.secret, \ server=photoset.server, \ description=photoset.description.text, photos=photoset.photos)) return sets",Returns a list of Photosets. "def identify(self, req, resp, resource, uri_kwargs): """"""Initialize X-Api-Key authentication middleware."""""" try: return req.get_header('X-Api-Key', True) except (KeyError, HTTPMissingHeader): pass",Initialize X-Api-Key authentication middleware. "def get_name(self, label_type): """""" returns the most preferred label name; if there isn't any correct name in the list, it will return the newest label name """""" if label_type in self._label_values: return self._label_values[label_type][0] else: return Labels.LABEL_NAMES[label_type][0]","returns the most preferred label name; if there isn't any correct name in the list, it will return the newest label name" "def pick(self, connections): """"""Picks a connection with the earliest backoff time. As a result, the first connection is picked for as long as it has no backoff time. Otherwise, the connections are tried in a round robin fashion. Args: connections (:obj:list): List of :class:`~bigchaindb_driver.connection.Connection` instances. 
"""""" if len(connections) == 1: return connections[0] def key(conn): return (datetime.min if conn.backoff_time is None else conn.backoff_time) return min(*connections, key=key)","Picks a connection with the earliest backoff time. As a result, the first connection is picked for as long as it has no backoff time. Otherwise, the connections are tried in a round robin fashion. Args: connections (:obj:list): List of :class:`~bigchaindb_driver.connection.Connection` instances." "def _detach_received(self, error): """"""Callback called when a link DETACH frame is received. This callback will process the received DETACH error to determine if the link is recoverable or whether it should be shutdown. :param error: The error information from the detach frame. :type error: ~uamqp.errors.ErrorResponse """""" # pylint: disable=protected-access if error: condition = error.condition description = error.description info = error.info else: condition = b""amqp:unknown-error"" description = None info = None self._error = errors._process_link_error(self.error_policy, condition, description, info) _logger.info(""Received Link detach event: %r\nLink: %r\nDescription: %r"" ""\nDetails: %r\nRetryable: %r\nConnection: %r"", condition, self.name, description, info, self._error.action.retry, self._session._connection.container_id)","Callback called when a link DETACH frame is received. This callback will process the received DETACH error to determine if the link is recoverable or whether it should be shutdown. :param error: The error information from the detach frame. :type error: ~uamqp.errors.ErrorResponse" "def underline(self, msg): """"""Underline the input"""""" return click.style(msg, underline=True) if self.colorize else msg",Underline the input "def ordered(start, edges, predicate=None, inverse=False): """""" Depth first edges from a SciGraph response. """""" s, o = 'sub', 'obj' if inverse: s, o = o, s for edge in edges: if predicate is not None and edge['pred'] != predicate: print('scoop!') continue if edge[s] == start: yield edge yield from Graph.ordered(edge[o], edges, predicate=predicate)",Depth first edges from a SciGraph response. "def _recalculate_extents_and_offsets(self, index, logical_block_size): # type: (int, int) -> Tuple[int, int] ''' Internal method to recalculate the extents and offsets associated with children of this directory record. Parameters: index - The index at which to start the recalculation. logical_block_size - The block size to use for comparisons. Returns: A tuple where the first element is the total number of extents required by the children and where the second element is the offset into the last extent currently being used. ''' if index == 0: dirrecord_offset = 0 num_extents = 1 else: dirrecord_offset = self.children[index - 1].offset_to_here num_extents = self.children[index - 1].extents_to_here for i in range(index, len(self.children)): c = self.children[i] dirrecord_len = c.dr_len if (dirrecord_offset + dirrecord_len) > logical_block_size: num_extents += 1 dirrecord_offset = 0 dirrecord_offset += dirrecord_len c.extents_to_here = num_extents c.offset_to_here = dirrecord_offset c.index_in_parent = i return num_extents, dirrecord_offset","Internal method to recalculate the extents and offsets associated with children of this directory record. Parameters: index - The index at which to start the recalculation. logical_block_size - The block size to use for comparisons. 
Returns: A tuple where the first element is the total number of extents required by the children and where the second element is the offset into the last extent currently being used." "def copy(self, **params): '''Creates the new instance of the Route substituting the requested parameters.''' new_params = dict() for name in ['owner', 'priority', 'key', 'final']: new_params[name] = params.get(name, getattr(self, name)) return Route(**new_params)","Creates the new instance of the Route substituting the requested parameters." "def getOrCreate(cls, sc): """""" Get the existing SQLContext or create a new one with given SparkContext. :param sc: SparkContext """""" if cls._instantiatedContext is None: jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc()) sparkSession = SparkSession(sc, jsqlContext.sparkSession()) cls(sc, sparkSession, jsqlContext) return cls._instantiatedContext","Get the existing SQLContext or create a new one with given SparkContext. :param sc: SparkContext" "def from_file(cls, f, fname=None, readers=None): """"""Create a Document from a file. Usage:: with open('paper.html', 'rb') as f: doc = Document.from_file(f) .. note:: Always open files in binary mode by using the 'rb' parameter. :param file|string f: A file-like object or path to a file. :param string fname: (Optional) The filename. Used to help determine file format. :param list[chemdataextractor.reader.base.BaseReader] readers: (Optional) List of readers to use. """""" if isinstance(f, six.string_types): f = io.open(f, 'rb') if not fname and hasattr(f, 'name'): fname = f.name return cls.from_string(f.read(), fname=fname, readers=readers)","Create a Document from a file. Usage:: with open('paper.html', 'rb') as f: doc = Document.from_file(f) .. note:: Always open files in binary mode by using the 'rb' parameter. :param file|string f: A file-like object or path to a file. :param string fname: (Optional) The filename. Used to help determine file format. :param list[chemdataextractor.reader.base.BaseReader] readers: (Optional) List of readers to use." "def rename(self, new_name): """"""Rename project and rename its root path accordingly."""""" old_name = self.name self.name = new_name pypath = self.relative_pythonpath # ?? self.root_path = self.root_path[:-len(old_name)]+new_name self.relative_pythonpath = pypath # ?? self.save()",Rename project and rename its root path accordingly. "def view(self, shape=None, chunks=None, dtype=None, fill_value=None, filters=None, read_only=None, synchronizer=None): """"""Return an array sharing the same data. Parameters ---------- shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. dtype : string or dtype, optional NumPy dtype. fill_value : object Default value to use for uninitialized portions of the array. filters : sequence, optional Sequence of filters to use to encode chunk data prior to compression. read_only : bool, optional True if array should be protected against modification. synchronizer : object, optional Array synchronizer. Notes ----- WARNING: This is an experimental feature and should be used with care. There are plenty of ways to generate errors and/or cause data corruption. Examples -------- Bypass filters: >>> import zarr >>> import numpy as np >>> np.random.seed(42) >>> labels = ['female', 'male'] >>> data = np.random.choice(labels, size=10000) >>> filters = [zarr.Categorize(labels=labels, ... dtype=data.dtype, ... 
astype='u1')] >>> a = zarr.array(data, chunks=1000, filters=filters) >>> a[:] array(['female', 'male', 'female', ..., 'male', 'male', 'female'], dtype='>> v = a.view(dtype='u1', filters=[]) >>> v.is_view True >>> v[:] array([1, 2, 1, ..., 2, 2, 1], dtype=uint8) Views can be used to modify data: >>> x = v[:] >>> x.sort() >>> v[:] = x >>> v[:] array([1, 1, 1, ..., 2, 2, 2], dtype=uint8) >>> a[:] array(['female', 'female', 'female', ..., 'male', 'male', 'male'], dtype='>> data = np.random.randint(0, 2, size=10000, dtype='u1') >>> a = zarr.array(data, chunks=1000) >>> a[:] array([0, 0, 1, ..., 1, 0, 0], dtype=uint8) >>> v = a.view(dtype=bool) >>> v[:] array([False, False, True, ..., True, False, False]) >>> np.all(a[:].view(dtype=bool) == v[:]) True An array can be viewed with a dtype with a different item size, however some care is needed to adjust the shape and chunk shape so that chunk data is interpreted correctly: >>> data = np.arange(10000, dtype='u2') >>> a = zarr.array(data, chunks=1000) >>> a[:10] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16) >>> v = a.view(dtype='u1', shape=20000, chunks=2000) >>> v[:10] array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8) >>> np.all(a[:].view('u1') == v[:]) True Change fill value for uninitialized chunks: >>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1') >>> a[:] array([-1, -1, -1, ..., -1, -1, -1], dtype=int8) >>> v = a.view(fill_value=42) >>> v[:] array([42, 42, 42, ..., 42, 42, 42], dtype=int8) Note that resizing or appending to views is not permitted: >>> a = zarr.empty(10000) >>> v = a.view() >>> try: ... v.resize(20000) ... except PermissionError as e: ... print(e) operation not permitted for views """""" store = self._store chunk_store = self._chunk_store path = self._path if read_only is None: read_only = self._read_only if synchronizer is None: synchronizer = self._synchronizer a = Array(store=store, path=path, chunk_store=chunk_store, read_only=read_only, synchronizer=synchronizer, cache_metadata=True) a._is_view = True # allow override of some properties if dtype is None: dtype = self._dtype else: dtype = np.dtype(dtype) a._dtype = dtype if shape is None: shape = self._shape else: shape = normalize_shape(shape) a._shape = shape if chunks is not None: chunks = normalize_chunks(chunks, shape, dtype.itemsize) a._chunks = chunks if fill_value is not None: a._fill_value = fill_value if filters is not None: a._filters = filters return a","Return an array sharing the same data. Parameters ---------- shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. dtype : string or dtype, optional NumPy dtype. fill_value : object Default value to use for uninitialized portions of the array. filters : sequence, optional Sequence of filters to use to encode chunk data prior to compression. read_only : bool, optional True if array should be protected against modification. synchronizer : object, optional Array synchronizer. Notes ----- WARNING: This is an experimental feature and should be used with care. There are plenty of ways to generate errors and/or cause data corruption. Examples -------- Bypass filters: >>> import zarr >>> import numpy as np >>> np.random.seed(42) >>> labels = ['female', 'male'] >>> data = np.random.choice(labels, size=10000) >>> filters = [zarr.Categorize(labels=labels, ... dtype=data.dtype, ... 
astype='u1')] >>> a = zarr.array(data, chunks=1000, filters=filters) >>> a[:] array(['female', 'male', 'female', ..., 'male', 'male', 'female'], dtype='>> v = a.view(dtype='u1', filters=[]) >>> v.is_view True >>> v[:] array([1, 2, 1, ..., 2, 2, 1], dtype=uint8) Views can be used to modify data: >>> x = v[:] >>> x.sort() >>> v[:] = x >>> v[:] array([1, 1, 1, ..., 2, 2, 2], dtype=uint8) >>> a[:] array(['female', 'female', 'female', ..., 'male', 'male', 'male'], dtype='>> data = np.random.randint(0, 2, size=10000, dtype='u1') >>> a = zarr.array(data, chunks=1000) >>> a[:] array([0, 0, 1, ..., 1, 0, 0], dtype=uint8) >>> v = a.view(dtype=bool) >>> v[:] array([False, False, True, ..., True, False, False]) >>> np.all(a[:].view(dtype=bool) == v[:]) True An array can be viewed with a dtype with a different item size, however some care is needed to adjust the shape and chunk shape so that chunk data is interpreted correctly: >>> data = np.arange(10000, dtype='u2') >>> a = zarr.array(data, chunks=1000) >>> a[:10] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16) >>> v = a.view(dtype='u1', shape=20000, chunks=2000) >>> v[:10] array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8) >>> np.all(a[:].view('u1') == v[:]) True Change fill value for uninitialized chunks: >>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1') >>> a[:] array([-1, -1, -1, ..., -1, -1, -1], dtype=int8) >>> v = a.view(fill_value=42) >>> v[:] array([42, 42, 42, ..., 42, 42, 42], dtype=int8) Note that resizing or appending to views is not permitted: >>> a = zarr.empty(10000) >>> v = a.view() >>> try: ... v.resize(20000) ... except PermissionError as e: ... print(e) operation not permitted for views" "def get_subplot_params(figsize): """"""Return sensible default `SubplotParams` for a figure of the given size Parameters ---------- figsize : `tuple` of `float` the ``(width, height)`` figure size (inches) Returns ------- params : `~matplotlib.figure.SubplotParams` formatted set of subplot parameters """""" width, height, = figsize try: left, right = SUBPLOT_WIDTH[width] except KeyError: left = right = None try: bottom, top = SUBPLOT_HEIGHT[height] except KeyError: bottom = top = None return SubplotParams(left=left, bottom=bottom, right=right, top=top)","Return sensible default `SubplotParams` for a figure of the given size Parameters ---------- figsize : `tuple` of `float` the ``(width, height)`` figure size (inches) Returns ------- params : `~matplotlib.figure.SubplotParams` formatted set of subplot parameters" "def build_argument_parser(executable): """"""creates an argument parser from the given `executable` model. An argument '__xml__' for ""--xml"" is added independently. :param executable: CLI Model :type executable: clictk.model.Executable :return: """""" a = ArgumentParser() a.add_argument(""--xml"", action=""store_true"", dest=""__xml__"", help=""show cli xml"") for p in executable: o = [] if p.flag: o.append(""-%s"" % p.flag) if p.longflag: o.append(""--%s"" % p.longflag) a.add_argument( *o, metavar=p.type.upper(), dest=p.name, action=""store"", help=(p.description.strip() or ""parameter %s"" % p.name) ) return a","creates an argument parser from the given `executable` model. An argument '__xml__' for ""--xml"" is added independently. 
:param executable: CLI Model :type executable: clictk.model.Executable :return:" "def select_point(action, action_space, select_point_act, screen): """"""Select a unit at a point."""""" select = spatial(action, action_space).unit_selection_point screen.assign_to(select.selection_screen_coord) select.type = select_point_act",Select a unit at a point. "def update_DOM(self): """""" Makes a request and updates `self._DOM`. Worth using only if you manually change `self.base_url` or `self.path`. :return: self :rtype: Url """""" response = self.fetch() self._DOM = html.fromstring(response.text) return self","Makes a request and updates `self._DOM`. Worth using only if you manually change `self.base_url` or `self.path`. :return: self :rtype: Url" "def get_req_list(): """"""Get the requirements by whether we're building develop or not."""""" req_list = parse_reqs(REQUIREMENTS['prod']) if len(sys.argv) > 2 and sys.argv[2] == ('develop'): req_list += parse_reqs(REQUIREMENTS['dev']) return req_list",Get the requirements by whether we're building develop or not. "def text_search(self, search, *, limit=0, table='assets'): """"""Return an iterator of assets that match the text search Args: search (str): Text search string to query the text index limit (int, optional): Limit the number of returned documents. Returns: iter: An iterator of assets that match the text search. """""" return backend.query.text_search(self.connection, search, limit=limit, table=table)","Return an iterator of assets that match the text search Args: search (str): Text search string to query the text index limit (int, optional): Limit the number of returned documents. Returns: iter: An iterator of assets that match the text search." "def password_args(subparsers): """"""Add command line options for the set_password operation"""""" password_parser = subparsers.add_parser('set_password') password_parser.add_argument('vault_path', help='Path which contains password ' 'secret to be updated') base_args(password_parser)",Add command line options for the set_password operation "def print_sorted_counter(counter, tab=1): """"""print all elements of a counter in descending order"""""" for key, count in sorted(counter.items(), key=itemgetter(1), reverse=True): print ""{0}{1} - {2}"".format('\t'*tab, key, count)",print all elements of a counter in descending order "def make_iterable(obj): """"""Make an object iterable. >>> make_iterable(obj='hello') ('hello',) >>> make_iterable(obj=None) () """""" if not obj: return tuple() if isinstance(obj, (list, tuple, set)): return obj return (obj,)","Make an object iterable. >>> make_iterable(obj='hello') ('hello',) >>> make_iterable(obj=None) ()" "def get_common_args(parser, args=None): """""" Return list of OSPD common command-line arguments from parser, after validating provided values or setting default ones. """""" options = parser.parse_args(args) # TCP Port to listen on. port = options.port # Network address to bind listener to address = options.bind_address # Unix file socket to listen on unix_socket = options.unix_socket # Debug level. log_level = options.log_level # Server key path. keyfile = options.key_file or KEY_FILE # Server cert path. certfile = options.cert_file or CERT_FILE # CA cert path. 
cafile = options.ca_file or CA_FILE common_args = dict() common_args['port'] = port common_args['address'] = address common_args['unix_socket'] = unix_socket common_args['keyfile'] = keyfile common_args['certfile'] = certfile common_args['cafile'] = cafile common_args['log_level'] = log_level common_args['foreground'] = options.foreground common_args['log_file'] = options.log_file common_args['version'] = options.version return common_args","Return list of OSPD common command-line arguments from parser, after validating provided values or setting default ones." "def _is_second_run(): """"""Returns `True` when we know that `fuck` called second time."""""" tracker_path = _get_not_configured_usage_tracker_path() if not tracker_path.exists(): return False current_pid = _get_shell_pid() with tracker_path.open('r') as tracker: try: info = json.load(tracker) except ValueError: return False if not (isinstance(info, dict) and info.get('pid') == current_pid): return False return (_get_previous_command() == 'fuck' or time.time() - info.get('time', 0) < const.CONFIGURATION_TIMEOUT)",Returns `True` when we know that `fuck` called second time. "def predictions_iter(self): """""" property decorated prediction iterator Returns ------- iterator : iterator iterator on prediction sensitivity vectors (matrix) """""" for fname in self.forecast_names: yield self.predictions.get(col_names=fname)","property decorated prediction iterator Returns ------- iterator : iterator iterator on prediction sensitivity vectors (matrix)" "def _bind_target(self, target, ctx=None): """"""Method to override in order to specialize binding of target. :param target: target to bind. :param ctx: target ctx. :return: bound target. """""" result = target try: # get annotations from target if exists. local_annotations = get_local_property( target, Annotation.__ANNOTATIONS_KEY__, [], ctx=ctx ) except TypeError: raise TypeError('target {0} must be hashable.'.format(target)) # if local_annotations do not exist, put them in target if not local_annotations: put_properties( target, properties={Annotation.__ANNOTATIONS_KEY__: local_annotations}, ctx=ctx ) # insert self at first position local_annotations.insert(0, self) # add target to self targets if target not in self.targets: self.targets.append(target) return result","Method to override in order to specialize binding of target. :param target: target to bind. :param ctx: target ctx. :return: bound target." "def _init_from_dict(self, model_dict): """""" Initiate self from a model_dict to make sure attributes such as vars, params are available. Creates lists of alphabetically sorted independent vars, dependent vars, sigma vars, and parameters. Finally it creates a signature for this model so it can be called nicely. This signature only contains independent vars and params, as one would expect. :param model_dict: dict of (dependent_var, expression) pairs. """""" sort_func = lambda symbol: symbol.name self.model_dict = OrderedDict(sorted(model_dict.items(), key=lambda i: sort_func(i[0]))) # Everything at the bottom of the toposort is independent, at the top # dependent, and the rest interdependent. 
ordered = list(toposort(self.connectivity_mapping)) independent = sorted(ordered.pop(0), key=sort_func) self.dependent_vars = sorted(ordered.pop(-1), key=sort_func) self.interdependent_vars = sorted( [item for items in ordered for item in items], key=sort_func ) # `independent` contains both params and vars, needs to be separated self.independent_vars = [s for s in independent if not isinstance(s, Parameter) and not s in self] self.params = [s for s in independent if isinstance(s, Parameter)] try: assert not any(isinstance(var, Parameter) for var in self.dependent_vars) assert not any(isinstance(var, Parameter) for var in self.interdependent_vars) except AssertionError: raise ModelError('`Parameter`\'s can not feature in the role ' 'of `Variable`') # Make Variable object corresponding to each dependent var. self.sigmas = {var: Variable(name='sigma_{}'.format(var.name)) for var in self.dependent_vars}","Initiate self from a model_dict to make sure attributes such as vars, params are available. Creates lists of alphabetically sorted independent vars, dependent vars, sigma vars, and parameters. Finally it creates a signature for this model so it can be called nicely. This signature only contains independent vars and params, as one would expect. :param model_dict: dict of (dependent_var, expression) pairs." "def replace_uuid_w_names(self, resp): """""" Replace the uuid's with names. Parameters ---------- resp : ??? ??? Returns ------- ??? ??? """""" col_mapper = self.get_point_name(resp.context)[""?point""].to_dict() resp.df.rename(columns=col_mapper, inplace=True) return resp","Replace the uuid's with names. Parameters ---------- resp : ??? ??? Returns ------- ??? ???" "def _build_model(self): """""" Build model. """""" if ""input_dim"" not in self.settings: raise ValueError(""Model parameter input_dim cannot be None."") self.linear = nn.Linear( self.settings[""input_dim""], self.cardinality, self.settings[""bias""] )",Build model. "def device_discovered (self, address, device_class, rssi, name): """""" Called when a bluetooth device is discovered. address is the bluetooth address of the device device_class is the Class of Device, as specified in [1] passed in as a 3-byte string name is the user-friendly name of the device if lookup_names was set when the inquiry was started. otherwise None This method exists to be overridden. [1] https://www.bluetooth.org/foundry/assignnumb/document/baseband """""" if name: print((""found: %s - %s (class 0x%X, rssi %s)"" % \ (address, name, device_class, rssi))) else: print((""found: %s (class 0x%X)"" % (address, device_class))) print((""found: %s (class 0x%X, rssi %s)"" % \ (address, device_class, rssi)))","Called when a bluetooth device is discovered. address is the bluetooth address of the device device_class is the Class of Device, as specified in [1] passed in as a 3-byte string name is the user-friendly name of the device if lookup_names was set when the inquiry was started. otherwise None This method exists to be overridden. [1] https://www.bluetooth.org/foundry/assignnumb/document/baseband" "def _stmt_graph_annotate_edges(self, edges_to_annotate, **new_labels): """""" Add new annotations to edges in the statement dependence graph. :param list edges_to_annotate: A list of edges to annotate. :param new_labels: New labels to be added to those edges. 
:returns: None """""" graph = self.graph for src, dst in edges_to_annotate: if src not in graph: continue if dst not in graph[src]: continue data = graph[src][dst] for k, v in new_labels.items(): if k in data: if v not in data[k]: data[k] = data[k] + (v,) else: # Construct a tuple data[k] = (v,)","Add new annotations to edges in the statement dependence graph. :param list edges_to_annotate: A list of edges to annotate. :param new_labels: New labels to be added to those edges. :returns: None" "def set_pair(self, term1, term2, value, **kwargs): """""" Set the value for a pair of terms. Args: term1 (str) term2 (str) value (mixed) """""" key = self.key(term1, term2) self.keys.update([term1, term2]) self.pairs[key] = value","Set the value for a pair of terms. Args: term1 (str) term2 (str) value (mixed)" "def getJSMinimumVolume(self, **kw): """"""Try convert the MinimumVolume to 'ml' or 'g' so that JS has an easier time working with it. If conversion fails, return raw value. """""" default = self.Schema()['MinimumVolume'].get(self) try: mgdefault = default.split(' ', 1) mgdefault = mg(float(mgdefault[0]), mgdefault[1]) except: mgdefault = mg(0, 'ml') try: return str(mgdefault.ounit('ml')) except: pass try: return str(mgdefault.ounit('g')) except: pass return str(default)","Try convert the MinimumVolume to 'ml' or 'g' so that JS has an easier time working with it. If conversion fails, return raw value." "def deser(val): """""" Deserialize from a string representation of an long integer to the python representation of a long integer. :param val: The string representation of the long integer. :return: The long integer. """""" if isinstance(val, str): _val = val.encode(""utf-8"") else: _val = val return base64_to_long(_val)","Deserialize from a string representation of an long integer to the python representation of a long integer. :param val: The string representation of the long integer. :return: The long integer." "def reset_group_subscription(self): """"""Reset the group's subscription to only contain topics subscribed by this consumer."""""" if self._user_assignment: raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE) assert self.subscription is not None, 'Subscription required' self._group_subscription.intersection_update(self.subscription)",Reset the group's subscription to only contain topics subscribed by this consumer. "def compress_for_output_listing(paths): """"""Returns a tuple of 2 sets of which paths to display to user The first set contains paths that would be deleted. Files of a package are not added and the top-level directory of the package has a '*' added at the end - to signify that all it's contents are removed. The second set contains files that would have been skipped in the above folders. """""" will_remove = list(paths) will_skip = set() # Determine folders and files folders = set() files = set() for path in will_remove: if path.endswith("".pyc""): continue if path.endswith(""__init__.py"") or "".dist-info"" in path: folders.add(os.path.dirname(path)) files.add(path) _normcased_files = set(map(os.path.normcase, files)) folders = compact(folders) # This walks the tree using os.walk to not miss extra folders # that might get added. for folder in folders: for dirpath, _, dirfiles in os.walk(folder): for fname in dirfiles: if fname.endswith("".pyc""): continue file_ = os.path.join(dirpath, fname) if (os.path.isfile(file_) and os.path.normcase(file_) not in _normcased_files): # We are skipping this file. Add it to the set. 
will_skip.add(file_) will_remove = files | { os.path.join(folder, ""*"") for folder in folders } return will_remove, will_skip","Returns a tuple of 2 sets of which paths to display to user The first set contains paths that would be deleted. Files of a package are not added and the top-level directory of the package has a '*' added at the end - to signify that all it's contents are removed. The second set contains files that would have been skipped in the above folders." "def error(self, correlation_id, error, message, *args, **kwargs): """""" Logs recoverable application error. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """""" self._format_and_write(LogLevel.Error, correlation_id, error, message, args, kwargs)","Logs recoverable application error. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message." "def get_email_templates(self, params=None): """""" Get all e-mail templates This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list """""" if not params: params = {} return self._iterate_through_pages(self.get_email_templates_per_page, resource=EMAIL_TEMPLATES, **{'params': params})","Get all e-mail templates This will iterate over all pages until it gets all elements. 
So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list" "def mode_string_v09(msg): '''mode string for 0.9 protocol''' mode = msg.mode nav_mode = msg.nav_mode MAV_MODE_UNINIT = 0 MAV_MODE_MANUAL = 2 MAV_MODE_GUIDED = 3 MAV_MODE_AUTO = 4 MAV_MODE_TEST1 = 5 MAV_MODE_TEST2 = 6 MAV_MODE_TEST3 = 7 MAV_NAV_GROUNDED = 0 MAV_NAV_LIFTOFF = 1 MAV_NAV_HOLD = 2 MAV_NAV_WAYPOINT = 3 MAV_NAV_VECTOR = 4 MAV_NAV_RETURNING = 5 MAV_NAV_LANDING = 6 MAV_NAV_LOST = 7 MAV_NAV_LOITER = 8 cmode = (mode, nav_mode) mapping = { (MAV_MODE_UNINIT, MAV_NAV_GROUNDED) : ""INITIALISING"", (MAV_MODE_MANUAL, MAV_NAV_VECTOR) : ""MANUAL"", (MAV_MODE_TEST3, MAV_NAV_VECTOR) : ""CIRCLE"", (MAV_MODE_GUIDED, MAV_NAV_VECTOR) : ""GUIDED"", (MAV_MODE_TEST1, MAV_NAV_VECTOR) : ""STABILIZE"", (MAV_MODE_TEST2, MAV_NAV_LIFTOFF) : ""FBWA"", (MAV_MODE_AUTO, MAV_NAV_WAYPOINT) : ""AUTO"", (MAV_MODE_AUTO, MAV_NAV_RETURNING) : ""RTL"", (MAV_MODE_AUTO, MAV_NAV_LOITER) : ""LOITER"", (MAV_MODE_AUTO, MAV_NAV_LIFTOFF) : ""TAKEOFF"", (MAV_MODE_AUTO, MAV_NAV_LANDING) : ""LANDING"", (MAV_MODE_AUTO, MAV_NAV_HOLD) : ""LOITER"", (MAV_MODE_GUIDED, MAV_NAV_VECTOR) : ""GUIDED"", (MAV_MODE_GUIDED, MAV_NAV_WAYPOINT) : ""GUIDED"", (100, MAV_NAV_VECTOR) : ""STABILIZE"", (101, MAV_NAV_VECTOR) : ""ACRO"", (102, MAV_NAV_VECTOR) : ""ALT_HOLD"", (107, MAV_NAV_VECTOR) : ""CIRCLE"", (109, MAV_NAV_VECTOR) : ""LAND"", } if cmode in mapping: return mapping[cmode] return ""Mode(%s,%s)"" % cmode",mode string for 0.9 protocol "def numDisparities(self, value): """"""Set private ``_num_disp`` and reset ``_block_matcher``."""""" if value > 0 and value % 16 == 0: self._num_disp = value else: raise InvalidNumDisparitiesError(""numDisparities must be a "" ""positive integer evenly "" ""divisible by 16."") self._replace_bm()",Set private ``_num_disp`` and reset ``_block_matcher``. "def unlocked(self): """"""``True`` if achievement is unlocked. :rtype: bool """""" achieved = CRef.cbool() result = self._iface.get_ach(self.name, achieved) if not result: return False return bool(achieved)","``True`` if achievement is unlocked. :rtype: bool" "def bchar(posh, posv, border_style): """""" Retrieve table border style for particular box border piece. """""" index = '{}{}'.format(posv, posh).lower() return BORDER_STYLES[border_style][index]",Retrieve table border style for particular box border piece. "def static_uplink_detect(self, veth): """"""Return the static uplink based on argument passed. The very first time, this function is called, it returns the uplink port read from a file. After restart, when this function is called the first time, it returns 'normal' assuming a veth is passed to this function which will be the case if uplink processing is successfully done. If user modified the uplink configuration and restarted, a 'down' will be returned to clear the old uplink. """""" LOG.info(""In static_uplink_detect %(veth)s"", {'veth': veth}) if self.static_uplink_first: self.static_uplink_first = False if self.phy_uplink is not None and ( self.phy_uplink != self.static_uplink_port): return 'down' if veth is None: return self.static_uplink_port else: return 'normal'","Return the static uplink based on argument passed. The very first time, this function is called, it returns the uplink port read from a file. After restart, when this function is called the first time, it returns 'normal' assuming a veth is passed to this function which will be the case if uplink processing is successfully done. 
If user modified the uplink configuration and restarted, a 'down' will be returned to clear the old uplink." "def lerp_quat(from_quat, to_quat, percent): """"""Return linear interpolation of two quaternions."""""" # Check if signs need to be reversed. if dot_quat(from_quat, to_quat) < 0.0: to_sign = -1 else: to_sign = 1 # Simple linear interpolation percent_from = 1.0 - percent percent_to = percent result = Quat( percent_from * from_quat.x + to_sign * percent_to * to_quat.x, percent_from * from_quat.y + to_sign * percent_to * to_quat.y, percent_from * from_quat.z + to_sign * percent_to * to_quat.z, percent_from * from_quat.w + to_sign * percent_to * to_quat.w) return result",Return linear interpolation of two quaternions. "def tarbell_spreadsheet(command, args): """""" Open context spreadsheet """""" with ensure_settings(command, args) as settings, ensure_project(command, args) as site: try: # First, try to get the Google Spreadsheet URL spreadsheet_url = _google_spreadsheet_url(site.project.SPREADSHEET_KEY) except AttributeError: # The project doesn't seem to be using a Google Spreadsheet. # Try the URL or path specified in the CONTEXT_SOURCE_FILE setting try: spreadsheet_url = _context_source_file_url( site.project.CONTEXT_SOURCE_FILE) print(spreadsheet_url) except AttributeError: puts(colored.red(""No Google spreadsheet or context source file "" ""has been configured.\n"")) return # Use the webbrowser package to try to open the file whether it's a # remote URL on the web, or a local file. On some platforms it will # successfully open local files in the default application. # This seems preferable to trying to do os detection and calling # the system-specific command for opening files in default # applications. # See # http://stackoverflow.com/questions/434597/open-document-with-default-application-in-python webbrowser.open(spreadsheet_url)",Open context spreadsheet "def do_delete_stack(self,args): """"""Delete specified stack. delete_stack -h for detailed help."""""" parser = CommandArgumentParser(""delete_stack"") parser.add_argument(dest='stack',help='stack index or name'); args = vars(parser.parse_args(args)) try: index = int(args['stack']) if self.stackList == None: self.do_stacks('-s') stack = AwsConnectionFactory.instance.getCfResource().Stack(self.stackList[index]['StackName']) except ValueError: stack = AwsConnectionFactory.instance.getCfResource().Stack(args['stack']) print ""Here are the details of the stack you are about to delete:"" print ""Stack.name: {}"".format(stack.name) print ""Stack.stack_id: {}"".format(stack.stack_id) print ""Stack.creation_time: {}"".format(stack.creation_time) confirmation = raw_input(""If you are sure, enter the Stack.name here: "") if stack.name == confirmation: stack.delete() print ""Stack deletion in progress"" else: print ""Stack deletion canceled: '{}' != '{}'"".format(stack.name,confirmation)",Delete specified stack. delete_stack -h for detailed help. "def identify_ibids(line): """"""Find IBIDs within the line, record their position and length, and replace them with underscores. 
@param line: (string) the working reference line @return: (tuple) containing 2 dictionaries and a string: Dictionary: matched IBID text: (Key: position of IBID in line; Value: matched IBID text) String: working line with matched IBIDs removed """""" ibid_match_txt = {} # Record details of each matched ibid: for m_ibid in re_ibid.finditer(line): ibid_match_txt[m_ibid.start()] = m_ibid.group(0) # Replace matched text in line with underscores: line = line[0:m_ibid.start()] + \ ""_"" * len(m_ibid.group(0)) + \ line[m_ibid.end():] return ibid_match_txt, line","Find IBIDs within the line, record their position and length, and replace them with underscores. @param line: (string) the working reference line @return: (tuple) containing 2 dictionaries and a string: Dictionary: matched IBID text: (Key: position of IBID in line; Value: matched IBID text) String: working line with matched IBIDs removed" "def validate_redirect_url(next_url): """""" Returns the next_url path if next_url matches allowed hosts. """""" if not next_url: return None parts = urlparse(next_url) if parts.netloc: domain, _ = split_domain_port(parts.netloc) allowed_hosts = (['*'] if django_settings.DEBUG else django_settings.ALLOWED_HOSTS) if not (domain and validate_host(domain, allowed_hosts)): return None return urlunparse(("""", """", parts.path, parts.params, parts.query, parts.fragment))",Returns the next_url path if next_url matches allowed hosts. "def pad_collate(samples:BatchSamples, pad_idx:int=1, pad_first:bool=True, backwards:bool=False) -> Tuple[LongTensor, LongTensor]: ""Function that collect samples and adds padding. Flips token order if needed"" samples = to_data(samples) max_len = max([len(s[0]) for s in samples]) res = torch.zeros(len(samples), max_len).long() + pad_idx if backwards: pad_first = not pad_first for i,s in enumerate(samples): if pad_first: res[i,-len(s[0]):] = LongTensor(s[0]) else: res[i,:len(s[0]):] = LongTensor(s[0]) if backwards: res = res.flip(1) return res, tensor(np.array([s[1] for s in samples]))",Function that collect samples and adds padding. Flips token order if needed "def get_json_files(files, recursive=False): """"""Return a list of files to validate from `files`. If a member of `files` is a directory, its children with a ``.json`` extension will be added to the return value. Args: files: A list of file paths and/or directory paths. recursive: If ``true``, this will descend into any subdirectories of input directories. Returns: A list of file paths to validate. """""" json_files = [] if not files: return json_files for fn in files: if os.path.isdir(fn): children = list_json_files(fn, recursive) json_files.extend(children) elif is_json(fn): json_files.append(fn) else: continue if not json_files: raise NoJSONFileFoundError(""No JSON files found!"") return json_files","Return a list of files to validate from `files`. If a member of `files` is a directory, its children with a ``.json`` extension will be added to the return value. Args: files: A list of file paths and/or directory paths. recursive: If ``true``, this will descend into any subdirectories of input directories. Returns: A list of file paths to validate." "def _ExtractYandexSearchQuery(self, url): """"""Extracts a search query from a Yandex search URL. Yandex: https://www.yandex.com/search/?text=query Args: url (str): URL. Returns: str: search query or None if no query was found. 
"""""" if 'text=' not in url: return None _, _, line = url.partition('text=') before_and, _, _ = line.partition('&') if not before_and: return None yandex_search_url = before_and.split()[0] return yandex_search_url.replace('+', ' ')","Extracts a search query from a Yandex search URL. Yandex: https://www.yandex.com/search/?text=query Args: url (str): URL. Returns: str: search query or None if no query was found." "def get_coiledcoil_region(self, cc_number=0, cutoff=7.0, min_kihs=2): """""" Assembly containing only assigned regions (i.e. regions with contiguous KnobsIntoHoles. """""" g = self.filter_graph(self.graph, cutoff=cutoff, min_kihs=min_kihs) ccs = sorted(networkx.connected_component_subgraphs(g, copy=True), key=lambda x: len(x.nodes()), reverse=True) cc = ccs[cc_number] helices = [x for x in g.nodes() if x.number in cc.nodes()] assigned_regions = self.get_assigned_regions(helices=helices, include_alt_states=False, complementary_only=True) coiledcoil_monomers = [h.get_slice_from_res_id(*assigned_regions[h.number]) for h in helices] return Assembly(coiledcoil_monomers)",Assembly containing only assigned regions (i.e. regions with contiguous KnobsIntoHoles. "def apply_procs(self, procs, kwargs, inputstring, log=True): """"""Apply processors to inputstring."""""" for get_proc in procs: proc = get_proc(self) inputstring = proc(inputstring, **kwargs) if log: logger.log_tag(proc.__name__, inputstring, multiline=True) return inputstring",Apply processors to inputstring. "def set_as_object(self, *args): """""" Sets a new value to map element specified by its index. When the index is not defined, it resets the entire map value. This method has double purpose because method overrides are not supported in JavaScript. :param args: objects to set """""" if len(args) == 1: self.set_as_map(args[0]) elif len(args) == 2: self.put(args[0], args[1])","Sets a new value to map element specified by its index. When the index is not defined, it resets the entire map value. This method has double purpose because method overrides are not supported in JavaScript. 
:param args: objects to set" "def gather_cache(self): ''' Gather the specified data from the minion data cache ''' cache = {'grains': {}, 'pillar': {}} if self.grains or self.pillar: if self.opts.get('minion_data_cache'): minions = self.cache.list('minions') if not minions: return cache for minion in minions: total = self.cache.fetch('minions/{0}'.format(minion), 'data') if 'pillar' in total: if self.pillar_keys: for key in self.pillar_keys: if key in total['pillar']: cache['pillar'][minion][key] = total['pillar'][key] else: cache['pillar'][minion] = total['pillar'] else: cache['pillar'][minion] = {} if 'grains' in total: if self.grain_keys: for key in self.grain_keys: if key in total['grains']: cache['grains'][minion][key] = total['grains'][key] else: cache['grains'][minion] = total['grains'] else: cache['grains'][minion] = {} return cache",Gather the specified data from the minion data cache "def render_customizations(self): """""" Customize prod_inner for site specific customizations """""" disable_plugins = self.customize_conf.get('disable_plugins', []) if not disable_plugins: logger.debug(""No site-specific plugins to disable"") else: for plugin_dict in disable_plugins: try: self.dj.remove_plugin( plugin_dict['plugin_type'], plugin_dict['plugin_name'] ) logger.debug( ""site-specific plugin disabled -> Type:{} Name:{}"".format( plugin_dict['plugin_type'], plugin_dict['plugin_name'] ) ) except KeyError: # Malformed config logger.debug(""Invalid custom configuration found for disable_plugins"") enable_plugins = self.customize_conf.get('enable_plugins', []) if not enable_plugins: logger.debug(""No site-specific plugins to enable"") else: for plugin_dict in enable_plugins: try: self.dj.add_plugin( plugin_dict['plugin_type'], plugin_dict['plugin_name'], plugin_dict['plugin_args'] ) logger.debug( ""site-specific plugin enabled -> Type:{} Name:{} Args: {}"".format( plugin_dict['plugin_type'], plugin_dict['plugin_name'], plugin_dict['plugin_args'] ) ) except KeyError: # Malformed config logger.debug(""Invalid custom configuration found for enable_plugins"")",Customize prod_inner for site specific customizations "def build_input(data, batch_size, dataset, train): """"""Build CIFAR image and labels. Args: data_path: Filename for cifar10 data. batch_size: Input batch size. train: True if we are training and false if we are testing. Returns: images: Batches of images of size [batch_size, image_size, image_size, 3]. labels: Batches of labels of size [batch_size, num_classes]. Raises: ValueError: When the specified dataset is not supported. 
"""""" image_size = 32 depth = 3 num_classes = 10 if dataset == ""cifar10"" else 100 images, labels = data num_samples = images.shape[0] - images.shape[0] % batch_size dataset = tf.contrib.data.Dataset.from_tensor_slices( (images[:num_samples], labels[:num_samples])) def map_train(image, label): image = tf.image.resize_image_with_crop_or_pad(image, image_size + 4, image_size + 4) image = tf.random_crop(image, [image_size, image_size, 3]) image = tf.image.random_flip_left_right(image) image = tf.image.per_image_standardization(image) return (image, label) def map_test(image, label): image = tf.image.resize_image_with_crop_or_pad(image, image_size, image_size) image = tf.image.per_image_standardization(image) return (image, label) dataset = dataset.map(map_train if train else map_test) dataset = dataset.batch(batch_size) dataset = dataset.repeat() if train: dataset = dataset.shuffle(buffer_size=16 * batch_size) images, labels = dataset.make_one_shot_iterator().get_next() images = tf.reshape(images, [batch_size, image_size, image_size, depth]) labels = tf.reshape(labels, [batch_size, 1]) indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1]) labels = tf.sparse_to_dense( tf.concat([indices, labels], 1), [batch_size, num_classes], 1.0, 0.0) assert len(images.get_shape()) == 4 assert images.get_shape()[0] == batch_size assert images.get_shape()[-1] == 3 assert len(labels.get_shape()) == 2 assert labels.get_shape()[0] == batch_size assert labels.get_shape()[1] == num_classes if not train: tf.summary.image(""images"", images) return images, labels","Build CIFAR image and labels. Args: data_path: Filename for cifar10 data. batch_size: Input batch size. train: True if we are training and false if we are testing. Returns: images: Batches of images of size [batch_size, image_size, image_size, 3]. labels: Batches of labels of size [batch_size, num_classes]. Raises: ValueError: When the specified dataset is not supported." "def is_same_function(f1, f2): """"""returns true if f1 and f2 is same function Use case: sometimes when user defines some virtual function in base class, it overrides it in a derived one. Sometimes we need to know whether two member functions is actually same function. """""" if f1 is f2: return True if f1.__class__ is not f2.__class__: return False if isinstance(f1, calldef_members.member_calldef_t) and \ f1.has_const != f2.has_const: return False if f1.name != f2.name: return False if not is_same_return_type(f1, f2): return False if len(f1.arguments) != len(f2.arguments): return False for f1_arg, f2_arg in zip(f1.arguments, f2.arguments): if not type_traits.is_same(f1_arg.decl_type, f2_arg.decl_type): return False return True","returns true if f1 and f2 is same function Use case: sometimes when user defines some virtual function in base class, it overrides it in a derived one. Sometimes we need to know whether two member functions is actually same function." "def _check_span_id(self, span_id): """"""Check the format of the span_id to ensure it is 16-character hex value representing a 64-bit number. If span_id is invalid, logs a warning message and returns None :type span_id: str :param span_id: Identifier for the span, unique within a span. :rtype: str :returns: Span_id for the current span. 
"""""" if span_id is None: return None assert isinstance(span_id, six.string_types) if span_id is INVALID_SPAN_ID: logging.warning( 'Span_id {} is invalid (cannot be all zero)'.format(span_id)) self.from_header = False return None match = SPAN_ID_PATTERN.match(span_id) if match: return span_id else: logging.warning( 'Span_id {} does not the match the ' 'required format'.format(span_id)) self.from_header = False return None","Check the format of the span_id to ensure it is 16-character hex value representing a 64-bit number. If span_id is invalid, logs a warning message and returns None :type span_id: str :param span_id: Identifier for the span, unique within a span. :rtype: str :returns: Span_id for the current span." "def get_room_member_ids(self, room_id, start=None, timeout=None): """"""Call get room member IDs API. https://devdocs.line.me/en/#get-group-room-member-ids Gets the user IDs of the members of a group that the bot is in. This includes the user IDs of users who have not added the bot as a friend or has blocked the bot. :param str room_id: Room ID :param str start: continuationToken :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float) :rtype: :py:class:`linebot.models.responses.MemberIds` :return: MemberIds instance """""" params = None if start is None else {'start': start} response = self._get( '/v2/bot/room/{room_id}/members/ids'.format(room_id=room_id), params=params, timeout=timeout ) return MemberIds.new_from_json_dict(response.json)","Call get room member IDs API. https://devdocs.line.me/en/#get-group-room-member-ids Gets the user IDs of the members of a group that the bot is in. This includes the user IDs of users who have not added the bot as a friend or has blocked the bot. :param str room_id: Room ID :param str start: continuationToken :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float) :rtype: :py:class:`linebot.models.responses.MemberIds` :return: MemberIds instance" "def get_groups_dict(self) -> Dict: """""" Returns serialized dictionary of groups from inventory """""" return { k: deserializer.inventory.InventoryElement.serialize(v).dict() for k, v in self.groups.items() }",Returns serialized dictionary of groups from inventory "def draw(self, parent, box): '''redraw the text''' import wx if self.textctrl is None: self.textctrl = wx.TextCtrl(parent, style=wx.TE_MULTILINE|wx.TE_READONLY) self.textctrl.WriteText(self.text) self._resize() box.Add(self.textctrl, flag=wx.LEFT, border=0) box.Layout()",redraw the text "def delete(self, url): """""" Make a HTTP DELETE request to the Readability API. :param url: The url to which to send a DELETE request. """""" logger.debug('Making DELETE request to %s', url) return self.oauth_session.delete(url)","Make a HTTP DELETE request to the Readability API. :param url: The url to which to send a DELETE request." "def import_project(self, file, path, namespace=None, overwrite=False, override_params=None, **kwargs): """"""Import a project from an archive file. 
Args: file: Data or file object containing the project path (str): Name and path for the new project namespace (str): The ID or path of the namespace that the project will be imported to overwrite (bool): If True overwrite an existing project with the same path override_params (dict): Set the specific settings for the project **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the server failed to perform the request Returns: dict: A representation of the import status. """""" files = { 'file': ('file.tar.gz', file) } data = { 'path': path, 'overwrite': overwrite } if override_params: for k, v in override_params.items(): data['override_params[%s]' % k] = v if namespace: data['namespace'] = namespace return self.gitlab.http_post('/projects/import', post_data=data, files=files, **kwargs)","Import a project from an archive file. Args: file: Data or file object containing the project path (str): Name and path for the new project namespace (str): The ID or path of the namespace that the project will be imported to overwrite (bool): If True overwrite an existing project with the same path override_params (dict): Set the specific settings for the project **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the server failed to perform the request Returns: dict: A representation of the import status." "def create(cls, name, key_chain_entry): """""" Create a key chain with list of keys Key_chain_entry format is:: [{'key': 'xxxx', 'key_id': 1-255, 'send_key': True|False}] :param str name: Name of key chain :param list key_chain_entry: list of key chain entries :raises CreateElementFailed: create failed with reason :return: instance with meta :rtype: OSPFKeyChain """""" key_chain_entry = key_chain_entry or [] json = {'name': name, 'ospfv2_key_chain_entry': key_chain_entry} return ElementCreator(cls, json)","Create a key chain with list of keys Key_chain_entry format is:: [{'key': 'xxxx', 'key_id': 1-255, 'send_key': True|False}] :param str name: Name of key chain :param list key_chain_entry: list of key chain entries :raises CreateElementFailed: create failed with reason :return: instance with meta :rtype: OSPFKeyChain" "def furtherArgsProcessing(args): """""" Converts args, and deals with incongruities that argparse couldn't handle """""" if isinstance(args, str): unprocessed = args.strip().split(' ') if unprocessed[0] == 'cyther': del unprocessed[0] args = parser.parse_args(unprocessed).__dict__ elif isinstance(args, argparse.Namespace): args = args.__dict__ elif isinstance(args, dict): pass else: raise CytherError( ""Args must be a instance of str or argparse.Namespace, not '{}'"".format( str(type(args)))) if args['watch']: args['timestamp'] = True args['watch_stats'] = {'counter': 0, 'errors': 0, 'compiles': 0, 'polls': 0} args['print_args'] = True return args","Converts args, and deals with incongruities that argparse couldn't handle" "def release(no_master, release_type): '''Releases a new version''' try: locale.setlocale(locale.LC_ALL, '') except: print(""Warning: Unable to set locale. 
Expect encoding problems."") git.is_repo_clean(master=(not no_master)) config = utils.get_config() config.update(utils.get_dist_metadata()) config['project_dir'] = Path(os.getcwd()) config['release_type'] = release_type with tempfile.TemporaryDirectory(prefix='ap_tmp') as tmp_dir: config['tmp_dir'] = tmp_dir values = release_ui(config) if type(values) is not str: utils.release(project_name=config['project_name'], tmp_dir=tmp_dir, project_dir=config['project_dir'], pypi_servers=config['pypi_servers'], **values) print('New release options:') pprint.pprint(values) else: print(values)",Releases a new version "def _draw_header(self): """""" Draw the title bar at the top of the screen """""" n_rows, n_cols = self.term.stdscr.getmaxyx() # Note: 2 argument form of derwin breaks PDcurses on Windows 7! window = self.term.stdscr.derwin(1, n_cols, self._row, 0) window.erase() # curses.bkgd expects bytes in py2 and unicode in py3 window.bkgd(str(' '), self.term.attr('TitleBar')) sub_name = self.content.name sub_name = sub_name.replace('/r/front', 'Front Page') parts = sub_name.split('/') if len(parts) == 1: pass elif '/m/' in sub_name: _, _, user, _, multi = parts sub_name = '{} Curated by {}'.format(multi, user) elif parts[1] == 'u': noun = 'My' if parts[2] == 'me' else parts[2] + ""'s"" user_room = parts[3] if len(parts) == 4 else 'overview' title_lookup = { 'overview': 'Overview', 'submitted': 'Submissions', 'comments': 'Comments', 'saved': 'Saved Content', 'hidden': 'Hidden Content', 'upvoted': 'Upvoted Content', 'downvoted': 'Downvoted Content' } sub_name = ""{} {}"".format(noun, title_lookup[user_room]) query = self.content.query if query: sub_name = 'Searching {0}: {1}'.format(sub_name, query) self.term.add_line(window, sub_name, 0, 0) # Set the terminal title if len(sub_name) > 50: title = sub_name.strip('/') title = title.replace('_', ' ') try: title = title.rsplit('/', 1)[1] except IndexError: pass else: title = sub_name # Setting the terminal title will break emacs or systems without # X window. if os.getenv('DISPLAY') and not os.getenv('INSIDE_EMACS'): title += ' - rtv {0}'.format(__version__) title = self.term.clean(title) if six.PY3: # In py3 you can't write bytes to stdout title = title.decode('utf-8') title = '\x1b]2;{0}\x07'.format(title) else: title = b'\x1b]2;{0}\x07'.format(title) sys.stdout.write(title) sys.stdout.flush() if self.reddit and self.reddit.user is not None: # The starting position of the name depends on if we're converting # to ascii or not width = len if self.config['ascii'] else textual_width if self.config['hide_username']: username = ""Logged in"" else: username = self.reddit.user.name s_col = (n_cols - width(username) - 1) # Only print username if it fits in the empty space on the right if (s_col - 1) >= width(sub_name): self.term.add_line(window, username, 0, s_col) self._row += 1",Draw the title bar at the top of the screen "def output_to_dict(self, output): """""" Convert the ROUGE output into python dictionary for further processing. """""" #0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632) pattern = re.compile( r""(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "" r""\(95%-conf.int. 
(\d.\d+) - (\d.\d+)\)"") results = {} for line in output.split(""\n""): match = pattern.match(line) if match: sys_id, rouge_type, measure, result, conf_begin, conf_end = \ match.groups() measure = { 'Average_R': 'recall', 'Average_P': 'precision', 'Average_F': 'f_score' }[measure] rouge_type = rouge_type.lower().replace(""-"", '_') key = ""{}_{}"".format(rouge_type, measure) results[key] = float(result) results[""{}_cb"".format(key)] = float(conf_begin) results[""{}_ce"".format(key)] = float(conf_end) return results","Convert the ROUGE output into python dictionary for further processing." "def set_vmname(self, vmname): """""" Renames the VirtualBox VM. :param vmname: VirtualBox VM name """""" if vmname == self._vmname: return if self.linked_clone: if self.status == ""started"": raise VirtualBoxError(""You can't change the name of running VM {}"".format(self._name)) # We can't rename a VM to a name that already exists vms = yield from self.manager.list_vms(allow_clone=True) if vmname in [vm[""vmname""] for vm in vms]: raise VirtualBoxError(""You can't change the name to {} it's already in use in VirtualBox"".format(vmname)) yield from self._modify_vm('--name ""{}""'.format(vmname)) log.info(""VirtualBox VM '{name}' [{id}] has set the VM name to '{vmname}'"".format(name=self.name, id=self.id, vmname=vmname)) self._vmname = vmname","Renames the VirtualBox VM. :param vmname: VirtualBox VM name" "def __SetDefaultUploadStrategy(self, upload_config, http_request): """"""Determine and set the default upload strategy for this upload. We generally prefer simple or multipart, unless we're forced to use resumable. This happens when any of (1) the upload is too large, (2) the simple endpoint doesn't support multipart requests and we have metadata, or (3) there is no simple upload endpoint. Args: upload_config: Configuration for the upload endpoint. http_request: The associated http request. Returns: None. """""" if upload_config.resumable_path is None: self.strategy = SIMPLE_UPLOAD if self.strategy is not None: return strategy = SIMPLE_UPLOAD if (self.total_size is not None and self.total_size > _RESUMABLE_UPLOAD_THRESHOLD): strategy = RESUMABLE_UPLOAD if http_request.body and not upload_config.simple_multipart: strategy = RESUMABLE_UPLOAD if not upload_config.simple_path: strategy = RESUMABLE_UPLOAD self.strategy = strategy","Determine and set the default upload strategy for this upload. We generally prefer simple or multipart, unless we're forced to use resumable. This happens when any of (1) the upload is too large, (2) the simple endpoint doesn't support multipart requests and we have metadata, or (3) there is no simple upload endpoint. Args: upload_config: Configuration for the upload endpoint. http_request: The associated http request. Returns: None." "def xpointerNewCollapsedRange(self): """"""Create a new xmlXPathObjectPtr of type range using a single node """""" ret = libxml2mod.xmlXPtrNewCollapsedRange(self._o) if ret is None:raise treeError('xmlXPtrNewCollapsedRange() failed') return xpathObjectRet(ret)","Create a new xmlXPathObjectPtr of type range using a single node" "def handle_interlude( self, obj, folder, index, ensemble, loop=None, **kwargs ): """"""Handle an interlude event. Interlude functions permit branching. They return a folder which the application can choose to adopt as the next supplier of dialogue. This handler calls the interlude with the supplied arguments and returns the result. :param obj: A callable object. 
:param folder: A :py:class:`~turberfield.dialogue.model.SceneScript.Folder` object. :param int index: Indicates which scene script in the folder is being processed. :param ensemble: A sequence of Python objects. :param branches: A sequence of :py:class:`~turberfield.dialogue.model.SceneScript.Folder` objects. from which to pick a branch in the action. :return: A :py:class:`~turberfield.dialogue.model.SceneScript.Folder` object. """""" if obj is None: return folder.metadata else: return obj(folder, index, ensemble, loop=loop, **kwargs)","Handle an interlude event. Interlude functions permit branching. They return a folder which the application can choose to adopt as the next supplier of dialogue. This handler calls the interlude with the supplied arguments and returns the result. :param obj: A callable object. :param folder: A :py:class:`~turberfield.dialogue.model.SceneScript.Folder` object. :param int index: Indicates which scene script in the folder is being processed. :param ensemble: A sequence of Python objects. :param branches: A sequence of :py:class:`~turberfield.dialogue.model.SceneScript.Folder` objects. from which to pick a branch in the action. :return: A :py:class:`~turberfield.dialogue.model.SceneScript.Folder` object." "def decode(self, s, encode_nominal=False, return_type=DENSE): '''Returns the Python representation of a given ARFF file. When a file object is passed as an argument, this method reads lines iteratively, avoiding to load unnecessary information to the memory. :param s: a string or file object with the ARFF file. :param encode_nominal: boolean, if True perform a label encoding while reading the .arff file. :param return_type: determines the data structure used to store the dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`, `arff.DENSE_GEN` or `arff.LOD_GEN`. Consult the sections on `working with sparse data`_ and `loading progressively`_. ''' try: return self._decode(s, encode_nominal=encode_nominal, matrix_type=return_type) except ArffException as e: e.line = self._current_line raise e","Returns the Python representation of a given ARFF file. When a file object is passed as an argument, this method reads lines iteratively, avoiding to load unnecessary information to the memory. :param s: a string or file object with the ARFF file. :param encode_nominal: boolean, if True perform a label encoding while reading the .arff file. :param return_type: determines the data structure used to store the dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`, `arff.DENSE_GEN` or `arff.LOD_GEN`. Consult the sections on `working with sparse data`_ and `loading progressively`_." "def extract_cookies(self, response, request, referrer_host=None): '''Wrapped ``extract_cookies``. Args: response: An instance of :class:`.http.request.Response`. request: An instance of :class:`.http.request.Request`. referrer_host (str): An hostname or IP address of the referrer URL. ''' new_response = HTTPResponseInfoWrapper(response) new_request = convert_http_request(request, referrer_host) self._cookie_jar.extract_cookies(new_response, new_request)","Wrapped ``extract_cookies``. Args: response: An instance of :class:`.http.request.Response`. request: An instance of :class:`.http.request.Request`. referrer_host (str): An hostname or IP address of the referrer URL." 
"def write(self, text): """"""Override the standard write method to filter the content."""""" index = text.find('\n') if index == -1: self._buffer = self._buffer + text else: self._buffer = self._buffer + text[:index + 1] if self._pattern: # pattern already compiled no need to check result = re.search(self._pattern, self._buffer) if result: for group in result.groups(): if group: self._buffer = self._buffer.replace(group, ""***"") self._file.write(self._buffer) self._file.flush() self._buffer = text[index + 1:]",Override the standard write method to filter the content. "def json(self): """"""Return a list of JSON objects output by this service."""""" lines = [] for line in self.lines(): try: if len(line) == 1: lines.append(json.loads(line, strict=False)) else: lines.append(json.loads(line[1], strict=False)) except ValueError: pass return lines",Return a list of JSON objects output by this service. "def modules(cls): """"""Collect all the public class attributes. All class attributes should be a DI modules, this method collects them and returns as a list. :return: list of DI modules :rtype: list[Union[Module, Callable]] """""" members = inspect.getmembers(cls, lambda a: not (inspect.isroutine(a) and a.__name__ == 'modules')) modules = [module for name, module in members if not name.startswith('_')] return modules","Collect all the public class attributes. All class attributes should be a DI modules, this method collects them and returns as a list. :return: list of DI modules :rtype: list[Union[Module, Callable]]" "def _asdict_anything(val, filter, dict_factory, retain_collection_types): """""" ``asdict`` only works on attrs instances, this works on anything. """""" if getattr(val.__class__, ""__attrs_attrs__"", None) is not None: # Attrs class. rv = asdict(val, True, filter, dict_factory, retain_collection_types) elif isinstance(val, (tuple, list, set)): cf = val.__class__ if retain_collection_types is True else list rv = cf( [ _asdict_anything( i, filter, dict_factory, retain_collection_types ) for i in val ] ) elif isinstance(val, dict): df = dict_factory rv = df( ( _asdict_anything(kk, filter, df, retain_collection_types), _asdict_anything(vv, filter, df, retain_collection_types), ) for kk, vv in iteritems(val) ) else: rv = val return rv","``asdict`` only works on attrs instances, this works on anything." "def easeOutBounce(n): """"""A bouncing tween function that hits the destination and then bounces to rest. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """""" _checkRange(n) if n < (1/2.75): return 7.5625 * n * n elif n < (2/2.75): n -= (1.5/2.75) return 7.5625 * n * n + 0.75 elif n < (2.5/2.75): n -= (2.25/2.75) return 7.5625 * n * n + 0.9375 else: n -= (2.65/2.75) return 7.5625 * n * n + 0.984375","A bouncing tween function that hits the destination and then bounces to rest. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine()." "def build_attrs(self, *args, **kwargs): """"""Add select2's tag attributes."""""" self.attrs.setdefault('data-minimum-input-length', 1) self.attrs.setdefault('data-tags', 'true') self.attrs.setdefault('data-token-separators', '["","", "" ""]') return super(Select2TagMixin, self).build_attrs(*args, **kwargs)",Add select2's tag attributes. 
"def raise_on_packet(self, pkt_cls, state, get_next_msg=True): """""" If the next message to be processed has type 'pkt_cls', raise 'state'. If there is no message waiting to be processed, we try to get one with the default 'get_next_msg' parameters. """""" # Maybe we already parsed the expected packet, maybe not. if get_next_msg: self.get_next_msg() if (not self.buffer_in or not isinstance(self.buffer_in[0], pkt_cls)): return self.cur_pkt = self.buffer_in[0] self.buffer_in = self.buffer_in[1:] raise state()","If the next message to be processed has type 'pkt_cls', raise 'state'. If there is no message waiting to be processed, we try to get one with the default 'get_next_msg' parameters." "def report(policies, start_date, options, output_fh, raw_output_fh=None): """"""Format a policy's extant records into a report."""""" regions = set([p.options.region for p in policies]) policy_names = set([p.name for p in policies]) formatter = Formatter( policies[0].resource_manager.resource_type, extra_fields=options.field, include_default_fields=not options.no_default_fields, include_region=len(regions) > 1, include_policy=len(policy_names) > 1 ) records = [] for policy in policies: # initialize policy execution context for output access policy.ctx.initialize() if policy.ctx.output.type == 's3': policy_records = record_set( policy.session_factory, policy.ctx.output.config['netloc'], policy.ctx.output.config['path'].strip('/'), start_date) else: policy_records = fs_record_set(policy.ctx.log_dir, policy.name) log.debug(""Found %d records for region %s"", len(policy_records), policy.options.region) for record in policy_records: record['policy'] = policy.name record['region'] = policy.options.region records += policy_records rows = formatter.to_csv(records) if options.format == 'csv': writer = UnicodeWriter(output_fh, formatter.headers()) writer.writerow(formatter.headers()) writer.writerows(rows) elif options.format == 'json': print(dumps(records, indent=2)) else: # We special case CSV, and for other formats we pass to tabulate print(tabulate(rows, formatter.headers(), tablefmt=options.format)) if raw_output_fh is not None: dumps(records, raw_output_fh, indent=2)",Format a policy's extant records into a report. "def Validate(self): """"""GlobExpression is valid."""""" if len(self.RECURSION_REGEX.findall(self._value)) > 1: raise ValueError(""Only one ** is permitted per path: %s."" % self._value)",GlobExpression is valid. "def connect(self): """"""Initializes a connection to the smtp server :return: True on success, False otherwise """""" connection_method = 'SMTP_SSL' if self.ssl else 'SMTP' self._logger.debug('Trying to connect via {}'.format(connection_method)) smtp = getattr(smtplib, connection_method) if self.port: self._smtp = smtp(self.address, self.port) else: self._smtp = smtp(self.address) self._smtp.ehlo() if self.tls: self._smtp.starttls() self._smtp.ehlo() self._logger.info('Got smtp connection') if self.username and self.password: self._logger.info('Logging in') self._smtp.login(self.username, self.password) self._connected = True","Initializes a connection to the smtp server :return: True on success, False otherwise" "def Disjunction(expr1: Expression, expr2: Expression) -> Expression: """"""Return expression which is the disjunction of `expr1` and `expr2`."""""" expr = Expression(ast.BoolOp(ast.Or(), [expr1.body, expr2.body])) return ast.fix_missing_locations(expr)",Return expression which is the disjunction of `expr1` and `expr2`. 
"def create_api_resources(restApiId, path, region=None, key=None, keyid=None, profile=None): ''' Given rest api id, and an absolute resource path, create all the resources and return all resources in the resourcepath, returns False on failure. CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api_resources myapi_id resource_path ''' path_parts = path.split('/') created = [] current_path = '' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) for path_part in path_parts: if current_path == '/': current_path = '{0}{1}'.format(current_path, path_part) else: current_path = '{0}/{1}'.format(current_path, path_part) r = describe_api_resource(restApiId, current_path, region=region, key=key, keyid=keyid, profile=profile) resource = r.get('resource') if not resource: resource = conn.create_resource(restApiId=restApiId, parentId=created[-1]['id'], pathPart=path_part) created.append(resource) if created: return {'created': True, 'restApiId': restApiId, 'resources': created} else: return {'created': False, 'error': 'unexpected error.'} except ClientError as e: return {'created': False, 'error': __utils__['boto3.get_error'](e)}","Given rest api id, and an absolute resource path, create all the resources and return all resources in the resourcepath, returns False on failure. CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api_resources myapi_id resource_path" "def _FormatTypeCheck(type_): """"""Pretty format of type check."""""" if isinstance(type_, tuple): items = [_FormatTypeCheck(t) for t in type_] return ""(%s)"" % "", "".join(items) elif hasattr(type_, ""__name__""): return type_.__name__ else: return repr(type_)",Pretty format of type check. "def get_packages(session): """"""Get deliveries in progress and completed."""""" resp = session.get(DELIVERIES_URL, params=_get_params(session.auth.locale)) parsed = BeautifulSoup(resp.text, HTML_PARSER) token_elem = parsed.find(TOKEN_FIND_TAG, TOKEN_FIND_ATTR) tid_elem = parsed.find(TID_FIND_TAG, TID_FIND_ATTR) if not token_elem or not tid_elem: raise UPSError('failed to find token or tid') token = token_elem.get(VALUE_ATTR) tid = tid_elem.get(VALUE_ATTR) resp = session.post(SERVICE_URL, { 'token': token, 'uid': session.auth.username, 'callType': 'allShipments', 'tid': tid, 'loc': session.auth.locale }) try: packages = [] data = json.loads(resp.text[UPS_JSON_PREAMBLE_SIZE:]) shipments = data['shipmentContainer']['inboundShipments'] + \ data['shipmentContainer']['historyShipments'] for shipment in shipments: from_location = '{}, {}, {}'.format(shipment['sfc'], shipment['sfs'], shipment['sfcn']) estimated_date = _parsed_date(shipment['sddfd']) actual_date = _parsed_date(shipment['dd']) packages.append({ 'tracking_number': shipment['tn'], 'status': shipment['sts'], 'from': shipment['sfn'], 'from_location': from_location, 'estimated_delivery_date': estimated_date, 'estimated_delivery_timeframe': shipment['sdtfd'], 'delivery_date': actual_date }) return packages except JSONDecodeError: raise UPSError('failed to parse json')",Get deliveries in progress and completed. "def create_from(cls, src_path): """"""Returns Torrent object created from a file or a directory. 
:param str src_path: :rtype: Torrent """""" is_dir = isdir(src_path) target_files, size_data = cls._get_target_files_info(src_path) SIZE_MIN = 32768 # 32 KiB SIZE_DEFAULT = 262144 # 256 KiB SIZE_MAX = 1048576 # 1 MiB CHUNKS_MIN = 1000 # todo use those limits as advised CHUNKS_MAX = 2200 size_piece = SIZE_MIN if size_data > SIZE_MIN: size_piece = SIZE_DEFAULT if size_piece > SIZE_MAX: size_piece = SIZE_MAX def read(filepath): with open(filepath, 'rb') as f: while True: chunk = f.read(size_piece - len(pieces_buffer)) chunk_size = len(chunk) if chunk_size == 0: break yield chunk pieces = bytearray() pieces_buffer = bytearray() for fpath, _, _ in target_files: for chunk in read(fpath): pieces_buffer += chunk if len(pieces_buffer) == size_piece: pieces += sha1(pieces_buffer).digest()[:20] pieces_buffer = bytearray() if len(pieces_buffer): pieces += sha1(pieces_buffer).digest()[:20] pieces_buffer = bytearray() info = { 'name': basename(src_path), 'pieces': bytes(pieces), 'piece length': size_piece, } if is_dir: files = [] for _, length, path in target_files: files.append({'length': length, 'path': path}) info['files'] = files else: info['length'] = target_files[0][1] torrent = cls({'info': info}) torrent.created_by = get_app_version() torrent.creation_date = datetime.utcnow() return torrent","Returns Torrent object created from a file or a directory. :param str src_path: :rtype: Torrent" "def stop(self): """"""Ask politely, first, with SIGINT and SIGQUIT."""""" if hasattr(self, 'process'): if self.process is not None: try: is_running = self.process.poll() is None except AttributeError: is_running = False if is_running: self.bundle_engine.logline(""Stopping {0}"".format(self.service.name)) self.term_signal_sent = True # Politely ask all child processes to die first try: for childproc in psutil.Process(self.process.pid).children(recursive=True): childproc.send_signal(signal.SIGINT) except psutil.NoSuchProcess: pass except AttributeError: pass try: self.process.send_signal(self.service.stop_signal) except OSError as e: if e.errno == 3: # No such process pass else: self.bundle_engine.warnline(""{0} stopped prematurely."".format(self.service.name)) else: self.bundle_engine.warnline(""{0} stopped prematurely."".format(self.service.name)) else: self.bundle_engine.warnline(""{0} was never successfully started."".format(self.service.name))","Ask politely, first, with SIGINT and SIGQUIT." "def get_processor_feature(self, feature): """"""Query whether a CPU feature is supported or not. in feature of type :class:`ProcessorFeature` CPU Feature identifier. return supported of type bool Feature is supported or not. """""" if not isinstance(feature, ProcessorFeature): raise TypeError(""feature can only be an instance of type ProcessorFeature"") supported = self._call(""getProcessorFeature"", in_p=[feature]) return supported","Query whether a CPU feature is supported or not. in feature of type :class:`ProcessorFeature` CPU Feature identifier. return supported of type bool Feature is supported or not." "def generate_hash_id(node): """""" Generates a hash_id for the node in question. :param node: lxml etree node """""" try: content = tostring(node) except Exception: logger.exception(""Generating of hash failed"") content = to_bytes(repr(node)) hash_id = md5(content).hexdigest() return hash_id[:8]","Generates a hash_id for the node in question. 
:param node: lxml etree node" "def extract_string_pairs_in_directory(directory_path, extract_func, filter_func): """""" Retrieves all string pairs in the directory Args: directory_path (str): The path of the directory containing the file to extract string pairs from. extract_func (function): Function for extracting the localization keys and comments from the files. The extract function receives 2 parameters: - dict that the keys (a key in the dict) and comments (a value in the dict) are added to. - str representing file path filter_func (function): Function for filtering files in the directory. The filter function receives the file name and returns a bool representing the filter result. True if the file name passed the filter, False otherwise. Returns: dict: A mapping between string pairs first value (probably the key), and the second value (probably the comment). """""" result = {} for root, dirnames, filenames in os.walk(directory_path): for file_name in filenames: if filter_func(file_name): file_path = os.path.join(root, file_name) try: extract_func(result, file_path) except Exception as e: print ""Error in file "" + file_name print e return result","Retrieves all string pairs in the directory Args: directory_path (str): The path of the directory containing the file to extract string pairs from. extract_func (function): Function for extracting the localization keys and comments from the files. The extract function receives 2 parameters: - dict that the keys (a key in the dict) and comments (a value in the dict) are added to. - str representing file path filter_func (function): Function for filtering files in the directory. The filter function receives the file name and returns a bool representing the filter result. True if the file name passed the filter, False otherwise. Returns: dict: A mapping between string pairs first value (probably the key), and the second value (probably the comment)." "def _session(self): """"""The current session used by the client. The Session object allows you to persist certain parameters across requests. It also persists cookies across all requests made from the Session instance, and will use urllib3's connection pooling. So if you're making several requests to the same host, the underlying TCP connection will be reused, which can result in a significant performance increase. """""" if self._http_session is None: self._http_session = requests.Session() self._http_session.headers.update(self._get_headers()) self._http_session.verify = self._verify_https_request() if all(self._credentials): username, password = self._credentials self._http_session.auth = requests_ntlm.HttpNtlmAuth( username=username, password=password) return self._http_session","The current session used by the client. The Session object allows you to persist certain parameters across requests. It also persists cookies across all requests made from the Session instance, and will use urllib3's connection pooling. So if you're making several requests to the same host, the underlying TCP connection will be reused, which can result in a significant performance increase." 
"def graph_repr(self): """"""Short repr to use when rendering Pipeline graphs."""""" # Replace any floating point numbers in the expression # with their scientific notation final = re.sub(r""[-+]?\d*\.\d+"", lambda x: format(float(x.group(0)), '.2E'), self._expr) # Graphviz interprets `\l` as ""divide label into lines, left-justified"" return ""Expression:\\l {}\\l"".format( final, )",Short repr to use when rendering Pipeline graphs. "def handle_resourceset(ltext, **kwargs): ''' A helper that converts sets of resources from a textual format such as Markdown, including absolutizing relative IRIs ''' fullprop=kwargs.get('fullprop') rid=kwargs.get('rid') base=kwargs.get('base', VERSA_BASEIRI) model=kwargs.get('model') iris = ltext.strip().split() for i in iris: model.add(rid, fullprop, I(iri.absolutize(i, base))) return None","A helper that converts sets of resources from a textual format such as Markdown, including absolutizing relative IRIs" "def atype_save(self): """"""Save the current atype :returns: None :rtype: None :raises: None """""" if not self.cur_atype: return desc = self.atype_desc_pte.toPlainText() self.cur_atype.description = desc self.cur_atype.save()","Save the current atype :returns: None :rtype: None :raises: None" "def search_fast(self, text): """"""do a sloppy quick ""search"" via the json index"""""" resp = self.impl.get( ""{base_url}/{text}/json"".format(base_url=self.base_url, text=text) ) return resp.json()[""info""][""package_url""]","do a sloppy quick ""search"" via the json index" "def messageToFile(message): """""" Flattens a message into a file-like object. """""" outFile = StringIO() messageGenerator = generator.Generator(outFile, False) messageGenerator.flatten(message) outFile.seek(0, 0) return outFile",Flattens a message into a file-like object. "def supported_providers(self): """""" Request a list of all available providers :returns: A list of all available providers (e.g. {'provider': 'ec2_ap_northeast', 'title': 'EC2 AP NORTHEAST'}) """""" req = self.request(self.uri + '/providers', api_version=2) providers = req.get().json() supported_providers = providers['supported_providers'] return supported_providers","Request a list of all available providers :returns: A list of all available providers (e.g. {'provider': 'ec2_ap_northeast', 'title': 'EC2 AP NORTHEAST'})" "def os_deployment_servers(self): """""" Gets the Os Deployment Servers API client. Returns: OsDeploymentServers: """""" if not self.__os_deployment_servers: self.__os_deployment_servers = OsDeploymentServers(self.__connection) return self.__os_deployment_servers","Gets the Os Deployment Servers API client. 
Returns: OsDeploymentServers:" "def select_sub(self, query, as_): """""" Add a subselect expression to the query :param query: A QueryBuilder instance :type query: QueryBuilder :param as_: The subselect alias :type as_: str :return: The current QueryBuilder instance :rtype: QueryBuilder """""" if isinstance(query, QueryBuilder): bindings = query.get_bindings() query = query.to_sql() elif isinstance(query, basestring): bindings = [] else: raise ArgumentError(""Invalid subselect"") return self.select_raw( ""(%s) AS %s"" % (query, self._grammar.wrap(as_)), bindings )","Add a subselect expression to the query :param query: A QueryBuilder instance :type query: QueryBuilder :param as_: The subselect alias :type as_: str :return: The current QueryBuilder instance :rtype: QueryBuilder" "def disconnect(remote): """"""Disconnect callback handler for GitHub."""""" # User must be authenticated if not current_user.is_authenticated: return current_app.login_manager.unauthorized() external_method = 'github' external_ids = [i.id for i in current_user.external_identifiers if i.method == external_method] if external_ids: oauth_unlink_external_id(dict(id=external_ids[0], method=external_method)) user_id = int(current_user.get_id()) token = RemoteToken.get(user_id, remote.consumer_key) if token: extra_data = token.remote_account.extra_data # Delete the token that we issued for GitHub to deliver webhooks webhook_token_id = extra_data.get('tokens', {}).get('webhook') ProviderToken.query.filter_by(id=webhook_token_id).delete() # Disable GitHub webhooks from our side db_repos = Repository.query.filter_by(user_id=user_id).all() # Keep repositories with hooks to pass to the celery task later on repos_with_hooks = [(r.github_id, r.hook) for r in db_repos if r.hook] for repo in db_repos: try: Repository.disable(user_id=user_id, github_id=repo.github_id, name=repo.name) except NoResultFound: # If the repository doesn't exist, no action is necessary pass db.session.commit() # Send Celery task for webhooks removal and token revocation disconnect_github.delay(token.access_token, repos_with_hooks) # Delete the RemoteAccount (along with the associated RemoteToken) token.remote_account.delete() return redirect(url_for('invenio_oauthclient_settings.index'))",Disconnect callback handler for GitHub. "def dot_v2(vec1, vec2): """"""Return the dot product of two vectors"""""" return vec1.x * vec2.x + vec1.y * vec2.y",Return the dot product of two vectors "def restart(self, all=False): """"""Restarts the given process."""""" if all: data = {'type': self.type} else: data = {'ps': self.process} r = self._h._http_resource( method='POST', resource=('apps', self.app.name, 'ps', 'restart'), data=data ) r.raise_for_status()",Restarts the given process. 
"def decode_link(self, link): """""" Decodes an RpbLink message into a tuple :param link: an RpbLink message :type link: riak.pb.riak_pb2.RpbLink :rtype tuple """""" if link.HasField(""bucket""): bucket = bytes_to_str(link.bucket) else: bucket = None if link.HasField(""key""): key = bytes_to_str(link.key) else: key = None if link.HasField(""tag""): tag = bytes_to_str(link.tag) else: tag = None return (bucket, key, tag)","Decodes an RpbLink message into a tuple :param link: an RpbLink message :type link: riak.pb.riak_pb2.RpbLink :rtype tuple" "def over(self, window): """""" Add a window clause to be applied to downstream analytic expressions """""" return GroupedTableExpr( self.table, self.by, having=self._having, order_by=self._order_by, window=window, )",Add a window clause to be applied to downstream analytic expressions "def update(self): """"""Determine all AR coefficients. >>> from hydpy.models.arma import * >>> parameterstep('1d') >>> responses(((1., 2.), (1.,)), th_3=((1.,), (1., 2., 3.))) >>> derived.ar_coefs.update() >>> derived.ar_coefs ar_coefs([[1.0, 2.0], [1.0, nan]]) Note that updating parameter `ar_coefs` sets the shape of the log sequence |LogOut| automatically. >>> logs.logout logout([[nan, nan], [nan, nan]]) """""" pars = self.subpars.pars coefs = pars.control.responses.ar_coefs self.shape = coefs.shape self(coefs) pars.model.sequences.logs.logout.shape = self.shape","Determine all AR coefficients. >>> from hydpy.models.arma import * >>> parameterstep('1d') >>> responses(((1., 2.), (1.,)), th_3=((1.,), (1., 2., 3.))) >>> derived.ar_coefs.update() >>> derived.ar_coefs ar_coefs([[1.0, 2.0], [1.0, nan]]) Note that updating parameter `ar_coefs` sets the shape of the log sequence |LogOut| automatically. >>> logs.logout logout([[nan, nan], [nan, nan]])" "def relation_call(method, relation_name=None, flag=None, state=None, *args): """"""Invoke a method on the class implementing a relation via the CLI"""""" if relation_name: relation = relation_from_name(relation_name) if relation is None: raise ValueError('Relation not found: %s' % relation_name) elif flag or state: relation = relation_from_flag(flag or state) if relation is None: raise ValueError('Relation not found: %s' % (flag or state)) else: raise ValueError('Must specify either relation_name or flag') result = getattr(relation, method)(*args) if isinstance(relation, RelationBase) and method == 'conversations': # special case for conversations to make them work from CLI result = [c.scope for c in result] return result",Invoke a method on the class implementing a relation via the CLI "def _get_next(request): """""" The part that's the least straightforward about views in this module is how they determine their redirects after they have finished computation. In short, they will try and determine the next place to go in the following order: 1. If there is a variable named ``next`` in the *POST* parameters, the view will redirect to that variable's value. 2. If there is a variable named ``next`` in the *GET* parameters, the view will redirect to that variable's value. 3. If Django can determine the previous page from the HTTP headers, the view will redirect to that previous page. """""" next = request.POST.get('next', request.GET.get('next', request.META.get('HTTP_REFERER', None))) if not next: next = request.path return next","The part that's the least straightforward about views in this module is how they determine their redirects after they have finished computation. 
In short, they will try and determine the next place to go in the following order: 1. If there is a variable named ``next`` in the *POST* parameters, the view will redirect to that variable's value. 2. If there is a variable named ``next`` in the *GET* parameters, the view will redirect to that variable's value. 3. If Django can determine the previous page from the HTTP headers, the view will redirect to that previous page." "def _intermediary_to_dot(tables, relationships): """""" Returns the dot source representing the database in a string. """""" t = '\n'.join(t.to_dot() for t in tables) r = '\n'.join(r.to_dot() for r in relationships) return '{}\n{}\n{}\n}}'.format(GRAPH_BEGINNING, t, r)",Returns the dot source representing the database in a string. "def get_parameter(self, var): """""" This method supports the functional tags by providing the actual values in the function as list of dict in case of table type parameter or as nested dict in case of decision diagram """""" parameter = [] for parameter_tag in var.findall('Parameter'): parameter_type = 'TBL' if parameter_tag.get('type') is not None: parameter_type = parameter_tag.get('type') if parameter_type == 'TBL': parameter = self.get_parameter_tbl(parameter_tag) elif parameter_type == 'DD': parameter = defaultdict(list) parameter = self.get_parameter_dd(parameter_tag) return parameter","This method supports the functional tags by providing the actual values in the function as list of dict in case of table type parameter or as nested dict in case of decision diagram" "def _gauss_filter(data, sigma=4, res_g=None, sub_blocks=(1, 1, 1)): """""" gaussian filter of given size Parameters ---------- data: 2 or 3 dimensional ndarray or OCLArray of type float32 input data size: scalar, tuple the size of the patch to consider res_g: OCLArray store result in buffer if given sub_blocks: perform over subblock tiling (only if data is ndarray) Returns ------- filtered image or None (if OCLArray) """""" truncate = 4. radius = tuple(int(truncate*s +0.5) for s in sigma) size = tuple(2*r+1 for r in radius) s = sigma[0] if data.ndim == 2: _filt = make_filter(_generic_filter_gpu_2d(FUNC=""res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))""%(size[0]//2,size[0]//2,s,s), DEFAULT=""0.f"")) elif data.ndim == 3: _filt = make_filter(_generic_filter_gpu_3d(FUNC=""res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))""%(size[0]//2,size[0]//2,s,s), DEFAULT=""0.f"")) else: raise ValueError(""currently only 2 or 3 dimensional data is supported"") return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)","gaussian filter of given size Parameters ---------- data: 2 or 3 dimensional ndarray or OCLArray of type float32 input data size: scalar, tuple the size of the patch to consider res_g: OCLArray store result in buffer if given sub_blocks: perform over subblock tiling (only if data is ndarray) Returns ------- filtered image or None (if OCLArray)" "def _update_expression_reference(self, # pylint: disable=no-self-use grammar: Grammar, parent_expression_nonterminal: str, child_expression_nonterminal: str) -> None: """""" When we add a new expression, there may be other expressions that refer to it, and we need to update those to point to the new expression. 
"""""" grammar[parent_expression_nonterminal].members = \ [member if member.name != child_expression_nonterminal else grammar[child_expression_nonterminal] for member in grammar[parent_expression_nonterminal].members]","When we add a new expression, there may be other expressions that refer to it, and we need to update those to point to the new expression." "def get_position(stream=STD_OUTPUT_HANDLE): ''' Returns current position of cursor, starts at 1. ''' stream = kernel32.GetStdHandle(stream) csbi = CONSOLE_SCREEN_BUFFER_INFO() kernel32.GetConsoleScreenBufferInfo(stream, byref(csbi)) pos = csbi.dwCursorPosition # zero based, add ones for compatibility. return (pos.X + 1, pos.Y + 1)","Returns current position of cursor, starts at 1." "def read_sections(self, sections_file, get_goids_only, exclude_ungrouped): """"""Read sections variable from a text file of from a Python file."""""" ext = os.path.splitext(sections_file)[1] file_contents = None if ext == '.py': file_contents = self.read_py(sections_file, get_goids_only, exclude_ungrouped) else: file_contents = self.read_txt(sections_file, get_goids_only, exclude_ungrouped) if file_contents: return file_contents.get('sections', None)",Read sections variable from a text file of from a Python file. "def output(*streams_and_filename, **kwargs): """"""Output file URL Syntax: `ffmpeg.output(stream1[, stream2, stream3...], filename, **ffmpeg_args)` Any supplied keyword arguments are passed to ffmpeg verbatim (e.g. ``t=20``, ``f='mp4'``, ``acodec='pcm'``, ``vcodec='rawvideo'``, etc.). Some keyword-arguments are handled specially, as shown below. Args: video_bitrate: parameter for ``-b:v``, e.g. ``video_bitrate=1000``. audio_bitrate: parameter for ``-b:a``, e.g. ``audio_bitrate=200``. format: alias for ``-f`` parameter, e.g. ``format='mp4'`` (equivalent to ``f='mp4'``). If multiple streams are provided, they are mapped to the same output. To tell ffmpeg to write to stdout, use ``pipe:`` as the filename. Official documentation: `Synopsis `__ """""" streams_and_filename = list(streams_and_filename) if 'filename' not in kwargs: if not isinstance(streams_and_filename[-1], basestring): raise ValueError('A filename must be provided') kwargs['filename'] = streams_and_filename.pop(-1) streams = streams_and_filename fmt = kwargs.pop('f', None) if fmt: if 'format' in kwargs: raise ValueError(""Can't specify both `format` and `f` kwargs"") kwargs['format'] = fmt return OutputNode(streams, output.__name__, kwargs=kwargs).stream()","Output file URL Syntax: `ffmpeg.output(stream1[, stream2, stream3...], filename, **ffmpeg_args)` Any supplied keyword arguments are passed to ffmpeg verbatim (e.g. ``t=20``, ``f='mp4'``, ``acodec='pcm'``, ``vcodec='rawvideo'``, etc.). Some keyword-arguments are handled specially, as shown below. Args: video_bitrate: parameter for ``-b:v``, e.g. ``video_bitrate=1000``. audio_bitrate: parameter for ``-b:a``, e.g. ``audio_bitrate=200``. format: alias for ``-f`` parameter, e.g. ``format='mp4'`` (equivalent to ``f='mp4'``). If multiple streams are provided, they are mapped to the same output. To tell ffmpeg to write to stdout, use ``pipe:`` as the filename. Official documentation: `Synopsis `__" "def expect_column_values_to_match_json_schema(self, column, json_schema, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None ): """"""Expect column entries to be JSON objects matching a given JSON schema. expect_column_values_to_match_json_schema is a :func:`column_map_expectation `. 
Args: column (str): \ The column name. Keyword Args: mostly (None or a float between 0 and 1): \ Return `""success"": True` if at least mostly percent of values match the expectation. \ For more detail, see :ref:`mostly`. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format `. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \ For more detail, see :ref:`meta`. Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format ` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. See Also: expect_column_values_to_be_json_parseable The JSON-schema docs at: http://json-schema.org/ """""" raise NotImplementedError","Expect column entries to be JSON objects matching a given JSON schema. expect_column_values_to_match_json_schema is a :func:`column_map_expectation `. Args: column (str): \ The column name. Keyword Args: mostly (None or a float between 0 and 1): \ Return `""success"": True` if at least mostly percent of values match the expectation. \ For more detail, see :ref:`mostly`. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format `. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \ For more detail, see :ref:`meta`. Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format ` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. See Also: expect_column_values_to_be_json_parseable The JSON-schema docs at: http://json-schema.org/" "def additions_removed(name, force=False): ''' Ensure that the VirtualBox Guest Additions are removed. Uses the CD, connected by VirtualBox. To connect VirtualBox Guest Additions via VirtualBox graphical interface press 'Host+D' ('Host' is usually 'Right Ctrl'). name The name has no functional value and is only used as a tracking reference. force Force VirtualBox Guest Additions removing. ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} current_state = __salt__['vbox_guest.additions_version']() if not current_state: ret['result'] = True ret['comment'] = 'System already in the correct state' return ret if __opts__['test']: ret['comment'] = ('The state of VirtualBox Guest Additions will be ' 'changed.') ret['changes'] = { 'old': current_state, 'new': True, } ret['result'] = None return ret new_state = __salt__['vbox_guest.additions_remove'](force=force) ret['comment'] = 'The state of VirtualBox Guest Additions was changed!' 
ret['changes'] = { 'old': current_state, 'new': new_state, } ret['result'] = bool(new_state) return ret","Ensure that the VirtualBox Guest Additions are removed. Uses the CD, connected by VirtualBox. To connect VirtualBox Guest Additions via VirtualBox graphical interface press 'Host+D' ('Host' is usually 'Right Ctrl'). name The name has no functional value and is only used as a tracking reference. force Force VirtualBox Guest Additions removing." "def resource_associate_permission(self, token, id, name, scopes, **kwargs): """""" Associates a permission with a Resource. https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_authorization_uma_policy_api :param str token: client access token :param str id: resource id :param str name: permission name :param list scopes: scopes access is wanted :param str description:optional :param list roles: (optional) :param list groups: (optional) :param list clients: (optional) :param str condition: (optional) :rtype: dict """""" return self._realm.client.post( '{}/{}'.format(self.well_known['policy_endpoint'], id), data=self._get_data(name=name, scopes=scopes, **kwargs), headers=self.get_headers(token) )","Associates a permission with a Resource. https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_authorization_uma_policy_api :param str token: client access token :param str id: resource id :param str name: permission name :param list scopes: scopes access is wanted :param str description:optional :param list roles: (optional) :param list groups: (optional) :param list clients: (optional) :param str condition: (optional) :rtype: dict" "def _get_fd(fileobj): """""" Get a descriptor out of a file object. :param fileobj: An integer (existing descriptor) or any object having the `fileno()` method. :raises ValueError: if the descriptor cannot be obtained or if the descriptor is invalid :returns: file descriptor number """""" if isinstance(fileobj, int): fd = fileobj else: try: fd = fileobj.fileno() except AttributeError: fd = None if fd is None or fd < 0: raise ValueError(""invalid fileobj: {!r}"".format(fileobj)) return fd","Get a descriptor out of a file object. :param fileobj: An integer (existing descriptor) or any object having the `fileno()` method. :raises ValueError: if the descriptor cannot be obtained or if the descriptor is invalid :returns: file descriptor number" "def cdate_range(start=None, end=None, periods=None, freq='C', tz=None, normalize=True, name=None, closed=None, **kwargs): """""" Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the default frequency .. deprecated:: 0.21.0 Parameters ---------- start : string or datetime-like, default None Left bound for generating dates end : string or datetime-like, default None Right bound for generating dates periods : integer, default None Number of periods to generate freq : string or DateOffset, default 'C' (CustomBusinessDay) Frequency strings can have multiples, e.g. 
'5H' tz : string, default None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing normalize : bool, default False Normalize start/end dates to midnight before generating date range name : string, default None Name of the resulting DatetimeIndex weekmask : string, Default 'Mon Tue Wed Thu Fri' weekmask of valid business days, passed to ``numpy.busdaycalendar`` holidays : list list/array of dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar`` closed : string, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) Notes ----- Of the three parameters: ``start``, ``end``, and ``periods``, exactly two must be specified. To learn more about the frequency strings, please see `this link `__. Returns ------- rng : DatetimeIndex """""" warnings.warn(""cdate_range is deprecated and will be removed in a future "" ""version, instead use pd.bdate_range(..., freq='{freq}')"" .format(freq=freq), FutureWarning, stacklevel=2) if freq == 'C': holidays = kwargs.pop('holidays', []) weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri') freq = CDay(holidays=holidays, weekmask=weekmask) return date_range(start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name, closed=closed, **kwargs)","Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the default frequency .. deprecated:: 0.21.0 Parameters ---------- start : string or datetime-like, default None Left bound for generating dates end : string or datetime-like, default None Right bound for generating dates periods : integer, default None Number of periods to generate freq : string or DateOffset, default 'C' (CustomBusinessDay) Frequency strings can have multiples, e.g. '5H' tz : string, default None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing normalize : bool, default False Normalize start/end dates to midnight before generating date range name : string, default None Name of the resulting DatetimeIndex weekmask : string, Default 'Mon Tue Wed Thu Fri' weekmask of valid business days, passed to ``numpy.busdaycalendar`` holidays : list list/array of dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar`` closed : string, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) Notes ----- Of the three parameters: ``start``, ``end``, and ``periods``, exactly two must be specified. To learn more about the frequency strings, please see `this link `__. Returns ------- rng : DatetimeIndex" "def InputCodon(seq_length, ignore_stop_codons=True, name=None, **kwargs): """"""Input placeholder for array returned by `encodeCodon` Note: The seq_length is divided by 3 Wrapper for: `keras.layers.Input((seq_length / 3, 61 or 61), name=name, **kwargs)` """""" if ignore_stop_codons: vocab = CODONS else: vocab = CODONS + STOP_CODONS assert seq_length % 3 == 0 return Input((seq_length / 3, len(vocab)), name=name, **kwargs)","Input placeholder for array returned by `encodeCodon` Note: The seq_length is divided by 3 Wrapper for: `keras.layers.Input((seq_length / 3, 61 or 61), name=name, **kwargs)`" "def prompt_4_yes_no(question): """""" Ask a question and prompt for yes or no :param question: Question to ask; answer is yes/no :return: :boolean """""" while True: sys.stdout.write(question + ' (y/n)? 
') try: choice = raw_input().lower() except: choice = input().lower() if choice == 'yes' or choice == 'y': return True elif choice == 'no' or choice == 'n': return False else: printError('\'%s\' is not a valid answer. Enter \'yes\'(y) or \'no\'(n).' % choice)","Ask a question and prompt for yes or no :param question: Question to ask; answer is yes/no :return: :boolean" "def kde_none(events_x, events_y, xout=None, yout=None): """""" No Kernel Density Estimation Parameters ---------- events_x, events_y: 1D ndarray The input points for kernel density estimation. Input is flattened automatically. xout, yout: ndarray The coordinates at which the KDE should be computed. If set to none, input coordinates are used. Returns ------- density: ndarray, same shape as `xout` The KDE for the points in (xout, yout) Notes ----- This method is a convenience method that always returns ones in the shape that the other methods in this module produce. """""" valid_combi = ((xout is None and yout is None) or (xout is not None and yout is not None) ) if not valid_combi: raise ValueError(""Both `xout` and `yout` must be (un)set."") if xout is None and yout is None: xout = events_x yout = events_y return np.ones(xout.shape)","No Kernel Density Estimation Parameters ---------- events_x, events_y: 1D ndarray The input points for kernel density estimation. Input is flattened automatically. xout, yout: ndarray The coordinates at which the KDE should be computed. If set to none, input coordinates are used. Returns ------- density: ndarray, same shape as `xout` The KDE for the points in (xout, yout) Notes ----- This method is a convenience method that always returns ones in the shape that the other methods in this module produce." "def catalogue_mt_filter(self, mt_table, flag=None): """""" Filter the catalogue using a magnitude-time table. The table has two columns and n-rows. :param numpy.ndarray mt_table: Magnitude time table with n-rows where column 1 is year and column 2 is magnitude """""" if flag is None: # No flag defined, therefore all events are initially valid flag = np.ones(self.get_number_events(), dtype=bool) for comp_val in mt_table: id0 = np.logical_and(self.data['year'].astype(float) < comp_val[0], self.data['magnitude'] < comp_val[1]) print(id0) flag[id0] = False if not np.all(flag): self.purge_catalogue(flag)","Filter the catalogue using a magnitude-time table. The table has two columns and n-rows. :param numpy.ndarray mt_table: Magnitude time table with n-rows where column 1 is year and column 2 is magnitude" "def _slice2rows(self, start, stop, step=None): """""" Convert a slice to an explicit array of rows """""" nrows = self._info['nrows'] if start is None: start = 0 if stop is None: stop = nrows if step is None: step = 1 tstart = self._fix_range(start) tstop = self._fix_range(stop) if tstart == 0 and tstop == nrows: # this is faster: if all fields are also requested, then a # single fread will be done return None if stop < start: raise ValueError(""start is greater than stop in slice"") return numpy.arange(tstart, tstop, step, dtype='i8')",Convert a slice to an explicit array of rows "def set_object_cache(self, notify_func=None, getbuffer_func=None): """""" Set the object cache ""notifyObjectCompiled"" and ""getBuffer"" callbacks to the given Python functions. """""" self._object_cache_notify = notify_func self._object_cache_getbuffer = getbuffer_func # Lifetime of the object cache is managed by us. 
self._object_cache = _ObjectCacheRef(self) # Note this doesn't keep a reference to self, to avoid reference # cycles. ffi.lib.LLVMPY_SetObjectCache(self, self._object_cache)","Set the object cache ""notifyObjectCompiled"" and ""getBuffer"" callbacks to the given Python functions." "def restore(self, hist_uid): ''' Restore by ID ''' if self.check_post_role()['ADMIN']: pass else: return False histinfo = MWikiHist.get_by_uid(hist_uid) if histinfo: pass else: return False postinfo = MWiki.get_by_uid(histinfo.wiki_id) cur_cnt = tornado.escape.xhtml_unescape(postinfo.cnt_md) old_cnt = tornado.escape.xhtml_unescape(histinfo.cnt_md) MWiki.update_cnt( histinfo.wiki_id, {'cnt_md': old_cnt, 'user_name': self.userinfo.user_name} ) MWikiHist.update_cnt( histinfo.uid, {'cnt_md': cur_cnt, 'user_name': postinfo.user_name} ) if postinfo.kind == '1': self.redirect('/wiki/{0}'.format(postinfo.title)) elif postinfo.kind == '2': self.redirect('/page/{0}.html'.format(postinfo.uid))",Restore by ID "def _rollback_handle(cls, connection): """"""On snowflake, rolling back the handle of an aborted session raises an exception. """""" try: connection.handle.rollback() except snowflake.connector.errors.ProgrammingError as e: msg = dbt.compat.to_string(e) if 'Session no longer exists' not in msg: raise","On snowflake, rolling back the handle of an aborted session raises an exception." "def _fill_queue(self, loglstar): """"""Sequentially add new live point proposals to the queue."""""" # Add/zip arguments to submit to the queue. point_queue = [] axes_queue = [] while self.nqueue < self.queue_size: if self._beyond_unit_bound(loglstar): # Propose points using the provided sampling/bounding options. point, axes = self.propose_point() evolve_point = self.evolve_point else: # Propose/evaluate points directly from the unit cube. point = self.rstate.rand(self.npdim) axes = np.identity(self.npdim) evolve_point = sample_unif point_queue.append(point) axes_queue.append(axes) self.nqueue += 1 loglstars = [loglstar for i in range(self.queue_size)] scales = [self.scale for i in range(self.queue_size)] ptforms = [self.prior_transform for i in range(self.queue_size)] logls = [self.loglikelihood for i in range(self.queue_size)] kwargs = [self.kwargs for i in range(self.queue_size)] args = zip(point_queue, loglstars, axes_queue, scales, ptforms, logls, kwargs) if self.use_pool_evolve: # Use the pool to propose (""evolve"") a new live point. self.queue = list(self.M(evolve_point, args)) else: # Propose (""evolve"") a new live point using the default `map` # function. self.queue = list(map(evolve_point, args))",Sequentially add new live point proposals to the queue. "def build_actions(self): """"""Create an ActionCollection that will perform sanity checks, copy the file, create a database entry and perform cleanup actions and in case of a failure clean everything up. :param work: the workfile :type work: :class:`JB_File` :param release: the releasefile :type release: :class:`JB_File` :param checks: the action collection object with sanity checks It should accept a :class:`JB_File` as object for execute. :type checks: :class:`ActionCollection` :param cleanup: a action collection object that holds cleanup actions for the given file. It should accept a :class:`JB_File` as object for execute. :type cleanup: :class:`ActionCollection` :param comment: comment for the release :type comment: :class:`str` :returns: An ActionCollection ready to execute. 
:rtype: :class:`ActionCollection` :raises: None """""" checkau = ActionUnit(""Sanity Checks"", ""Check the workfile. If the file is not conform, ask the user to continue."", self.sanity_check) copyau = ActionUnit(""Copy File"", ""Copy the workfile to the releasefile location."", self.copy, depsuccess=[checkau]) dbau = ActionUnit(""Create DB entry"", ""Create an entry in the database for the releasefile"", self.create_db_entry, depsuccess=[copyau]) cleanau = ActionUnit(""Cleanup"", ""Cleanup the releasefile. If something fails, ask the user to continue."", self.cleanup, depsuccess=[dbau]) deletefau1 = ActionUnit(""Delete the releasefile."", ""In case the db entry creation fails, delete the releasefile."", self.delete_releasefile, depfail=[dbau]) deletefau2 = ActionUnit(""Delete the releasefile."", ""In case the cleanup fails, delete the releasefile."", self.delete_releasefile, depsuccess=[copyau], depfail=[cleanau]) deletedbau = ActionUnit(""Delete the database entry."", ""In case the cleanup fails, delete the database entry"", self.delete_db_entry, depsuccess=[dbau], depfail=[cleanau]) return ActionCollection([checkau, copyau, dbau, cleanau, deletefau1, deletefau2, deletedbau])","Create an ActionCollection that will perform sanity checks, copy the file, create a database entry and perform cleanup actions and in case of a failure clean everything up. :param work: the workfile :type work: :class:`JB_File` :param release: the releasefile :type release: :class:`JB_File` :param checks: the action collection object with sanity checks It should accept a :class:`JB_File` as object for execute. :type checks: :class:`ActionCollection` :param cleanup: a action collection object that holds cleanup actions for the given file. It should accept a :class:`JB_File` as object for execute. :type cleanup: :class:`ActionCollection` :param comment: comment for the release :type comment: :class:`str` :returns: An ActionCollection ready to execute. :rtype: :class:`ActionCollection` :raises: None" "def _pb_from_query(query): """"""Convert a Query instance to the corresponding protobuf. :type query: :class:`Query` :param query: The source query. :rtype: :class:`.query_pb2.Query` :returns: A protobuf that can be sent to the protobuf API. N.b. that it does not contain ""in-flight"" fields for ongoing query executions (cursors, offset, limit). """""" pb = query_pb2.Query() for projection_name in query.projection: pb.projection.add().property.name = projection_name if query.kind: pb.kind.add().name = query.kind composite_filter = pb.filter.composite_filter composite_filter.op = query_pb2.CompositeFilter.AND if query.ancestor: ancestor_pb = query.ancestor.to_protobuf() # Filter on __key__ HAS_ANCESTOR == ancestor. ancestor_filter = composite_filter.filters.add().property_filter ancestor_filter.property.name = ""__key__"" ancestor_filter.op = query_pb2.PropertyFilter.HAS_ANCESTOR ancestor_filter.value.key_value.CopyFrom(ancestor_pb) for property_name, operator, value in query.filters: pb_op_enum = query.OPERATORS.get(operator) # Add the specific filter property_filter = composite_filter.filters.add().property_filter property_filter.property.name = property_name property_filter.op = pb_op_enum # Set the value to filter on based on the type. 
if property_name == ""__key__"": key_pb = value.to_protobuf() property_filter.value.key_value.CopyFrom(key_pb) else: helpers._set_protobuf_value(property_filter.value, value) if not composite_filter.filters: pb.ClearField(""filter"") for prop in query.order: property_order = pb.order.add() if prop.startswith(""-""): property_order.property.name = prop[1:] property_order.direction = property_order.DESCENDING else: property_order.property.name = prop property_order.direction = property_order.ASCENDING for distinct_on_name in query.distinct_on: pb.distinct_on.add().name = distinct_on_name return pb","Convert a Query instance to the corresponding protobuf. :type query: :class:`Query` :param query: The source query. :rtype: :class:`.query_pb2.Query` :returns: A protobuf that can be sent to the protobuf API. N.b. that it does not contain ""in-flight"" fields for ongoing query executions (cursors, offset, limit)." "def _dataset_create_resources(self): # type: () -> None """"""Creates resource objects in dataset """""" if 'resources' in self.data: self.old_data['resources'] = self._copy_hdxobjects(self.resources, hdx.data.resource.Resource, 'file_to_upload') self.init_resources() self.separate_resources()",Creates resource objects in dataset "def transform(self, trans): '''Return a copy of this neuron with a 3D transformation applied''' _data = deepcopy(self._data) _data.data_block[:, 0:3] = trans(_data.data_block[:, 0:3]) return FstNeuron(_data, self.name)",Return a copy of this neuron with a 3D transformation applied "def sunzen_corr_cos(data, cos_zen, limit=88., max_sza=95.): """"""Perform Sun zenith angle correction. The correction is based on the provided cosine of the zenith angle (``cos_zen``). The correction is limited to ``limit`` degrees (default: 88.0 degrees). For larger zenith angles, the correction is the same as at the ``limit`` if ``max_sza`` is `None`. The default behavior is to gradually reduce the correction past ``limit`` degrees up to ``max_sza`` where the correction becomes 0. Both ``data`` and ``cos_zen`` should be 2D arrays of the same shape. """""" # Convert the zenith angle limit to cosine of zenith angle limit_rad = np.deg2rad(limit) limit_cos = np.cos(limit_rad) max_sza_rad = np.deg2rad(max_sza) if max_sza is not None else max_sza # Cosine correction corr = 1. / cos_zen if max_sza is not None: # gradually fall off for larger zenith angle grad_factor = (np.arccos(cos_zen) - limit_rad) / (max_sza_rad - limit_rad) # invert the factor so maximum correction is done at `limit` and falls off later grad_factor = 1. - np.log(grad_factor + 1) / np.log(2) # make sure we don't make anything negative grad_factor = grad_factor.clip(0.) else: # Use constant value (the limit) for larger zenith angles grad_factor = 1. corr = corr.where(cos_zen > limit_cos, grad_factor / limit_cos) # Force ""night"" pixels to 0 (where SZA is invalid) corr = corr.where(cos_zen.notnull(), 0) return data * corr","Perform Sun zenith angle correction. The correction is based on the provided cosine of the zenith angle (``cos_zen``). The correction is limited to ``limit`` degrees (default: 88.0 degrees). For larger zenith angles, the correction is the same as at the ``limit`` if ``max_sza`` is `None`. The default behavior is to gradually reduce the correction past ``limit`` degrees up to ``max_sza`` where the correction becomes 0. Both ``data`` and ``cos_zen`` should be 2D arrays of the same shape." 
"def copy(source, dest, name=None, shallow=False, without_attrs=False, log=None, if_exists='raise', dry_run=False, **create_kws): """"""Copy the `source` array or group into the `dest` group. Parameters ---------- source : group or array/dataset A zarr group or array, or an h5py group or dataset. dest : group A zarr or h5py group. name : str, optional Name to copy the object to. shallow : bool, optional If True, only copy immediate children of `source`. without_attrs : bool, optional Do not copy user attributes. log : callable, file path or file-like object, optional If provided, will be used to log progress information. if_exists : {'raise', 'replace', 'skip', 'skip_initialized'}, optional How to handle arrays that already exist in the destination group. If 'raise' then a CopyError is raised on the first array already present in the destination group. If 'replace' then any array will be replaced in the destination. If 'skip' then any existing arrays will not be copied. If 'skip_initialized' then any existing arrays with all chunks initialized will not be copied (not available when copying to h5py). dry_run : bool, optional If True, don't actually copy anything, just log what would have happened. **create_kws Passed through to the create_dataset method when copying an array/dataset. Returns ------- n_copied : int Number of items copied. n_skipped : int Number of items skipped. n_bytes_copied : int Number of bytes of data that were actually copied. Examples -------- Here's an example of copying a group named 'foo' from an HDF5 file to a Zarr group:: >>> import h5py >>> import zarr >>> import numpy as np >>> source = h5py.File('data/example.h5', mode='w') >>> foo = source.create_group('foo') >>> baz = foo.create_dataset('bar/baz', data=np.arange(100), chunks=(50,)) >>> spam = source.create_dataset('spam', data=np.arange(100, 200), chunks=(30,)) >>> zarr.tree(source) / ├── foo │ └── bar │ └── baz (100,) int64 └── spam (100,) int64 >>> dest = zarr.group() >>> from sys import stdout >>> zarr.copy(source['foo'], dest, log=stdout) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 all done: 3 copied, 0 skipped, 800 bytes copied (3, 0, 800) >>> dest.tree() # N.B., no spam / └── foo └── bar └── baz (100,) int64 >>> source.close() The ``if_exists`` parameter provides options for how to handle pre-existing data in the destination. Here are some examples of these options, also using ``dry_run=True`` to find out what would happen without actually copying anything:: >>> source = zarr.group() >>> dest = zarr.group() >>> baz = source.create_dataset('foo/bar/baz', data=np.arange(100)) >>> spam = source.create_dataset('foo/spam', data=np.arange(1000)) >>> existing_spam = dest.create_dataset('foo/spam', data=np.arange(1000)) >>> from sys import stdout >>> try: ... zarr.copy(source['foo'], dest, log=stdout, dry_run=True) ... except zarr.CopyError as e: ... print(e) ... copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 an object 'spam' already exists in destination '/foo' >>> zarr.copy(source['foo'], dest, log=stdout, if_exists='replace', dry_run=True) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 copy /foo/spam (1000,) int64 dry run: 4 copied, 0 skipped (4, 0, 0) >>> zarr.copy(source['foo'], dest, log=stdout, if_exists='skip', dry_run=True) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 skip /foo/spam (1000,) int64 dry run: 3 copied, 1 skipped (3, 1, 0) Notes ----- Please note that this is an experimental feature. 
The behaviour of this function is still evolving and the default behaviour and/or parameters may change in future versions. """""" # value checks _check_dest_is_group(dest) # setup logging with _LogWriter(log) as log: # do the copying n_copied, n_skipped, n_bytes_copied = _copy( log, source, dest, name=name, root=True, shallow=shallow, without_attrs=without_attrs, if_exists=if_exists, dry_run=dry_run, **create_kws ) # log a final message with a summary of what happened _log_copy_summary(log, dry_run, n_copied, n_skipped, n_bytes_copied) return n_copied, n_skipped, n_bytes_copied","Copy the `source` array or group into the `dest` group. Parameters ---------- source : group or array/dataset A zarr group or array, or an h5py group or dataset. dest : group A zarr or h5py group. name : str, optional Name to copy the object to. shallow : bool, optional If True, only copy immediate children of `source`. without_attrs : bool, optional Do not copy user attributes. log : callable, file path or file-like object, optional If provided, will be used to log progress information. if_exists : {'raise', 'replace', 'skip', 'skip_initialized'}, optional How to handle arrays that already exist in the destination group. If 'raise' then a CopyError is raised on the first array already present in the destination group. If 'replace' then any array will be replaced in the destination. If 'skip' then any existing arrays will not be copied. If 'skip_initialized' then any existing arrays with all chunks initialized will not be copied (not available when copying to h5py). dry_run : bool, optional If True, don't actually copy anything, just log what would have happened. **create_kws Passed through to the create_dataset method when copying an array/dataset. Returns ------- n_copied : int Number of items copied. n_skipped : int Number of items skipped. n_bytes_copied : int Number of bytes of data that were actually copied. Examples -------- Here's an example of copying a group named 'foo' from an HDF5 file to a Zarr group:: >>> import h5py >>> import zarr >>> import numpy as np >>> source = h5py.File('data/example.h5', mode='w') >>> foo = source.create_group('foo') >>> baz = foo.create_dataset('bar/baz', data=np.arange(100), chunks=(50,)) >>> spam = source.create_dataset('spam', data=np.arange(100, 200), chunks=(30,)) >>> zarr.tree(source) / ├── foo │ └── bar │ └── baz (100,) int64 └── spam (100,) int64 >>> dest = zarr.group() >>> from sys import stdout >>> zarr.copy(source['foo'], dest, log=stdout) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 all done: 3 copied, 0 skipped, 800 bytes copied (3, 0, 800) >>> dest.tree() # N.B., no spam / └── foo └── bar └── baz (100,) int64 >>> source.close() The ``if_exists`` parameter provides options for how to handle pre-existing data in the destination. Here are some examples of these options, also using ``dry_run=True`` to find out what would happen without actually copying anything:: >>> source = zarr.group() >>> dest = zarr.group() >>> baz = source.create_dataset('foo/bar/baz', data=np.arange(100)) >>> spam = source.create_dataset('foo/spam', data=np.arange(1000)) >>> existing_spam = dest.create_dataset('foo/spam', data=np.arange(1000)) >>> from sys import stdout >>> try: ... zarr.copy(source['foo'], dest, log=stdout, dry_run=True) ... except zarr.CopyError as e: ... print(e) ... 
copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 an object 'spam' already exists in destination '/foo' >>> zarr.copy(source['foo'], dest, log=stdout, if_exists='replace', dry_run=True) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 copy /foo/spam (1000,) int64 dry run: 4 copied, 0 skipped (4, 0, 0) >>> zarr.copy(source['foo'], dest, log=stdout, if_exists='skip', dry_run=True) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 skip /foo/spam (1000,) int64 dry run: 3 copied, 1 skipped (3, 1, 0) Notes ----- Please note that this is an experimental feature. The behaviour of this function is still evolving and the default behaviour and/or parameters may change in future versions." "def filterAcceptsRow(self, row, parentindex): """"""Return True, if the filter accepts the given row of the parent :param row: the row to filter :type row: :class:`int` :param parentindex: the parent index :type parentindex: :class:`QtCore.QModelIndex` :returns: True, if the filter accepts the row :rtype: :class:`bool` :raises: None """""" if not super(ReftrackSortFilterModel, self).filterAcceptsRow(row, parentindex): return False if parentindex.isValid(): m = parentindex.model() else: m = self.sourceModel() i = m.index(row, 18, parentindex) reftrack = i.data(REFTRACK_OBJECT_ROLE) if not reftrack: return True else: return self.filter_accept_reftrack(reftrack)","Return True, if the filter accepts the given row of the parent :param row: the row to filter :type row: :class:`int` :param parentindex: the parent index :type parentindex: :class:`QtCore.QModelIndex` :returns: True, if the filter accepts the row :rtype: :class:`bool` :raises: None" "def preprocess_dataset(data, min_freq=5, max_vocab_size=None): """"""Dataset preprocessing helper. Parameters ---------- data : mx.data.Dataset Input Dataset. For example gluonnlp.data.Text8 or gluonnlp.data.Fil9 min_freq : int, default 5 Minimum token frequency for a token to be included in the vocabulary and returned DataStream. max_vocab_size : int, optional Specifies a maximum size for the vocabulary. Returns ------- gluonnlp.data.DataStream Each sample is a valid input to gluonnlp.data.EmbeddingCenterContextBatchify. gluonnlp.Vocab Vocabulary of all tokens in Text8 that occur at least min_freq times of maximum size max_vocab_size. idx_to_counts : list of int Mapping from token indices to their occurrence-counts in the Text8 dataset. """""" with print_time('count and construct vocabulary'): counter = nlp.data.count_tokens(itertools.chain.from_iterable(data)) vocab = nlp.Vocab(counter, unknown_token=None, padding_token=None, bos_token=None, eos_token=None, min_freq=min_freq, max_size=max_vocab_size) idx_to_counts = [counter[w] for w in vocab.idx_to_token] def code(sentence): return [vocab[token] for token in sentence if token in vocab] with print_time('code data'): data = data.transform(code, lazy=False) data = nlp.data.SimpleDataStream([data]) return data, vocab, idx_to_counts","Dataset preprocessing helper. Parameters ---------- data : mx.data.Dataset Input Dataset. For example gluonnlp.data.Text8 or gluonnlp.data.Fil9 min_freq : int, default 5 Minimum token frequency for a token to be included in the vocabulary and returned DataStream. max_vocab_size : int, optional Specifies a maximum size for the vocabulary. Returns ------- gluonnlp.data.DataStream Each sample is a valid input to gluonnlp.data.EmbeddingCenterContextBatchify. gluonnlp.Vocab Vocabulary of all tokens in Text8 that occur at least min_freq times of maximum size max_vocab_size. 
idx_to_counts : list of int Mapping from token indices to their occurrence-counts in the Text8 dataset." "def is_android_api(self): """""" Tries to guess if the current class is an Android API class. This might be not very precise unless an apilist is given, with classes that are in fact known APIs. Such a list might be generated by using the android.jar files. :return: boolean """""" # Packages found at https://developer.android.com/reference/packages.html api_candidates = [""Landroid/"", ""Lcom/android/internal/util"", ""Ldalvik/"", ""Ljava/"", ""Ljavax/"", ""Lorg/apache/"", ""Lorg/json/"", ""Lorg/w3c/dom/"", ""Lorg/xml/sax"", ""Lorg/xmlpull/v1/"", ""Ljunit/""] if not self.is_external(): # API must be external return False if self.apilist: return self.orig_class.get_name() in self.apilist else: for candidate in api_candidates: if self.orig_class.get_name().startswith(candidate): return True return False","Tries to guess if the current class is an Android API class. This might be not very precise unless an apilist is given, with classes that are in fact known APIs. Such a list might be generated by using the android.jar files. :return: boolean" "def sprinter(self): """""" Called when parallelize is True. This function will generate the file names in a directory tree by adding directories to a Queue and continuously exploring directories in the Queue until Queue is emptied. Significantly faster than crawler method for larger directory trees. """""" self._printer('Multiprocess Walk') # Loop through directories in case there is more than one (1) for directory in self.directory: self._get_root_files(directory) # Add file within root directory if filepaths is empty # acquire the list of paths first_level_dirs = next(os.walk(directory))[1] for path in first_level_dirs: self.unsearched.put((directory, path)) self._printer('Pool Processing STARTED') pool = Pool(self.pool_size) pool.map_async(self.parallel_worker, range(self.pool_size)) pool.close() self.unsearched.join() self._printer('Pool Processing ENDED') return self.filepaths","Called when parallelize is True. This function will generate the file names in a directory tree by adding directories to a Queue and continuously exploring directories in the Queue until Queue is emptied. Significantly faster than crawler method for larger directory trees." "def convert_to_timezone_naive(time_to_freeze): """""" Converts a potentially timezone-aware datetime to be a naive UTC datetime """""" if time_to_freeze.tzinfo: time_to_freeze -= time_to_freeze.utcoffset() time_to_freeze = time_to_freeze.replace(tzinfo=None) return time_to_freeze",Converts a potentially timezone-aware datetime to be a naive UTC datetime "def strip_quotes(self, content): """""" Unquote given rule. Args: content (str): An import rule. Raises: InvalidImportRule: Raise exception if the rule is badly quoted (not started or not ended quotes). Returns: string: The given rule unquoted. 
"""""" error_msg = ""Following rule is badly quoted: {}"" if (content.startswith('""') and content.endswith('""')) or \ (content.startswith(""'"") and content.endswith(""'"")): return content[1:-1] # Quote starting but not ended elif (content.startswith('""') and not content.endswith('""')) or \ (content.startswith(""'"") and not content.endswith(""'"")): raise InvalidImportRule(error_msg.format(content)) # Quote ending but not started elif (not content.startswith('""') and content.endswith('""')) or \ (not content.startswith(""'"") and content.endswith(""'"")): raise InvalidImportRule(error_msg.format(content)) return content","Unquote given rule. Args: content (str): An import rule. Raises: InvalidImportRule: Raise exception if the rule is badly quoted (not started or not ended quotes). Returns: string: The given rule unquoted." "def send_response(self, request, result=None, error=None): """"""Respond to a JSON-RPC method call. This is a response to the message in *request*. If *error* is not provided, then this is a succesful response, and the value in *result*, which may be ``None``, is passed back to the client. if *error* is provided and not ``None`` then an error is sent back. In this case *error* must be a dictionary as specified by the JSON-RPC spec. """""" message = self._version.create_response(request, result, error) self.send_message(message)","Respond to a JSON-RPC method call. This is a response to the message in *request*. If *error* is not provided, then this is a succesful response, and the value in *result*, which may be ``None``, is passed back to the client. if *error* is provided and not ``None`` then an error is sent back. In this case *error* must be a dictionary as specified by the JSON-RPC spec." "def check_master(self): ''' Log if the master is not running :rtype: bool :return: Whether or not the master is running ''' if not os.path.exists( os.path.join( self.opts['sock_dir'], 'publish_pull.ipc' ) ): return False return True","Log if the master is not running :rtype: bool :return: Whether or not the master is running" "def circuit_to_quirk_url(circuit: circuits.Circuit, prefer_unknown_gate_to_failure: bool=False, escape_url=True) -> str: """"""Returns a Quirk URL for the given circuit. Args: circuit: The circuit to open in Quirk. prefer_unknown_gate_to_failure: If not set, gates that fail to convert will cause this function to raise an error. If set, a URL containing bad gates will be generated. (Quirk will open the URL, and replace the bad gates with parse errors, but still get the rest of the circuit.) escape_url: If set, the generated URL will have special characters such as quotes escaped using %. This makes it possible to paste the URL into forums and the command line and etc and have it properly parse. If not set, the generated URL will be more compact and human readable (and can still be pasted directly into a browser's address bar). 
Returns: """""" circuit = circuit.copy() linearize_circuit_qubits(circuit) cols = [] # Type: List[List[Any]] for moment in circuit: can_merges = [] for op in moment.operations: for col, can_merge in _to_quirk_cols( op, prefer_unknown_gate_to_failure): if can_merge: can_merges.append(col) else: cols.append(col) if can_merges: merged_col = [1] * max(len(e) for e in can_merges) for col in can_merges: for i in range(len(col)): if col[i] != 1: merged_col[i] = col[i] cols.append(merged_col) circuit_json = json.JSONEncoder(ensure_ascii=False, separators=(',', ':'), sort_keys=True).encode({'cols': cols}) if escape_url: suffix = urllib.parse.quote(circuit_json) else: suffix = circuit_json return 'http://algassert.com/quirk#circuit={}'.format(suffix)","Returns a Quirk URL for the given circuit. Args: circuit: The circuit to open in Quirk. prefer_unknown_gate_to_failure: If not set, gates that fail to convert will cause this function to raise an error. If set, a URL containing bad gates will be generated. (Quirk will open the URL, and replace the bad gates with parse errors, but still get the rest of the circuit.) escape_url: If set, the generated URL will have special characters such as quotes escaped using %. This makes it possible to paste the URL into forums and the command line and etc and have it properly parse. If not set, the generated URL will be more compact and human readable (and can still be pasted directly into a browser's address bar). Returns:" "def get_oauth_request(self): """"""Return an OAuth Request object for the current request."""""" try: method = os.environ['REQUEST_METHOD'] except: method = 'GET' postdata = None if method in ('POST', 'PUT'): postdata = self.request.body return oauth.Request.from_request(method, self.request.uri, headers=self.request.headers, query_string=postdata)",Return an OAuth Request object for the current request. "def _entity_list_as_bel(entities: Iterable[BaseEntity]) -> str: """"""Stringify a list of BEL entities."""""" return ', '.join( e.as_bel() for e in entities )",Stringify a list of BEL entities. "def load_to_array(self, keys): """""" This loads the data contained in the catalogue into a numpy array. The method works only for float data :param keys: A list of keys to be uploaded into the array :type list: """""" # Preallocate the numpy array data = np.empty((len(self.data[keys[0]]), len(keys))) for i in range(0, len(self.data[keys[0]])): for j, key in enumerate(keys): data[i, j] = self.data[key][i] return data","This loads the data contained in the catalogue into a numpy array. The method works only for float data :param keys: A list of keys to be uploaded into the array :type list:" "def get_item_prices(self, package_id): """"""Get item prices. Retrieve a SoftLayer_Product_Package item prices record. :param int package_id: package identifier. :returns: A list of price IDs associated with the given package. """""" mask = 'mask[pricingLocationGroup[locations]]' prices = self.package_svc.getItemPrices(id=package_id, mask=mask) return prices","Get item prices. Retrieve a SoftLayer_Product_Package item prices record. :param int package_id: package identifier. :returns: A list of price IDs associated with the given package." "def _create_template(self, name): """"""Create an instance of a tornado.template.Template object for the given template name. 
:param str name: The name/path to the template :rtype: tornado.template.Template """""" url = '%s/%s' % (self._base_url, escape.url_escape(name)) LOGGER.debug('Making HTTP GET request to %s', url) response = self._http_client.fetch(url) data = json.loads(response.body) return template.Template(data['template'], name=name, loader=self)","Create an instance of a tornado.template.Template object for the given template name. :param str name: The name/path to the template :rtype: tornado.template.Template" "def sext(self, n): ''' Sign-extend the variable to n bits. n bits must be strictly larger than the actual number of bits, or a ValueError is thrown ''' if n <= self.nbits: raise ValueError(""n must be > %d bits"" % self.nbits) mba_ret = self.__new_mba(n) ret = mba_ret.from_cst(0) for i in range(self.nbits): ret.vec[i] = self.vec[i] last_bit = self.vec[self.nbits-1] for i in range(self.nbits, n): ret.vec[i] = last_bit return mba_ret.from_vec(ret)","Sign-extend the variable to n bits. n bits must be strictly larger than the actual number of bits, or a ValueError is thrown" "def louvain_clustering(self, X=None, res=1, method='modularity'): """"""Runs Louvain clustering using the vtraag implementation. Assumes that 'louvain' optional dependency is installed. Parameters ---------- res - float, optional, default 1 The resolution parameter which tunes the number of clusters Louvain finds. method - str, optional, default 'modularity' Can be 'modularity' or 'significance', which are two different optimizing functions in the Louvain algorithm. """""" if X is None: X = self.adata.uns['neighbors']['connectivities'] save = True else: if not sp.isspmatrix_csr(X): X = sp.csr_matrix(X) save = False import igraph as ig import louvain adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr() sources, targets = adjacency.nonzero() weights = adjacency[sources, targets] if isinstance(weights, np.matrix): weights = weights.A1 g = ig.Graph(directed=True) g.add_vertices(adjacency.shape[0]) g.add_edges(list(zip(sources, targets))) try: g.es['weight'] = weights except BaseException: pass if method == 'significance': cl = louvain.find_partition(g, louvain.SignificanceVertexPartition) else: cl = louvain.find_partition( g, louvain.RBConfigurationVertexPartition, resolution_parameter=res) if save: self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership)) else: return np.array(cl.membership)","Runs Louvain clustering using the vtraag implementation. Assumes that 'louvain' optional dependency is installed. Parameters ---------- res - float, optional, default 1 The resolution parameter which tunes the number of clusters Louvain finds. method - str, optional, default 'modularity' Can be 'modularity' or 'significance', which are two different optimizing functions in the Louvain algorithm."
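A plain-integer sketch of the sign-extension idea behind sext above, assuming ordinary Python ints in two's-complement form rather than the library's bit-vector representation; the helper name and the asserts are illustrative only.

def sign_extend(value, from_bits, to_bits):
    # Widen a from_bits-wide two's-complement value to to_bits by
    # replicating its sign bit into the new high bits.
    if to_bits <= from_bits:
        raise ValueError("to_bits must be strictly larger than from_bits")
    value &= (1 << from_bits) - 1            # keep only the low from_bits bits
    if value & (1 << (from_bits - 1)):       # sign bit set: fill the new high bits with ones
        value |= ((1 << to_bits) - 1) & ~((1 << from_bits) - 1)
    return value

assert sign_extend(0b1010, 4, 8) == 0b11111010  # -6 in 4 bits stays -6 in 8 bits
assert sign_extend(0b0101, 4, 8) == 0b0101      # non-negative values are unchanged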
"def make_module_reload_func(module_name=None, module_prefix='[???]', module=None): """""" Injects dynamic module reloading """""" module = _get_module(module_name, module, register=False) if module_name is None: module_name = str(module.__name__) def rrr(verbose=True): """""" Dynamic module reloading """""" if not __RELOAD_OK__: raise Exception('Reloading has been forced off') try: import imp if verbose and not QUIET: builtins.print('RELOAD: ' + str(module_prefix) + ' __name__=' + module_name) imp.reload(module) except Exception as ex: print(ex) print('%s Failed to reload' % module_prefix) raise # this doesn't seem to set anything on import * #_inject_funcs(module, rrr) return rrr",Injects dynamic module reloading "def check_multi_output_plate_compatibility(source_plates, sink_plate): """""" Check multi-output plate compatibility. This ensures that the source plates and sink plates match for a multi- output plate :param source_plates: The source plates :param sink_plate: The sink plate :return: True if the plates are compatible """""" if len(source_plates) == 0: if sink_plate.parent is not None: return False else: if sink_plate.parent is None: return False else: if sink_plate.parent.plate_id != source_plates[0].plate_id: return False return True","Check multi-output plate compatibility. This ensures that the source plates and sink plates match for a multi- output plate :param source_plates: The source plates :param sink_plate: The sink plate :return: True if the plates are compatible" "def get_proxy_field(self, instance): """"""Get the proxied field of this field """""" proxy_object = self.get_proxy_object(instance) if not proxy_object: return None return proxy_object.getField(self.name)",Get the proxied field of this field "def to_dicts(recarray): """"""convert record array to a dictionaries"""""" for rec in recarray: yield dict(zip(recarray.dtype.names, rec.tolist()))",convert record array to a dictionaries "def _inline_activate_venv(): """"""Built-in venv doesn't have activate_this.py, but doesn't need it anyway. As long as we find the correct executable, built-in venv sets up the environment automatically. See: https://bugs.python.org/issue21496#msg218455 """""" components = [] for name in (""bin"", ""Scripts""): bindir = os.path.join(project.virtualenv_location, name) if os.path.exists(bindir): components.append(bindir) if ""PATH"" in os.environ: components.append(os.environ[""PATH""]) os.environ[""PATH""] = os.pathsep.join(components)","Built-in venv doesn't have activate_this.py, but doesn't need it anyway. As long as we find the correct executable, built-in venv sets up the environment automatically. 
See: https://bugs.python.org/issue21496#msg218455" "def init_text(self): """""" Init text properties for this widget """""" d = self.declaration if d.text: self.set_text(d.text) if d.text_color: self.set_text_color(d.text_color) if d.text_alignment: self.set_text_alignment(d.text_alignment) if d.font_family or d.text_size: self.refresh_font() if hasattr(d, 'max_lines') and d.max_lines: self.set_max_lines(d.max_lines)",Init text properties for this widget "def getoneblock(astr, start, end): """"""get the block bounded by start and end doesn't work for multiple blocks"""""" alist = astr.split(start) astr = alist[-1] alist = astr.split(end) astr = alist[0] return astr","get the block bounded by start and end doesn't work for multiple blocks" "def _trychar(char, fallback, asciimode=None): # nocover """""" Logic from IPython timeit to handle terminals that cant show mu Args: char (str): character, typically unicode, to try to use fallback (str): ascii character to use if stdout cannot encode char asciimode (bool): if True, always use fallback Example: >>> char = _trychar('µs', 'us') >>> print('char = {}'.format(char)) >>> assert _trychar('µs', 'us', asciimode=True) == 'us' """""" if asciimode is True: # If we request ascii mode simply return it return fallback if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: # pragma: nobranch try: char.encode(sys.stdout.encoding) except Exception: # nocover pass else: return char return fallback","Logic from IPython timeit to handle terminals that cant show mu Args: char (str): character, typically unicode, to try to use fallback (str): ascii character to use if stdout cannot encode char asciimode (bool): if True, always use fallback Example: >>> char = _trychar('µs', 'us') >>> print('char = {}'.format(char)) >>> assert _trychar('µs', 'us', asciimode=True) == 'us'" "def predict(df, filters, model_fit, ytransform=None): """""" Apply model to new data to predict new dependent values. Parameters ---------- df : pandas.DataFrame filters : list of str Any filters to apply before doing prediction. model_fit : statsmodels.regression.linear_model.OLSResults Result of model estimation. ytransform : callable, optional A function to call on the array of predicted output. For example, if the model relation is predicting the log of price, you might pass ``ytransform=np.exp`` so that the results reflect actual price. By default no transformation is applied. Returns ------- result : pandas.Series Predicted values as a pandas Series. Will have the index of `df` after applying filters. """""" df = util.apply_filter_query(df, filters) with log_start_finish('statsmodels predict', logger): sim_data = model_fit.predict(df) if len(sim_data) != len(df): raise ModelEvaluationError( 'Predicted data does not have the same length as input. ' 'This suggests there are null values in one or more of ' 'the input columns.') if ytransform: sim_data = ytransform(sim_data) return pd.Series(sim_data, index=df.index)","Apply model to new data to predict new dependent values. Parameters ---------- df : pandas.DataFrame filters : list of str Any filters to apply before doing prediction. model_fit : statsmodels.regression.linear_model.OLSResults Result of model estimation. ytransform : callable, optional A function to call on the array of predicted output. For example, if the model relation is predicting the log of price, you might pass ``ytransform=np.exp`` so that the results reflect actual price. By default no transformation is applied. 
Returns ------- result : pandas.Series Predicted values as a pandas Series. Will have the index of `df` after applying filters." "def predict(self, parameters, viterbi): """""" Run forward algorithm to find the predicted distribution over classes. """""" x_dot_parameters = np.einsum('ijk,kl->ijl', self.x, parameters) if not viterbi: alpha = forward_predict(self._lattice, x_dot_parameters, self.state_machine.n_states) else: alpha = forward_max_predict(self._lattice, x_dot_parameters, self.state_machine.n_states) I, J, _ = self.x.shape class_Z = {} Z = -np.inf for state, predicted_class in self.states_to_classes.items(): weight = alpha[I - 1, J - 1, state] class_Z[self.states_to_classes[state]] = weight Z = np.logaddexp(Z, weight) return {label: np.exp(class_z - Z) for label, class_z in class_Z.items()}",Run forward algorithm to find the predicted distribution over classes. "def installed(name, nodataset=False, brand_opts=None): ''' Ensure zone is installed name : string name of the zone nodataset : boolean do not create a ZFS file system brand_opts : boolean brand specific options to pass ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} zones = __salt__['zoneadm.list'](installed=True, configured=True) if name in zones: if zones[name]['state'] == 'configured': if __opts__['test']: res_install = {'status': True} else: res_install = __salt__['zoneadm.install'](name, nodataset, brand_opts) ret['result'] = res_install['status'] if ret['result']: ret['changes'][name] = 'installed' ret['comment'] = 'The zone {0} was installed.'.format(name) else: ret['comment'] = [] ret['comment'].append('Failed to install zone {0}!'.format(name)) if 'message' in res_install: ret['comment'].append(res_install['message']) ret['comment'] = ""\n"".join(ret['comment']) else: ret['result'] = True ret['comment'] = 'zone {0} already installed.'.format(name) else: ret['result'] = False ret['comment'] = 'zone {0} is not configured!'.format(name) return ret","Ensure zone is installed name : string name of the zone nodataset : boolean do not create a ZFS file system brand_opts : boolean brand specific options to pass" "def get_tab(self, tab_name, allow_disabled=False): """"""Returns a specific tab from this tab group. If the tab is not allowed or not enabled this method returns ``None``. If the tab is disabled but you wish to return it anyway, you can pass ``True`` to the allow_disabled argument. """""" tab = self._tabs.get(tab_name, None) if tab and tab._allowed and (tab._enabled or allow_disabled): return tab return None","Returns a specific tab from this tab group. If the tab is not allowed or not enabled this method returns ``None``. If the tab is disabled but you wish to return it anyway, you can pass ``True`` to the allow_disabled argument." "def reset(cls): """""" Reset the conspect elements to initial state. """""" cls.input_el.value = """" cls.subconspect_el.html = """" cls.show_error(False)",Reset the conspect elements to initial state. "def autocrop(im, autocrop=False, **kwargs): """""" Remove any unnecessary whitespace from the edges of the source image. This processor should be listed before :func:`scale_and_crop` so the whitespace is removed from the source image before it is resized. autocrop Activates the autocrop method for this image. """""" if autocrop: # If transparent, flatten. if utils.is_transparent(im): no_alpha = Image.new('L', im.size, (255)) no_alpha.paste(im, mask=im.split()[-1]) else: no_alpha = im.convert('L') # Convert to black and white image. 
bw = no_alpha.convert('L') # bw = bw.filter(ImageFilter.MedianFilter) # White background. bg = Image.new('L', im.size, 255) bbox = ImageChops.difference(bw, bg).getbbox() if bbox: im = im.crop(bbox) return im","Remove any unnecessary whitespace from the edges of the source image. This processor should be listed before :func:`scale_and_crop` so the whitespace is removed from the source image before it is resized. autocrop Activates the autocrop method for this image." "def from_str(cls, coordinate): """"""Build a TikZCoordinate object from a string."""""" m = cls._coordinate_str_regex.match(coordinate) if m is None: raise ValueError('invalid coordinate string') if m.group(1) == '++': relative = True else: relative = False return TikZCoordinate( float(m.group(2)), float(m.group(4)), relative=relative)",Build a TikZCoordinate object from a string. "def generate_sitemap(self, path='sitemap.xml', https=False): """""" Generate an XML sitemap. Args: path (str): The name of the file to write to. https (bool): If True, links inside the sitemap with relative scheme (e.g. example.com/something) will be set to HTTPS. If False (the default), they will be set to plain HTTP. """""" sitemap = russell.sitemap.generate_sitemap(self, https=https) self.write_file(path, sitemap)","Generate an XML sitemap. Args: path (str): The name of the file to write to. https (bool): If True, links inside the sitemap with relative scheme (e.g. example.com/something) will be set to HTTPS. If False (the default), they will be set to plain HTTP." "def process_selection(self, instance_count, node_reg, node_ids): # Select primaries for current view_no if instance_count == 0: return [] ''' Build a set of names of primaries, it is needed to avoid duplicates of primary nodes for different replicas. ''' primaries = [] primary_rank = None for i in range(instance_count): if i == 0: primary_name = self._next_primary_node_name_for_master(node_reg, node_ids) primary_rank = self.node.get_rank_by_name(primary_name, node_reg, node_ids) if primary_rank is None: raise LogicError('primary_rank must not be None') else: primary_name, _ = self.next_primary_replica_name_for_backup( i, primary_rank, primaries, node_reg, node_ids) primaries.append(primary_name) logger.display(""{} selected primary {} for instance {} (view {})"" .format(PRIMARY_SELECTION_PREFIX, primary_name, i, self.viewNo), extra={""cli"": ""ANNOUNCE"", ""tags"": [""node-election""]}) if len(primaries) != instance_count: raise LogicError('instances inconsistency') if len(primaries) != len(set(primaries)): raise LogicError('repeating instances') return primaries","Build a set of names of primaries, it is needed to avoid duplicates of primary nodes for different replicas." "def add_debugged_source_file(self, debugged_source_file): """"""Add a DebuggedSourceFile proto."""""" # TODO(cais): Should the key include a host name, for certain distributed # cases? key = debugged_source_file.file_path self._source_file_host[key] = debugged_source_file.host self._source_file_last_modified[key] = debugged_source_file.last_modified self._source_file_bytes[key] = debugged_source_file.bytes self._source_file_content[key] = debugged_source_file.lines",Add a DebuggedSourceFile proto. "def load(self, source_list: Iterable[List[str]], target_sentences: Iterable[List[Any]], num_samples_per_bucket: List[int]) -> 'ParallelDataSet': """""" Creates a parallel dataset base on source list of strings and target sentences. Returns a `sockeye.data_io.ParallelDataSet`. 
:param source_list: Source list of strings (e.g., filenames). :param target_sentences: Target sentences used to do bucketing. :param num_samples_per_bucket: Number of samples per bucket. :return: Returns a parallel dataset `sockeye.data_io.ParallelDataSet`. """""" assert len(num_samples_per_bucket) == len(self.buckets) data_source = [np.full((num_samples,), self.pad_id, dtype=object) for num_samples in num_samples_per_bucket] # data_source is a List[numpy.array[str]] which semantic is bucket, index, str # Its loading to memory is deferred to the iterator, since the full data # is supposed to not fit in memory. data_target = [np.full((num_samples, target_len), self.pad_id, dtype=self.dtype) for (source_len, target_len), num_samples in zip(self.buckets, num_samples_per_bucket)] data_label = [np.full((num_samples, target_len), self.pad_id, dtype=self.dtype) for (source_len, target_len), num_samples in zip(self.buckets, num_samples_per_bucket)] bucket_sample_index = [0 for buck in self.buckets] # track amount of padding introduced through bucketing num_tokens_target = 0 num_pad_target = 0 # Bucket sentences as padded np arrays for source, target in zip(source_list, target_sentences): target_len = len(target) buck_index, buck = get_target_bucket(self.buckets, target_len) if buck is None: continue # skip this sentence pair num_tokens_target += buck[1] num_pad_target += buck[1] - target_len sample_index = bucket_sample_index[buck_index] data_source[buck_index][sample_index] = source data_target[buck_index][sample_index, :target_len] = target # NOTE(fhieber): while this is wasteful w.r.t memory, we need to explicitly create the label sequence # with the EOS symbol here sentence-wise and not per-batch due to variable sequence length within a batch. # Once MXNet allows item assignments given a list of indices (probably MXNet 1.0): e.g a[[0,1,5,2]] = x, # we can try again to compute the label sequence on the fly in next(). data_label[buck_index][sample_index, :target_len] = target[1:] + [self.eos_id] bucket_sample_index[buck_index] += 1 for i in range(len(data_source)): data_target[i] = mx.nd.array(data_target[i], dtype=self.dtype) data_label[i] = mx.nd.array(data_label[i], dtype=self.dtype) if num_tokens_target > 0: logger.info(""Created bucketed parallel data set. Introduced padding: target=%.1f%%)"", num_pad_target / num_tokens_target * 100) return ParallelDataSet(data_source, data_target, data_label)","Creates a parallel dataset base on source list of strings and target sentences. Returns a `sockeye.data_io.ParallelDataSet`. :param source_list: Source list of strings (e.g., filenames). :param target_sentences: Target sentences used to do bucketing. :param num_samples_per_bucket: Number of samples per bucket. :return: Returns a parallel dataset `sockeye.data_io.ParallelDataSet`." "def objects(self, json_result=False): """""" Return a generator of all processed data, it just like render but it'll not return a table or json format data but just data. And the data will be processed by fields_convert_map if passed. """""" self.rows_num = 0 query = self.query() if not isinstance(query, (orm.Result, list, dict)): query = do_(query) for record in query: self.rows_num += 1 r = self.object(record, json_result) self._cal_sum(record) yield r total = self._render_sum(True) if total: yield total","Return a generator of all processed data, it just like render but it'll not return a table or json format data but just data. And the data will be processed by fields_convert_map if passed." 
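A simplified sketch of the length-based bucketing and padding idea used by ParallelDataSet.load above, assuming a single token stream with no label sequence; the function name, bucket handling, and int32 dtype are assumptions, not the sockeye API.

import numpy as np

def bucket_and_pad(token_sequences, buckets, pad_id=0):
    # Place each sequence into the smallest bucket that fits it and pad to
    # that bucket's length; sequences longer than every bucket are skipped.
    binned = {bucket: [] for bucket in buckets}
    for seq in token_sequences:
        fitting = [b for b in buckets if len(seq) <= b]
        if not fitting:
            continue
        bucket = min(fitting)  # smallest fitting bucket introduces the least padding
        binned[bucket].append(list(seq) + [pad_id] * (bucket - len(seq)))
    return {b: np.array(rows, dtype='int32') for b, rows in binned.items() if rows}

# bucket_and_pad([[1, 2], [3, 4, 5, 6], [7]], buckets=[2, 4])
# -> {2: array([[1, 2], [7, 0]]), 4: array([[3, 4, 5, 6]])}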
"def complex_request(self, request, wait_for_first_response=True): """"""Send a compound command request to the interface over the normal data channel. request - A dict storing the request to send to the VI. It will be serialized to the currently selected output format. wait_for_first_response - If true, this function will block waiting for a response from the VI and return it to the caller. Otherwise, it will send the command and return immediately and any response will be lost. """""" receiver = self._prepare_response_receiver(request, receiver_class=CommandResponseReceiver) self._send_complex_request(request) responses = [] if wait_for_first_response: responses = receiver.wait_for_responses() return responses","Send a compound command request to the interface over the normal data channel. request - A dict storing the request to send to the VI. It will be serialized to the currently selected output format. wait_for_first_response - If true, this function will block waiting for a response from the VI and return it to the caller. Otherwise, it will send the command and return immediately and any response will be lost." "def _build_paths(self, name, spec_path_lists, exists): """""" Given an environment variable name and specified paths, return a pathsep-separated string of paths containing unique, extant, directories from those paths and from the environment variable. Raise an error if no paths are resolved. """""" # flatten spec_path_lists spec_paths = itertools.chain.from_iterable(spec_path_lists) env_paths = safe_env.get(name, '').split(os.pathsep) paths = itertools.chain(spec_paths, env_paths) extant_paths = list(filter(os.path.isdir, paths)) if exists else paths if not extant_paths: msg = ""%s environment variable is empty"" % name.upper() raise distutils.errors.DistutilsPlatformError(msg) unique_paths = self._unique_everseen(extant_paths) return os.pathsep.join(unique_paths)","Given an environment variable name and specified paths, return a pathsep-separated string of paths containing unique, extant, directories from those paths and from the environment variable. Raise an error if no paths are resolved." "def get_sesames(email, password, device_ids=None, nicknames=None, timeout=5): """"""Return list of available Sesame objects."""""" sesames = [] account = CandyHouseAccount(email, password, timeout=timeout) for sesame in account.sesames: if device_ids is not None and sesame['device_id'] not in device_ids: continue if nicknames is not None and sesame['nickname'] not in nicknames: continue sesames.append(Sesame(account, sesame)) return sesames",Return list of available Sesame objects. "def process_csxml_file(self, filename, interval=None, lazy=False): """"""Processes a filehandle to MedScan csxml input into INDRA statements. The CSXML format consists of a top-level `` root element containing a series of `` (document) elements, in turn containing `` (section) elements, and in turn containing `` (sentence) elements. Within the `` element, a series of additional elements appear in the following order: * ``, which contains a tokenized form of the sentence in its text attribute * ``, which describes any preprocessing/normalization done to the underlying text * `` elements, each of which contains one of more `` elements, describing entities in the text with their identifiers. The local IDs of each entities are given in the `msid` attribute of this element; these IDs are then referenced in any subsequent SVO elements. * `` elements, representing subject-verb-object triples. 
SVO elements with a `type` attribute of `CONTROL` represent normalized regulation relationships; they often represent the normalized extraction of the immediately preceding (but unnormalized SVO element). However, in some cases there can be a ""CONTROL"" SVO element without its parent immediately preceding it. Parameters ---------- filename : string The path to a Medscan csxml file. interval : (start, end) or None Select the interval of documents to read, starting with the `start`th document and ending before the `end`th document. If either is None, the value is considered undefined. If the value exceeds the bounds of available documents, it will simply be ignored. lazy : bool If True, only create a generator which can be used by the `get_statements` method. If True, populate the statements list now. """""" if interval is None: interval = (None, None) tmp_fname = tempfile.mktemp(os.path.basename(filename)) fix_character_encoding(filename, tmp_fname) self.__f = open(tmp_fname, 'rb') self._gen = self._iter_through_csxml_file_from_handle(*interval) if not lazy: for stmt in self._gen: self.statements.append(stmt) return","Processes a filehandle to MedScan csxml input into INDRA statements. The CSXML format consists of a top-level `` root element containing a series of `` (document) elements, in turn containing `` (section) elements, and in turn containing `` (sentence) elements. Within the `` element, a series of additional elements appear in the following order: * ``, which contains a tokenized form of the sentence in its text attribute * ``, which describes any preprocessing/normalization done to the underlying text * `` elements, each of which contains one of more `` elements, describing entities in the text with their identifiers. The local IDs of each entities are given in the `msid` attribute of this element; these IDs are then referenced in any subsequent SVO elements. * `` elements, representing subject-verb-object triples. SVO elements with a `type` attribute of `CONTROL` represent normalized regulation relationships; they often represent the normalized extraction of the immediately preceding (but unnormalized SVO element). However, in some cases there can be a ""CONTROL"" SVO element without its parent immediately preceding it. Parameters ---------- filename : string The path to a Medscan csxml file. interval : (start, end) or None Select the interval of documents to read, starting with the `start`th document and ending before the `end`th document. If either is None, the value is considered undefined. If the value exceeds the bounds of available documents, it will simply be ignored. lazy : bool If True, only create a generator which can be used by the `get_statements` method. If True, populate the statements list now." "def commit_output(cls, shard_ctx, iterator): """"""Saves output references when a shard finishes. Inside end_shard(), an output writer can optionally use this method to persist some references to the outputs from this shard (e.g a list of filenames) Args: shard_ctx: map_job_context.ShardContext for this shard. iterator: an iterator that yields json serializable references to the outputs from this shard. Contents from the iterator can be accessible later via map_job.Job.get_outputs. """""" # We accept an iterator just in case output references get too big. outs = tuple(iterator) shard_ctx._state.writer_state[""outs""] = outs","Saves output references when a shard finishes. 
Inside end_shard(), an output writer can optionally use this method to persist some references to the outputs from this shard (e.g a list of filenames) Args: shard_ctx: map_job_context.ShardContext for this shard. iterator: an iterator that yields json serializable references to the outputs from this shard. Contents from the iterator can be accessible later via map_job.Job.get_outputs." "def get_all_confirmations(self, params=None): """""" Get all confirmations This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list """""" if not params: params = {} return self._iterate_through_pages( self.get_confirmations_per_page, resource=CONFIRMATIONS, **{'params': params} )","Get all confirmations This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list" "def text_to_speech(text, synthesizer, synth_args, sentence_break): """""" Converts given text to a pydub AudioSegment using a specified speech synthesizer. At the moment, IBM Watson's text-to-speech API is the only available synthesizer. :param text: The text that will be synthesized to audio. :param synthesizer: The text-to-speech synthesizer to use. At the moment, 'watson' is the only available input. :param synth_args: A dictionary of arguments to pass to the synthesizer. Parameters for authorization (username/password) should be passed here. :param sentence_break: A string that identifies a sentence break or another logical break in the text. Necessary for text longer than 50 words. Defaults to '. '. """""" if len(text.split()) < 50: if synthesizer == 'watson': with open('.temp.wav', 'wb') as temp: temp.write(watson_request(text=text, synth_args=synth_args).content) response = AudioSegment.from_wav('.temp.wav') os.remove('.temp.wav') return response else: raise ValueError('""' + synthesizer + '"" synthesizer not found.') else: segments = [] for i, sentence in enumerate(text.split(sentence_break)): if synthesizer == 'watson': with open('.temp' + str(i) + '.wav', 'wb') as temp: temp.write(watson_request(text=sentence, synth_args=synth_args).content) segments.append(AudioSegment.from_wav('.temp' + str(i) + '.wav')) os.remove('.temp' + str(i) + '.wav') else: raise ValueError('""' + synthesizer + '"" synthesizer not found.') response = segments[0] for segment in segments[1:]: response = response + segment return response","Converts given text to a pydub AudioSegment using a specified speech synthesizer. At the moment, IBM Watson's text-to-speech API is the only available synthesizer. :param text: The text that will be synthesized to audio. :param synthesizer: The text-to-speech synthesizer to use. At the moment, 'watson' is the only available input. :param synth_args: A dictionary of arguments to pass to the synthesizer. Parameters for authorization (username/password) should be passed here. :param sentence_break: A string that identifies a sentence break or another logical break in the text. Necessary for text longer than 50 words. Defaults to '. '." "def _gatk_apply_bqsr(data): """"""Parallel BQSR support for GATK4. 
Normalized qualities to 3 bin outputs at 10, 20 and 30 based on pipeline standard recommendations, which will help with output file sizes: https://github.com/CCDG/Pipeline-Standardization/blob/master/PipelineStandard.md#base-quality-score-binning-scheme https://github.com/gatk-workflows/broad-prod-wgs-germline-snps-indels/blob/5585cdf7877104f2c61b2720ddfe7235f2fad577/PairedEndSingleSampleWf.gatk4.0.wdl#L1081 spark host and timeout settings help deal with runs on restricted systems where we encounter network and timeout errors """""" in_file = dd.get_align_bam(data) or dd.get_work_bam(data) out_file = os.path.join(dd.get_work_dir(data), ""align"", dd.get_sample_name(data), ""%s-recal.bam"" % utils.splitext_plus(os.path.basename(in_file))[0]) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: broad_runner = broad.runner_from_config(data[""config""]) gatk_type = broad_runner.gatk_type() cores = dd.get_num_cores(data) if gatk_type == ""gatk4"": resources = config_utils.get_resources(""gatk-spark"", data[""config""]) spark_opts = [str(x) for x in resources.get(""options"", [])] params = [""-T"", ""ApplyBQSRSpark"", ""--input"", in_file, ""--output"", tx_out_file, ""--bqsr-recal-file"", data[""prep_recal""], ""--static-quantized-quals"", ""10"", ""--static-quantized-quals"", ""20"", ""--static-quantized-quals"", ""30""] if spark_opts: params += spark_opts else: params += [""--spark-master"", ""local[%s]"" % cores, ""--conf"", ""spark.local.dir=%s"" % os.path.dirname(tx_out_file), ""--conf"", ""spark.driver.host=localhost"", ""--conf"", ""spark.network.timeout=800""] else: params = [""-T"", ""PrintReads"", ""-R"", dd.get_ref_file(data), ""-I"", in_file, ""-BQSR"", data[""prep_recal""], ""-o"", tx_out_file] # Avoid problems with intel deflater for GATK 3.8 and GATK4 # https://github.com/bcbio/bcbio-nextgen/issues/2145#issuecomment-343095357 if gatk_type == ""gatk4"": params += [""--jdk-deflater"", ""--jdk-inflater""] elif LooseVersion(broad_runner.gatk_major_version()) > LooseVersion(""3.7""): params += [""-jdk_deflater"", ""-jdk_inflater""] memscale = {""magnitude"": 0.9 * cores, ""direction"": ""increase""} if cores > 1 else None broad_runner.run_gatk(params, os.path.dirname(tx_out_file), memscale=memscale, parallel_gc=True) bam.index(out_file, data[""config""]) return out_file","Parallel BQSR support for GATK4. Normalized qualities to 3 bin outputs at 10, 20 and 30 based on pipeline standard recommendations, which will help with output file sizes: https://github.com/CCDG/Pipeline-Standardization/blob/master/PipelineStandard.md#base-quality-score-binning-scheme https://github.com/gatk-workflows/broad-prod-wgs-germline-snps-indels/blob/5585cdf7877104f2c61b2720ddfe7235f2fad577/PairedEndSingleSampleWf.gatk4.0.wdl#L1081 spark host and timeout settings help deal with runs on restricted systems where we encounter network and timeout errors" "def frameify(self, state, data): """"""Yield the data as a single frame."""""" try: yield state.recv_buf + data except FrameSwitch: pass finally: state.recv_buf = ''",Yield the data as a single frame. "def assert_key_has_value(self, key, caller): """"""Assert that context contains key which also has a value. Args: key: validate this key exists in context AND has a value that isn't None. caller: string. 
calling function name - this used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if key is None """""" assert key, (""key parameter must be specified."") self.assert_key_exists(key, caller) if self[key] is None: raise KeyInContextHasNoValueError( f""context['{key}'] must have a value for {caller}."")","Assert that context contains key which also has a value. Args: key: validate this key exists in context AND has a value that isn't None. caller: string. calling function name - this used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if key is None" "def multipublish(self, topic, messages, block=True, timeout=None, raise_error=True): """"""Publish an iterable of messages to the given topic. :param topic: the topic to publish to :param messages: iterable of bytestrings to publish :param block: wait for a connection to become available before publishing the message. If block is `False` and no connections are available, :class:`~gnsq.errors.NSQNoConnections` is raised :param timeout: if timeout is a positive number, it blocks at most ``timeout`` seconds before raising :class:`~gnsq.errors.NSQNoConnections` :param raise_error: if ``True``, it blocks until a response is received from the nsqd server, and any error response is raised. Otherwise an :class:`~gevent.event.AsyncResult` is returned """""" result = AsyncResult() conn = self._get_connection(block=block, timeout=timeout) try: self._response_queues[conn].append(result) conn.multipublish(topic, messages) finally: self._put_connection(conn) if raise_error: return result.get() return result","Publish an iterable of messages to the given topic. :param topic: the topic to publish to :param messages: iterable of bytestrings to publish :param block: wait for a connection to become available before publishing the message. If block is `False` and no connections are available, :class:`~gnsq.errors.NSQNoConnections` is raised :param timeout: if timeout is a positive number, it blocks at most ``timeout`` seconds before raising :class:`~gnsq.errors.NSQNoConnections` :param raise_error: if ``True``, it blocks until a response is received from the nsqd server, and any error response is raised. Otherwise an :class:`~gevent.event.AsyncResult` is returned" "def transpose(attrs, inputs, proto_obj): """"""Transpose the input array."""""" new_attrs = translation_utils._fix_attribute_names(attrs, {'perm' : 'axes'}) return 'transpose', new_attrs, inputs",Transpose the input array. "def merge(obj_a, obj_b, strategy='smart', renderer='yaml', merge_lists=False): ''' Merge a data structure into another by choosing a merge strategy Strategies: * aggregate * list * overwrite * recurse * smart CLI Example: .. code-block:: shell salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}' ''' return salt.utils.dictupdate.merge(obj_a, obj_b, strategy, renderer, merge_lists)","Merge a data structure into another by choosing a merge strategy Strategies: * aggregate * list * overwrite * recurse * smart CLI Example: .. 
code-block:: shell salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}'" "def add_exec_permission_to(target_file): """"""Add executable permissions to the file :param target_file: the target file whose permission to be changed """""" mode = os.stat(target_file).st_mode os.chmod(target_file, mode | stat.S_IXUSR)","Add executable permissions to the file :param target_file: the target file whose permission to be changed" "def project_sequence(s, permutation=None): """""" Projects a point or sequence of points using `project_point` to lists xs, ys for plotting with Matplotlib. Parameters ---------- s, Sequence-like The sequence of points (3-tuples) to be projected. Returns ------- xs, ys: The sequence of projected points in coordinates as two lists """""" xs, ys = unzip([project_point(p, permutation=permutation) for p in s]) return xs, ys","Projects a point or sequence of points using `project_point` to lists xs, ys for plotting with Matplotlib. Parameters ---------- s, Sequence-like The sequence of points (3-tuples) to be projected. Returns ------- xs, ys: The sequence of projected points in coordinates as two lists" "def _IsWindowsDrivePathSegment(cls, path_segment): """"""Determines if the path segment contains a Windows Drive indicator. A drive indicator can be a drive letter or %SystemDrive%. Args: path_segment (str): path segment. Returns: bool: True if the path segment contains a Windows Drive indicator. """""" if (len(path_segment) == 2 and path_segment[1] == ':' and path_segment[0].isalpha()): return True path_segment = path_segment.upper() return path_segment in ('%%ENVIRON_SYSTEMDRIVE%%', '%SYSTEMDRIVE%')","Determines if the path segment contains a Windows Drive indicator. A drive indicator can be a drive letter or %SystemDrive%. Args: path_segment (str): path segment. Returns: bool: True if the path segment contains a Windows Drive indicator." "def assign_yourself(self): """""" Assigning the workflow to itself. The selected job is checked to see if there is an assigned role. If it does not have a role assigned to it, it takes the job to itself and displays a message that the process is successful. If there is a role assigned to it, it does not do any operation and the message is displayed on the screen. .. code-block:: python # request: { 'task_inv_key': string, } """""" task_invitation = TaskInvitation.objects.get(self.task_invitation_key) wfi = task_invitation.instance if not wfi.current_actor.exist: wfi.current_actor = self.current.role wfi.save() [inv.delete() for inv in TaskInvitation.objects.filter(instance=wfi) if not inv == task_invitation] title = _(u""Successful"") msg = _(u""You have successfully assigned the job to yourself."") else: title = _(u""Unsuccessful"") msg = _(u""Unfortunately, this job is already taken by someone else."") self.current.msg_box(title=title, msg=msg)","Assigning the workflow to itself. The selected job is checked to see if there is an assigned role. If it does not have a role assigned to it, it takes the job to itself and displays a message that the process is successful. If there is a role assigned to it, it does not do any operation and the message is displayed on the screen. .. 
code-block:: python # request: { 'task_inv_key': string, }" "def loadLanguage(filename) : '''This function loads up a language configuration file and returns the configuration to be passed to the syllabify function.''' L = { ""consonants"" : [], ""vowels"" : [], ""onsets"" : [] } f = open(filename, ""r"") section = None for line in f : line = line.strip() if line in (""[consonants]"", ""[vowels]"", ""[onsets]"") : section = line[1:-1] elif section is None : raise ValueError(""File must start with a section header such as [consonants]."") elif section not in L : raise ValueError(""Invalid section: "" + section) else : L[section].append(line) for section in ""consonants"", ""vowels"", ""onsets"" : if len(L[section]) == 0 : raise ValueError(""File does not contain any consonants, vowels, or onsets."") return L","This function loads up a language configuration file and returns the configuration to be passed to the syllabify function." "def _ProcessUnknownEnums(message, encoded_message): """"""Add unknown enum values from encoded_message as unknown fields. ProtoRPC diverges from the usual protocol buffer behavior here and doesn't allow unknown fields. Throwing on unknown fields makes it impossible to let servers add new enum values and stay compatible with older clients, which isn't reasonable for us. We simply store unrecognized enum values as unknown fields, and all is well. Args: message: Proto message we've decoded thus far. encoded_message: JSON string we're decoding. Returns: message, with any unknown enums stored as unrecognized fields. """""" if not encoded_message: return message decoded_message = json.loads(six.ensure_str(encoded_message)) for field in message.all_fields(): if (isinstance(field, messages.EnumField) and field.name in decoded_message and message.get_assigned_value(field.name) is None): message.set_unrecognized_field( field.name, decoded_message[field.name], messages.Variant.ENUM) return message","Add unknown enum values from encoded_message as unknown fields. ProtoRPC diverges from the usual protocol buffer behavior here and doesn't allow unknown fields. Throwing on unknown fields makes it impossible to let servers add new enum values and stay compatible with older clients, which isn't reasonable for us. We simply store unrecognized enum values as unknown fields, and all is well. Args: message: Proto message we've decoded thus far. encoded_message: JSON string we're decoding. Returns: message, with any unknown enums stored as unrecognized fields." "def _add_thread(self, aThread): """""" Private method to add a thread object to the snapshot. @type aThread: L{Thread} @param aThread: Thread object. """""" ## if not isinstance(aThread, Thread): ## if hasattr(aThread, '__class__'): ## typename = aThread.__class__.__name__ ## else: ## typename = str(type(aThread)) ## msg = ""Expected Thread, got %s instead"" % typename ## raise TypeError(msg) dwThreadId = aThread.dwThreadId ## if dwThreadId in self.__threadDict: ## msg = ""Already have a Thread object with ID %d"" % dwThreadId ## raise KeyError(msg) aThread.set_process(self) self.__threadDict[dwThreadId] = aThread","Private method to add a thread object to the snapshot. @type aThread: L{Thread} @param aThread: Thread object." "def snap_to_beginning_of_week(day, weekday_start=""Sunday""): """""" Get the first day of the current week. :param day: The input date to snap. :param weekday_start: Either ""Monday"" or ""Sunday"", indicating the first day of the week. :returns: A date representing the first day of the current week. 
"""""" delta_days = ((day.weekday() + 1) % 7) if weekday_start is ""Sunday"" else day.weekday() return day - timedelta(days=delta_days)","Get the first day of the current week. :param day: The input date to snap. :param weekday_start: Either ""Monday"" or ""Sunday"", indicating the first day of the week. :returns: A date representing the first day of the current week." "def _GetDatabaseAccount(self): """"""Gets the database account first by using the default endpoint, and if that doesn't returns use the endpoints for the preferred locations in the order they are specified to get the database account. """""" try: database_account = self._GetDatabaseAccountStub(self.DefaultEndpoint) return database_account # If for any reason(non-globaldb related), we are not able to get the database account from the above call to GetDatabaseAccount, # we would try to get this information from any of the preferred locations that the user might have specified(by creating a locational endpoint) # and keeping eating the exception until we get the database account and return None at the end, if we are not able to get that info from any endpoints except errors.HTTPFailure: for location_name in self.PreferredLocations: locational_endpoint = _GlobalEndpointManager.GetLocationalEndpoint(self.DefaultEndpoint, location_name) try: database_account = self._GetDatabaseAccountStub(locational_endpoint) return database_account except errors.HTTPFailure: pass return None","Gets the database account first by using the default endpoint, and if that doesn't returns use the endpoints for the preferred locations in the order they are specified to get the database account." "def process_files(): """""" Use Manager and Counter as context managers """""" with enlighten.Manager() as manager: with manager.counter(total=SPLINES, desc='Reticulating:', unit='splines') as retic: for num in range(SPLINES): # pylint: disable=unused-variable time.sleep(random.uniform(0.1, 0.5)) # Random processing time retic.update() with manager.counter(total=LLAMAS, desc='Herding:', unit='llamas') as herd: for num in range(SPLINES): # pylint: disable=unused-variable time.sleep(random.uniform(0.1, 0.5)) # Random processing time herd.update()",Use Manager and Counter as context managers "def visibility(self, visibility): """""" Sets the visibility of this DatasetPatchRequest. Dataset visibility. `OPEN` if the dataset can be seen by any member of data.world. `PRIVATE` if the dataset can be seen by its owner and authorized collaborators. :param visibility: The visibility of this DatasetPatchRequest. :type: str """""" allowed_values = [""OPEN"", ""PRIVATE""] if visibility not in allowed_values: raise ValueError( ""Invalid value for `visibility` ({0}), must be one of {1}"" .format(visibility, allowed_values) ) self._visibility = visibility","Sets the visibility of this DatasetPatchRequest. Dataset visibility. `OPEN` if the dataset can be seen by any member of data.world. `PRIVATE` if the dataset can be seen by its owner and authorized collaborators. :param visibility: The visibility of this DatasetPatchRequest. 
:type: str" "def _cost_func(x, kernel_options, tuning_options, runner, results, cache): """""" Cost function used by minimize """""" error_time = 1e20 logging.debug('_cost_func called') logging.debug('x: ' + str(x)) x_key = "","".join([str(i) for i in x]) if x_key in cache: return cache[x_key] #snap values in x to nearest actual value for each parameter unscale x if needed if tuning_options.scaling: params = unscale_and_snap_to_nearest(x, tuning_options.tune_params, tuning_options.eps) else: params = snap_to_nearest_config(x, tuning_options.tune_params) logging.debug('params ' + str(params)) x_int = "","".join([str(i) for i in params]) if x_int in cache: return cache[x_int] #check if this is a legal (non-restricted) parameter instance if tuning_options.restrictions: legal = util.check_restrictions(tuning_options.restrictions, params, tuning_options.tune_params.keys(), tuning_options.verbose) if not legal: cache[x_int] = error_time cache[x_key] = error_time return error_time #compile and benchmark this instance res, _ = runner.run([params], kernel_options, tuning_options) #append to tuning results if res: results.append(res[0]) cache[x_int] = res[0]['time'] cache[x_key] = res[0]['time'] return res[0]['time'] cache[x_int] = error_time cache[x_key] = error_time return error_time",Cost function used by minimize "def get_contained_labels(self, inplace=True): """""" Get the set of unique labels contained in this annotation. Returns a pandas dataframe or sets the contained_labels attribute of the object. Requires the label_store field to be set. Function will try to use attributes contained in the order: 1. label_store 2. symbol 3. description This function should also be called to summarize information about an annotation after it has been read. Should not be a helper function to others except rdann. """""" if self.custom_labels is not None: self.check_field('custom_labels') # Create the label map label_map = ann_label_table.copy() # Convert the tuple triplets into a pandas dataframe if needed if isinstance(self.custom_labels, (list, tuple)): custom_labels = label_triplets_to_df(self.custom_labels) elif isinstance(self.custom_labels, pd.DataFrame): # Set the index just in case it doesn't already match the label_store self.custom_labels.set_index( self.custom_labels['label_store'].values, inplace=True) custom_labels = self.custom_labels else: custom_labels = None # Merge the standard wfdb labels with the custom labels. # custom labels values overwrite standard wfdb if overlap. if custom_labels is not None: for i in custom_labels.index: label_map.loc[i] = custom_labels.loc[i] # This doesn't work... 
# label_map.loc[custom_labels.index] = custom_labels.loc[custom_labels.index] # Get the labels using one of the features if self.label_store is not None: index_vals = set(self.label_store) reset_index = False counts = np.unique(self.label_store, return_counts=True) elif self.symbol is not None: index_vals = set(self.symbol) label_map.set_index(label_map['symbol'].values, inplace=True) reset_index = True counts = np.unique(self.symbol, return_counts=True) elif self.description is not None: index_vals = set(self.description) label_map.set_index(label_map['description'].values, inplace=True) reset_index = True counts = np.unique(self.description, return_counts=True) else: raise Exception('No annotation labels contained in object') contained_labels = label_map.loc[index_vals, :] # Add the counts for i in range(len(counts[0])): contained_labels.loc[counts[0][i], 'n_occurrences'] = counts[1][i] contained_labels['n_occurrences'] = pd.to_numeric(contained_labels['n_occurrences'], downcast='integer') if reset_index: contained_labels.set_index(contained_labels['label_store'].values, inplace=True) if inplace: self.contained_labels = contained_labels return else: return contained_labels","Get the set of unique labels contained in this annotation. Returns a pandas dataframe or sets the contained_labels attribute of the object. Requires the label_store field to be set. Function will try to use attributes contained in the order: 1. label_store 2. symbol 3. description This function should also be called to summarize information about an annotation after it has been read. Should not be a helper function to others except rdann." "def download_go_basic_obo(obo=""go-basic.obo"", prt=sys.stdout, loading_bar=True): """"""Download Ontologies, if necessary."""""" if not os.path.isfile(obo): http = ""http://purl.obolibrary.org/obo/go"" if ""slim"" in obo: http = ""http://www.geneontology.org/ontology/subsets"" # http = 'http://current.geneontology.org/ontology/subsets' obo_remote = ""{HTTP}/{OBO}"".format(HTTP=http, OBO=os.path.basename(obo)) dnld_file(obo_remote, obo, prt, loading_bar) else: if prt is not None: prt.write("" EXISTS: {FILE}\n"".format(FILE=obo)) return obo","Download Ontologies, if necessary." "def get_found_includes(self, env, scanner, path): """"""Return the included implicit dependencies in this file. Cache results so we only scan the file once per path regardless of how many times this information is requested. """""" memo_key = (id(env), id(scanner), path) try: memo_dict = self._memo['get_found_includes'] except KeyError: memo_dict = {} self._memo['get_found_includes'] = memo_dict else: try: return memo_dict[memo_key] except KeyError: pass if scanner: result = [n.disambiguate() for n in scanner(self, env, path)] else: result = [] memo_dict[memo_key] = result return result","Return the included implicit dependencies in this file. Cache results so we only scan the file once per path regardless of how many times this information is requested." 
"def get_options(server): """"""Retrieve the available HTTP verbs"""""" try: response = requests.options( server, allow_redirects=False, verify=False, timeout=5) except (requests.exceptions.ConnectionError, requests.exceptions.MissingSchema): return ""Server {} is not available!"".format(server) try: return {'allowed': response.headers['Allow']} except KeyError: return ""Unable to get HTTP methods""",Retrieve the available HTTP verbs "def set_cursor_x(self, x): """""" Set Screen Cursor X Position """""" if x >= 0 and x <= self.server.server_info.get(""screen_width""): self.cursor_x = x self.server.request(""screen_set %s cursor_x %i"" % (self.ref, self.cursor_x))",Set Screen Cursor X Position "def anonymized_formula(self): """""" An anonymized formula. Appends charge to the end of anonymized composition """""" anon_formula = super().anonymized_formula chg = self._charge chg_str = """" if chg > 0: chg_str += (""{}{}"".format('+', str(int(chg)))) elif chg < 0: chg_str += (""{}{}"".format('-', str(int(np.abs(chg))))) return anon_formula + chg_str","An anonymized formula. Appends charge to the end of anonymized composition" "def mark_all_as_done(self, **kwargs): """"""Mark all the todos as done. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTodoError: If the server failed to perform the request Returns: int: The number of todos maked done """""" result = self.gitlab.http_post('/todos/mark_as_done', **kwargs) try: return int(result) except ValueError: return 0","Mark all the todos as done. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTodoError: If the server failed to perform the request Returns: int: The number of todos maked done" "def expand(self, msgpos): """"""expand message at given position"""""" MT = self._tree[msgpos] MT.expand(MT.root)",expand message at given position "def discover() -> List[Tuple[str, str]]: """""" Scan for connected modules and instantiate handler classes """""" if IS_ROBOT and os.path.isdir('/dev/modules'): devices = os.listdir('/dev/modules') else: devices = [] discovered_modules = [] module_port_regex = re.compile('|'.join(MODULE_TYPES.keys()), re.I) for port in devices: match = module_port_regex.search(port) if match: name = match.group().lower() if name not in MODULE_TYPES: log.warning(""Unexpected module connected: {} on {}"" .format(name, port)) continue absolute_port = '/dev/modules/{}'.format(port) discovered_modules.append((absolute_port, name)) log.info('Discovered modules: {}'.format(discovered_modules)) return discovered_modules",Scan for connected modules and instantiate handler classes "def old_properties_names_to_new(self): # pragma: no cover, never called """"""Convert old Nagios2 names to Nagios3 new names TODO: still useful? :return: None """""" for i in itertools.chain(iter(list(self.items.values())), iter(list(self.templates.values()))): i.old_properties_names_to_new()","Convert old Nagios2 names to Nagios3 new names TODO: still useful? :return: None" "def LikelihoodFunction(Template, Data, PSD, detRespP, detGCDelay=0): """""" LikelihoodFunction - function to calculate the likelihood of livePoint, given Data. Template - (N_fd) complex array containing Fourier domain trial signal. Data - (N_fd) complex array containing Fourier domain GW data. PSD - Noise power spectral density for a gravitational wave detector. 
detRespP - Antenna response to the plus GW polarisation for the detector. detGCDelay - Time delay of detector from geocenter (default = 0, use detGCDelay only if computing logL for more than one detector. Returns logL of Template. Sarah Gossan 2012. Last updated 02/18/14. """""" # Correct template for geocenter delay and antenna response function if detGCDelay: phaseGCDelay = -2.*np.pi*np.linspace(0,N_fd-1,num=N_fd)*dF*detGCDelay*1j Template *= phaseGCDelay Template *= detRespP # Calculate logL - simple Gaussian logL = -2.*dF*np.sum(pow(abs(Data[lowBin:] - Template[lowBin:]),2.)/\ PSD[lowBin:]) return logL","LikelihoodFunction - function to calculate the likelihood of livePoint, given Data. Template - (N_fd) complex array containing Fourier domain trial signal. Data - (N_fd) complex array containing Fourier domain GW data. PSD - Noise power spectral density for a gravitational wave detector. detRespP - Antenna response to the plus GW polarisation for the detector. detGCDelay - Time delay of detector from geocenter (default = 0, use detGCDelay only if computing logL for more than one detector. Returns logL of Template. Sarah Gossan 2012. Last updated 02/18/14." "def _remove_child_node(node, context, xast, if_empty=False): '''Remove a child node based on the specified xpath. :param node: lxml element relative to which the xpath will be interpreted :param context: any context required for the xpath (e.g., namespace definitions) :param xast: parsed xpath (xpath abstract syntax tree) from :mod:`eulxml.xpath` :param if_empty: optional boolean; only remove a node if it is empty (no attributes and no child nodes); defaults to False :returns: True if a node was deleted ''' xpath = serialize(xast) child = _find_xml_node(xpath, node, context) if child is not None: # if if_empty was specified and node has children or attributes # other than any predicates defined in the xpath, don't remove if if_empty is True and \ not _empty_except_predicates(xast, child, context): return False node.remove(child) return True","Remove a child node based on the specified xpath. :param node: lxml element relative to which the xpath will be interpreted :param context: any context required for the xpath (e.g., namespace definitions) :param xast: parsed xpath (xpath abstract syntax tree) from :mod:`eulxml.xpath` :param if_empty: optional boolean; only remove a node if it is empty (no attributes and no child nodes); defaults to False :returns: True if a node was deleted" "def isAvailable(self, requester, access): """""" Return a boolean whether the lock is available for claiming """""" debuglog(""%s isAvailable(%s, %s): self.owners=%r"" % (self, requester, access, self.owners)) num_excl, num_counting = self._claimed_excl, self._claimed_counting # Find all waiters ahead of the requester in the wait queue for idx, waiter in enumerate(self.waiting): if waiter[0] is requester: w_index = idx break else: w_index = len(self.waiting) ahead = self.waiting[:w_index] if access.mode == 'counting': # Wants counting access return num_excl == 0 and num_counting + len(ahead) < self.maxCount \ and all([w[1].mode == 'counting' for w in ahead]) # else Wants exclusive access return num_excl == 0 and num_counting == 0 and not ahead",Return a boolean whether the lock is available for claiming "def get_children(self, usage_id_filter=None): """""" Return instantiated XBlocks for each of this blocks ``children``. 
"""""" if not self.has_children: return [] return [ self.get_child(usage_id) for usage_id in self.children if usage_id_filter is None or usage_id_filter(usage_id) ]",Return instantiated XBlocks for each of this blocks ``children``. "def _get_mirror_urls(self, mirrors=None, main_mirror_url=None): """"""Retrieves a list of URLs from the main mirror DNS entry unless a list of mirror URLs are passed. """""" if not mirrors: mirrors = get_mirrors(main_mirror_url) # Should this be made ""less random""? E.g. netselect like? random.shuffle(mirrors) mirror_urls = set() for mirror_url in mirrors: # Make sure we have a valid URL if not (""http://"" or ""https://"" or ""file://"") in mirror_url: mirror_url = ""http://%s"" % mirror_url if not mirror_url.endswith(""/simple""): mirror_url = ""%s/simple/"" % mirror_url mirror_urls.add(mirror_url) return list(mirror_urls)","Retrieves a list of URLs from the main mirror DNS entry unless a list of mirror URLs are passed." "def table_driven(self, in_data): """""" The Standard table_driven CRC algorithm. """""" # If the input data is a string, convert to bytes. if isinstance(in_data, str): in_data = [ord(c) for c in in_data] tbl = self.gen_table() register = self.DirectInit << self.CrcShift if not self.ReflectIn: for octet in in_data: tblidx = ((register >> (self.Width - self.TableIdxWidth + self.CrcShift)) ^ octet) & 0xff register = ((register << (self.TableIdxWidth - self.CrcShift)) ^ tbl[tblidx]) & (self.Mask << self.CrcShift) register = register >> self.CrcShift else: register = self.reflect(register, self.Width + self.CrcShift) << self.CrcShift for octet in in_data: tblidx = ((register >> self.CrcShift) ^ octet) & 0xff register = ((register >> self.TableIdxWidth) ^ tbl[tblidx]) & (self.Mask << self.CrcShift) register = self.reflect(register, self.Width + self.CrcShift) & self.Mask if self.ReflectOut: register = self.reflect(register, self.Width) return register ^ self.XorOut",The Standard table_driven CRC algorithm. "def attach_protocol(self, proto): """""" returns a Deferred that fires once we've set this object up to track the protocol. Fails if we already have a protocol. """""" if self._protocol is not None: raise RuntimeError(""Already have a protocol."") # make sure we have nothing in self.unsaved self.save() self.__dict__['_protocol'] = proto # FIXME some of this is duplicated from ctor del self.__dict__['_accept_all_'] self.__dict__['post_bootstrap'] = defer.Deferred() if proto.post_bootstrap: proto.post_bootstrap.addCallback(self.bootstrap) return self.__dict__['post_bootstrap']","returns a Deferred that fires once we've set this object up to track the protocol. Fails if we already have a protocol." "def get_rlz(self, rlzstr): r"""""" Get a Realization instance for a string of the form 'rlz-\d+' """""" mo = re.match(r'rlz-(\d+)', rlzstr) if not mo: return return self.realizations[int(mo.group(1))]","r"""""" Get a Realization instance for a string of the form 'rlz-\d+'" "def deploy(self, initial_instance_count, instance_type, accelerator_type=None, endpoint_name=None, use_compiled_model=False, update_endpoint=False, **kwargs): """"""Deploy the trained model to an Amazon SageMaker endpoint and return a ``sagemaker.RealTimePredictor`` object. More information: http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html Args: initial_instance_count (int): Minimum number of EC2 instances to deploy to an endpoint for prediction. instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction, for example, 'ml.c4.xlarge'. 
accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator will be attached to the endpoint. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html endpoint_name (str): Name to use for creating an Amazon SageMaker endpoint. If not specified, the name of the training job is used. use_compiled_model (bool): Flag to select whether to use compiled (optimized) model. Default: False. update_endpoint (bool): Flag to update the model in an existing Amazon SageMaker endpoint. If True, this will deploy a new EndpointConfig to an already existing endpoint and delete resources corresponding to the previous EndpointConfig. Default: False tags(List[dict[str, str]]): Optional. The list of tags to attach to this specific endpoint. Example: >>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}] For more information about tags, see https://boto3.amazonaws.com/v1/documentation\ /api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags **kwargs: Passed to invocation of ``create_model()``. Implementations may customize ``create_model()`` to accept ``**kwargs`` to customize model creation during deploy. For more, see the implementation docs. Returns: sagemaker.predictor.RealTimePredictor: A predictor that provides a ``predict()`` method, which can be used to send requests to the Amazon SageMaker endpoint and obtain inferences. """""" self._ensure_latest_training_job() endpoint_name = endpoint_name or self.latest_training_job.name self.deploy_instance_type = instance_type if use_compiled_model: family = '_'.join(instance_type.split('.')[:-1]) if family not in self._compiled_models: raise ValueError(""No compiled model for {}. "" ""Please compile one with compile_model before deploying."".format(family)) model = self._compiled_models[family] else: model = self.create_model(**kwargs) return model.deploy( instance_type=instance_type, initial_instance_count=initial_instance_count, accelerator_type=accelerator_type, endpoint_name=endpoint_name, update_endpoint=update_endpoint, tags=self.tags)","Deploy the trained model to an Amazon SageMaker endpoint and return a ``sagemaker.RealTimePredictor`` object. More information: http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html Args: initial_instance_count (int): Minimum number of EC2 instances to deploy to an endpoint for prediction. instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction, for example, 'ml.c4.xlarge'. accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator will be attached to the endpoint. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html endpoint_name (str): Name to use for creating an Amazon SageMaker endpoint. If not specified, the name of the training job is used. use_compiled_model (bool): Flag to select whether to use compiled (optimized) model. Default: False. update_endpoint (bool): Flag to update the model in an existing Amazon SageMaker endpoint. If True, this will deploy a new EndpointConfig to an already existing endpoint and delete resources corresponding to the previous EndpointConfig. Default: False tags(List[dict[str, str]]): Optional. The list of tags to attach to this specific endpoint. 
Example: >>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}] For more information about tags, see https://boto3.amazonaws.com/v1/documentation\ /api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags **kwargs: Passed to invocation of ``create_model()``. Implementations may customize ``create_model()`` to accept ``**kwargs`` to customize model creation during deploy. For more, see the implementation docs. Returns: sagemaker.predictor.RealTimePredictor: A predictor that provides a ``predict()`` method, which can be used to send requests to the Amazon SageMaker endpoint and obtain inferences." "def TXT(host, nameserver=None): ''' Return the TXT record for ``host``. Always returns a list. CLI Example: .. code-block:: bash salt ns1 dig.TXT google.com ''' dig = ['dig', '+short', six.text_type(host), 'TXT'] if nameserver is not None: dig.append('@{0}'.format(nameserver)) cmd = __salt__['cmd.run_all'](dig, python_shell=False) if cmd['retcode'] != 0: log.warning( 'dig returned exit code \'%s\'. Returning empty list as fallback.', cmd['retcode'] ) return [] return [i for i in cmd['stdout'].split('\n')]","Return the TXT record for ``host``. Always returns a list. CLI Example: .. code-block:: bash salt ns1 dig.TXT google.com" "def _get_proj_specific_params(self, projection): """"""Convert CF projection parameters to PROJ.4 dict."""""" proj = self._get_proj4_name(projection) proj_dict = { 'proj': proj, 'a': float(projection.attrs['semi_major_axis']), 'b': float(projection.attrs['semi_minor_axis']), 'units': 'm', } if proj == 'geos': proj_dict['h'] = float(projection.attrs['perspective_point_height']) proj_dict['sweep'] = projection.attrs.get('sweep_angle_axis', 'y') proj_dict['lon_0'] = float(projection.attrs['longitude_of_projection_origin']) proj_dict['lat_0'] = float(projection.attrs.get('latitude_of_projection_origin', 0.0)) elif proj == 'lcc': proj_dict['lat_0'] = float(projection.attrs['standard_parallel']) proj_dict['lon_0'] = float(projection.attrs['longitude_of_central_meridian']) proj_dict['lat_1'] = float(projection.attrs['latitude_of_projection_origin']) elif proj == 'stere': proj_dict['lat_ts'] = float(projection.attrs['standard_parallel']) proj_dict['lon_0'] = float(projection.attrs['straight_vertical_longitude_from_pole']) proj_dict['lat_0'] = float(projection.attrs['latitude_of_projection_origin']) elif proj == 'merc': proj_dict['lat_ts'] = float(projection.attrs['standard_parallel']) proj_dict['lat_0'] = proj_dict['lat_ts'] proj_dict['lon_0'] = float(projection.attrs['longitude_of_projection_origin']) else: raise ValueError(""Can't handle projection '{}'"".format(proj)) return proj_dict",Convert CF projection parameters to PROJ.4 dict. "def check_cursor_location(self): """"""Check whether the data location of the last known position of the cursor has changed. If so, issue a callback. """""" # Check whether cursor data position has changed relative # to previous value data_x, data_y = self.get_data_xy(self.last_win_x, self.last_win_y) if (data_x != self.last_data_x or data_y != self.last_data_y): self.last_data_x, self.last_data_y = data_x, data_y self.logger.debug(""cursor location changed %.4f,%.4f => %.4f,%.4f"" % ( self.last_data_x, self.last_data_y, data_x, data_y)) # we make this call compatible with the motion callback # for now, but there is no concept of a button here button = 0 self.make_ui_callback('cursor-changed', button, data_x, data_y) return data_x, data_y","Check whether the data location of the last known position of the cursor has changed. 
If so, issue a callback." "def author_json_details(author, author_json, contributions, correspondence, competing_interests, equal_contributions_map, present_address_data, foot_notes_data, html_flag=True): # Configure the XML to HTML conversion preference for shorthand use below convert = lambda xml_string: xml_to_html(html_flag, xml_string) """"""add more author json"""""" if author_affiliations(author): author_json[""affiliations""] = author_affiliations(author) # foot notes or additionalInformation if author_foot_notes(author, foot_notes_data): author_json[""additionalInformation""] = author_foot_notes(author, foot_notes_data) # email if author_email_addresses(author, correspondence): author_json[""emailAddresses""] = author_email_addresses(author, correspondence) # phone if author_phone_numbers(author, correspondence): author_json[""phoneNumbers""] = author_phone_numbers_json(author, correspondence) # contributions if author_contribution(author, contributions): author_json[""contribution""] = convert(author_contribution(author, contributions)) # competing interests if author_competing_interests(author, competing_interests): author_json[""competingInterests""] = convert( author_competing_interests(author, competing_interests)) # equal-contributions if author_equal_contribution(author, equal_contributions_map): author_json[""equalContributionGroups""] = author_equal_contribution(author, equal_contributions_map) # postalAddress if author_present_address(author, present_address_data): author_json[""postalAddresses""] = author_present_address(author, present_address_data) return author_json",add more author json "def forwards(apps, schema_editor): """""" Migrate all 'exhibition' Events to the new 'museum' Event kind. """""" Event = apps.get_model('spectator_events', 'Event') for ev in Event.objects.filter(kind='exhibition'): ev.kind = 'museum' ev.save()",Migrate all 'exhibition' Events to the new 'museum' Event kind. "def qhalfx(self): """"""get the half normal matrix attribute. Create the attribute if it has not yet been created Returns ------- qhalfx : pyemu.Matrix """""" if self.__qhalfx is None: self.log(""qhalfx"") self.__qhalfx = self.qhalf * self.jco self.log(""qhalfx"") return self.__qhalfx","get the half normal matrix attribute. Create the attribute if it has not yet been created Returns ------- qhalfx : pyemu.Matrix" "def replace_rep(t:str) -> str: ""Replace repetitions at the character level in `t`."" def _replace_rep(m:Collection[str]) -> str: c,cc = m.groups() return f' {TK_REP} {len(cc)+1} {c} ' re_rep = re.compile(r'(\S)(\1{3,})') return re_rep.sub(_replace_rep, t)",Replace repetitions at the character level in `t`. "def __doQuery(self, query, format, convert): """""" Inner method that does the actual query """""" self.__getFormat(format) self.sparql.setQuery(query) if convert: results = self.sparql.query().convert() else: results = self.sparql.query() return results",Inner method that does the actual query "def load_png(varNumVol, strPathPng, tplVslSpcSze=(200, 200), varStrtIdx=0, varZfill=3): """""" Load PNGs with stimulus information for pRF model creation. Parameters ---------- varNumVol : int Number of PNG files. strPathPng : str Parent directory of PNG files. PNG files need to be organsied in numerical order (e.g. `file_001.png`, `file_002.png`, etc.). tplVslSpcSze : tuple Pixel size (x, y) at which PNGs are sampled. In case of large PNGs it is useful to sample at a lower than the original resolution. varStrtIdx : int Start index of PNG files. 
For instance, `varStrtIdx = 0` if the name of the first PNG file is `file_000.png`, or `varStrtIdx = 1` if it is `file_001.png`. varZfill : int Zero padding of PNG file names. For instance, `varStrtIdx = 3` if the name of PNG files is `file_007.png`, or `varStrtIdx = 4` if it is `file_0007.png`. Returns ------- aryPngData : np.array 3D Numpy array with the following structure: aryPngData[x-pixel-index, y-pixel-index, PngNumber] Notes ----- Part of py_pRF_mapping library. """""" # Create list of png files to load: lstPngPaths = [None] * varNumVol for idx01 in range(0, varNumVol): lstPngPaths[idx01] = (strPathPng + str(idx01 + varStrtIdx).zfill(varZfill) + '.png') # The png data will be saved in a numpy array of the following order: # aryPngData[x-pixel, y-pixel, PngNumber]. aryPngData = np.zeros((tplVslSpcSze[0], tplVslSpcSze[1], varNumVol)) # Open first image in order to check dimensions (greyscale or RGB, i.e. 2D # or 3D). objIm = Image.open(lstPngPaths[0]) aryTest = np.array(objIm.resize((objIm.size[0], objIm.size[1]), Image.ANTIALIAS)) varNumDim = aryTest.ndim del(aryTest) # Loop trough PNG files: for idx01 in range(0, varNumVol): # Old version of reading images with scipy # aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :, 0] # aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :] # Load & resize image: objIm = Image.open(lstPngPaths[idx01]) objIm = objIm.resize((tplVslSpcSze[0], tplVslSpcSze[1]), resample=Image.NEAREST) # Casting of array depends on dimensionality (greyscale or RGB, i.e. 2D # or 3D): if varNumDim == 2: aryPngData[:, :, idx01] = np.array(objIm.resize( (objIm.size[0], objIm.size[1]), Image.ANTIALIAS))[:, :] elif varNumDim == 3: aryPngData[:, :, idx01] = np.array(objIm.resize( (objIm.size[0], objIm.size[1]), Image.ANTIALIAS))[:, :, 0] else: # Error message: strErrMsg = ('ERROR: PNG files for model creation need to be RGB ' + 'or greyscale.') raise ValueError(strErrMsg) # Convert RGB values (0 to 255) to integer ones and zeros: aryPngData = (aryPngData > 200).astype(np.int8) return aryPngData","Load PNGs with stimulus information for pRF model creation. Parameters ---------- varNumVol : int Number of PNG files. strPathPng : str Parent directory of PNG files. PNG files need to be organsied in numerical order (e.g. `file_001.png`, `file_002.png`, etc.). tplVslSpcSze : tuple Pixel size (x, y) at which PNGs are sampled. In case of large PNGs it is useful to sample at a lower than the original resolution. varStrtIdx : int Start index of PNG files. For instance, `varStrtIdx = 0` if the name of the first PNG file is `file_000.png`, or `varStrtIdx = 1` if it is `file_001.png`. varZfill : int Zero padding of PNG file names. For instance, `varStrtIdx = 3` if the name of PNG files is `file_007.png`, or `varStrtIdx = 4` if it is `file_0007.png`. Returns ------- aryPngData : np.array 3D Numpy array with the following structure: aryPngData[x-pixel-index, y-pixel-index, PngNumber] Notes ----- Part of py_pRF_mapping library." "def send(self, uid, event, payload=None): """""" Send an event to a connected controller. Use pymlgame event type and correct payload. To send a message to the controller use pymlgame.E_MESSAGE event and a string as payload. 
:param uid: Unique id of the controller :param event: Event type :param payload: Payload of the event :type uid: str :type event: Event :type payload: str :return: Number of bytes send or False :rtype: int """""" sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) if uid in self.controllers.keys(): addr = self.controllers[uid][0] port = self.controllers[uid][1] if event == E_MESSAGE: #print('/message/{} => {}:{}'.format(payload, addr, port)) return sock.sendto('/message/{}'.format(payload).encode('utf-8'), (addr, port)) elif event == E_RUMBLE: #print('/rumble/{} => {}:{}'.format(payload, addr, port)) return sock.sendto('/rumble/{}'.format(payload).encode('utf-8'), (addr, port)) else: pass else: pass return False","Send an event to a connected controller. Use pymlgame event type and correct payload. To send a message to the controller use pymlgame.E_MESSAGE event and a string as payload. :param uid: Unique id of the controller :param event: Event type :param payload: Payload of the event :type uid: str :type event: Event :type payload: str :return: Number of bytes send or False :rtype: int" "def batch_eval(self, exprs, n, extra_constraints=(), solver=None, model_callback=None): """""" Evaluate one or multiple expressions. :param exprs: A list of expressions to evaluate. :param n: Number of different solutions to return. :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve. :param solver: A solver object, native to the backend, to assist in the evaluation. :param model_callback: a function that will be executed with recovered models (if any) :return: A list of up to n tuples, where each tuple is a solution for all expressions. """""" if self._solver_required and solver is None: raise BackendError(""%s requires a solver for batch evaluation"" % self.__class__.__name__) converted_exprs = [ self.convert(ex) for ex in exprs ] return self._batch_eval( converted_exprs, n, extra_constraints=self.convert_list(extra_constraints), solver=solver, model_callback=model_callback )","Evaluate one or multiple expressions. :param exprs: A list of expressions to evaluate. :param n: Number of different solutions to return. :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve. :param solver: A solver object, native to the backend, to assist in the evaluation. :param model_callback: a function that will be executed with recovered models (if any) :return: A list of up to n tuples, where each tuple is a solution for all expressions." "def copy_attributes(self, dst_fi, attrlist, share=False): """"""Copy interesting attributes of our configuration to another image view. Parameters ---------- dst_fi : subclass of `ImageViewBase` Another instance of image view. attrlist : list A list of attribute names to copy. They can be ``'transforms'``, ``'rotation'``, ``'cutlevels'``, ``'rgbmap'``, ``'zoom'``, ``'pan'``, ``'autocuts'``. share : bool If True, the designated settings will be shared, otherwise the values are simply copied. """""" # TODO: change API to just go with settings names? 
keylist = [] if 'transforms' in attrlist: keylist.extend(['flip_x', 'flip_y', 'swap_xy']) if 'rotation' in attrlist: keylist.extend(['rot_deg']) if 'autocuts' in attrlist: keylist.extend(['autocut_method', 'autocut_params']) if 'cutlevels' in attrlist: keylist.extend(['cuts']) if 'rgbmap' in attrlist: keylist.extend(['color_algorithm', 'color_hashsize', 'color_map', 'intensity_map', 'color_array', 'shift_array']) if 'zoom' in attrlist: keylist.extend(['scale']) if 'pan' in attrlist: keylist.extend(['pan']) with dst_fi.suppress_redraw: if share: self.t_.share_settings(dst_fi.get_settings(), keylist=keylist) else: self.t_.copy_settings(dst_fi.get_settings(), keylist=keylist) dst_fi.redraw(whence=0)","Copy interesting attributes of our configuration to another image view. Parameters ---------- dst_fi : subclass of `ImageViewBase` Another instance of image view. attrlist : list A list of attribute names to copy. They can be ``'transforms'``, ``'rotation'``, ``'cutlevels'``, ``'rgbmap'``, ``'zoom'``, ``'pan'``, ``'autocuts'``. share : bool If True, the designated settings will be shared, otherwise the values are simply copied." "def _make_cron_re(): """""" Make the regular expression that matches a crontab 'cron' line. Each field has a set of allowed values, and can then be in a range, and be listed with dashes. A range can be stepped with the '/' modifier, and ranges can be in a list. A field can also be '*', or '*' divided in steps. The best way to do this is to have a template for a single field that encapsulates the syntax of that field, regardless of what that field matches. We then fill in the actual template's value with the pattern that matches that field. Each field is named, so we can pull them out as a dictionary later. """""" range_ = r""{val}(?:-{val}(?:/\d+)?)?"" template = r""(?P<{name}>"" + ""(?:\*(?:/\d+)?|{r}(?:,{r})*)"".format(r=range_) + "")\s+"" return ( r'^\s*' + template.format(name='minute', val=r'(?:\d|[012345]\d)') + template.format(name='hour', val=r'(?:\d|[01]\d|2[0123])') + template.format(name='day_of_month', val=r'(?:0?[1-9]|[12]\d|3[01])') + template.format(name='month', val=r'(?:0?[1-9]|1[012]|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)') + template.format(name='day_of_week', val=r'(?:[0-7]|mon|tue|wed|thur|fri|sat|sun)') + r'(?P<command>\S.*)$' )","Make the regular expression that matches a crontab 'cron' line. Each field has a set of allowed values, and can then be in a range, and be listed with dashes. A range can be stepped with the '/' modifier, and ranges can be in a list. A field can also be '*', or '*' divided in steps. The best way to do this is to have a template for a single field that encapsulates the syntax of that field, regardless of what that field matches. We then fill in the actual template's value with the pattern that matches that field. Each field is named, so we can pull them out as a dictionary later." "def _search_url_titles(self, title): """"""Search the URL titles by kind for the given `title`. :param str title: title to search for. :return: the URL titles by kind. 
:rtype: collections.defaultdict """""" # make the search logger.info('Searching title name for %r', title) r = self.session.get(self.server_url + 'subtitle/search/', params={'q': title}, timeout=10) r.raise_for_status() # check for redirections if r.history and all([h.status_code == 302 for h in r.history]): logger.debug('Redirected to the subtitles page') links = [r.url] else: # get the suggestions (if needed) soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) links = [link.attrs['href'] for link in soup.select('#processes div.generalWindowTop a')] logger.debug('Found %d suggestions', len(links)) url_titles = defaultdict(list) for link in links: parts = link.split('/') url_titles[parts[-3]].append(parts[-2]) return url_titles","Search the URL titles by kind for the given `title`. :param str title: title to search for. :return: the URL titles by kind. :rtype: collections.defaultdict" "def _fbp_filter(norm_freq, filter_type, frequency_scaling): """"""Create a smoothing filter for FBP. Parameters ---------- norm_freq : `array-like` Frequencies normalized to lie in the interval [0, 1]. filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann', callable} The type of filter to be used. If a string is given, use one of the standard filters with that name. A callable should take an array of values in [0, 1] and return the filter for these frequencies. frequency_scaling : float Scaling of the frequencies for the filter. All frequencies are scaled by this number, any relative frequency above ``frequency_scaling`` is set to 0. Returns ------- smoothing_filter : `numpy.ndarray` Examples -------- Create an FBP filter >>> norm_freq = np.linspace(0, 1, 10) >>> filt = _fbp_filter(norm_freq, ... filter_type='Hann', ... frequency_scaling=0.8) """""" filter_type, filter_type_in = str(filter_type).lower(), filter_type if callable(filter_type): filt = filter_type(norm_freq) elif filter_type == 'ram-lak': filt = np.copy(norm_freq) elif filter_type == 'shepp-logan': filt = norm_freq * np.sinc(norm_freq / (2 * frequency_scaling)) elif filter_type == 'cosine': filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling)) elif filter_type == 'hamming': filt = norm_freq * ( 0.54 + 0.46 * np.cos(norm_freq * np.pi / (frequency_scaling))) elif filter_type == 'hann': filt = norm_freq * ( np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2) else: raise ValueError('unknown `filter_type` ({})' ''.format(filter_type_in)) indicator = (norm_freq <= frequency_scaling) filt *= indicator return filt","Create a smoothing filter for FBP. Parameters ---------- norm_freq : `array-like` Frequencies normalized to lie in the interval [0, 1]. filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann', callable} The type of filter to be used. If a string is given, use one of the standard filters with that name. A callable should take an array of values in [0, 1] and return the filter for these frequencies. frequency_scaling : float Scaling of the frequencies for the filter. All frequencies are scaled by this number, any relative frequency above ``frequency_scaling`` is set to 0. Returns ------- smoothing_filter : `numpy.ndarray` Examples -------- Create an FBP filter >>> norm_freq = np.linspace(0, 1, 10) >>> filt = _fbp_filter(norm_freq, ... filter_type='Hann', ... frequency_scaling=0.8)" "def is_sub_to_all(self, *super_entities): """""" Given a list of super entities, return the entities that have those as a subset of their super entities. 
"""""" if super_entities: if len(super_entities) == 1: # Optimize for the case of just one super entity since this is a much less intensive query has_subset = EntityRelationship.objects.filter( super_entity=super_entities[0]).values_list('sub_entity', flat=True) else: # Get a list of entities that have super entities with all types has_subset = EntityRelationship.objects.filter( super_entity__in=super_entities).values('sub_entity').annotate(Count('super_entity')).filter( super_entity__count=len(set(super_entities))).values_list('sub_entity', flat=True) return self.filter(id__in=has_subset) else: return self","Given a list of super entities, return the entities that have those as a subset of their super entities." "def reset(self, document, parent, level): """"""Reset the state of state machine. After reset, self and self.state can be used to passed to docutils.parsers.rst.Directive.run Parameters ---------- document: docutils document Current document of the node. parent: parent node Parent node that will be used to interpret role and directives. level: int Current section level. """""" self.language = languages.get_language( document.settings.language_code) # setup memo self.memo.document = document self.memo.reporter = document.reporter self.memo.language = self.language self.memo.section_level = level # setup inliner if self.memo.inliner is None: self.memo.inliner = Inliner() self.memo.inliner.init_customizations(document.settings) inliner = self.memo.inliner inliner.reporter = document.reporter inliner.document = document inliner.language = self.language inliner.parent = parent # setup self self.document = document self.reporter = self.memo.reporter self.node = parent self.state.runtime_init() self.input_lines = document['source']","Reset the state of state machine. After reset, self and self.state can be used to passed to docutils.parsers.rst.Directive.run Parameters ---------- document: docutils document Current document of the node. parent: parent node Parent node that will be used to interpret role and directives. level: int Current section level." "def send_file(request, filename, content_type='image/jpeg'): """""" Send a file through Django without loading the whole file into memory at once. The FileWrapper will turn the file object into an iterator for chunks of 8KB. """""" wrapper = FixedFileWrapper(file(filename, 'rb')) response = HttpResponse(wrapper, content_type=content_type) response['Content-Length'] = os.path.getsize(filename) return response","Send a file through Django without loading the whole file into memory at once. The FileWrapper will turn the file object into an iterator for chunks of 8KB." "def execute_batch_dml( self, session, transaction, statements, seqno, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """""" Executes a batch of SQL DML statements. This method allows many statements to be run with lower latency than submitting them sequentially with ``ExecuteSql``. Statements are executed in order, sequentially. ``ExecuteBatchDmlResponse`` will contain a ``ResultSet`` for each DML statement that has successfully executed. If a statement fails, its error status will be returned as part of the ``ExecuteBatchDmlResponse``. Execution will stop at the first failed statement; the remaining statements will not run. ExecuteBatchDml is expected to return an OK status with a response even if there was an error while processing one of the DML statements. 
Clients must inspect response.status to determine if there were any errors while processing the request. See more details in ``ExecuteBatchDmlRequest`` and ``ExecuteBatchDmlResponse``. Example: >>> from google.cloud import spanner_v1 >>> >>> client = spanner_v1.SpannerClient() >>> >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') >>> >>> # TODO: Initialize `transaction`: >>> transaction = {} >>> >>> # TODO: Initialize `statements`: >>> statements = [] >>> >>> # TODO: Initialize `seqno`: >>> seqno = 0 >>> >>> response = client.execute_batch_dml(session, transaction, statements, seqno) Args: session (str): Required. The session in which the DML statements should be performed. transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. A ReadWrite transaction is required. Single-use transactions are not supported (to avoid replay). The caller must either supply an existing transaction ID or begin a new transaction. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.TransactionSelector` statements (list[Union[dict, ~google.cloud.spanner_v1.types.Statement]]): The list of statements to execute in this batch. Statements are executed serially, such that the effects of statement i are visible to statement i+1. Each statement must be a DML statement. Execution will stop at the first failed statement; the remaining statements will not run. REQUIRES: statements\_size() > 0. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Statement` seqno (long): A per-transaction sequence number used to identify this request. This is used in the same space as the seqno in ``ExecuteSqlRequest``. See more details in ``ExecuteSqlRequest``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.spanner_v1.types.ExecuteBatchDmlResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """""" # Wrap the transport method to add retry and timeout logic. 
if ""execute_batch_dml"" not in self._inner_api_calls: self._inner_api_calls[ ""execute_batch_dml"" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.execute_batch_dml, default_retry=self._method_configs[""ExecuteBatchDml""].retry, default_timeout=self._method_configs[""ExecuteBatchDml""].timeout, client_info=self._client_info, ) request = spanner_pb2.ExecuteBatchDmlRequest( session=session, transaction=transaction, statements=statements, seqno=seqno ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [(""session"", session)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls[""execute_batch_dml""]( request, retry=retry, timeout=timeout, metadata=metadata )","Executes a batch of SQL DML statements. This method allows many statements to be run with lower latency than submitting them sequentially with ``ExecuteSql``. Statements are executed in order, sequentially. ``ExecuteBatchDmlResponse`` will contain a ``ResultSet`` for each DML statement that has successfully executed. If a statement fails, its error status will be returned as part of the ``ExecuteBatchDmlResponse``. Execution will stop at the first failed statement; the remaining statements will not run. ExecuteBatchDml is expected to return an OK status with a response even if there was an error while processing one of the DML statements. Clients must inspect response.status to determine if there were any errors while processing the request. See more details in ``ExecuteBatchDmlRequest`` and ``ExecuteBatchDmlResponse``. Example: >>> from google.cloud import spanner_v1 >>> >>> client = spanner_v1.SpannerClient() >>> >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') >>> >>> # TODO: Initialize `transaction`: >>> transaction = {} >>> >>> # TODO: Initialize `statements`: >>> statements = [] >>> >>> # TODO: Initialize `seqno`: >>> seqno = 0 >>> >>> response = client.execute_batch_dml(session, transaction, statements, seqno) Args: session (str): Required. The session in which the DML statements should be performed. transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. A ReadWrite transaction is required. Single-use transactions are not supported (to avoid replay). The caller must either supply an existing transaction ID or begin a new transaction. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.TransactionSelector` statements (list[Union[dict, ~google.cloud.spanner_v1.types.Statement]]): The list of statements to execute in this batch. Statements are executed serially, such that the effects of statement i are visible to statement i+1. Each statement must be a DML statement. Execution will stop at the first failed statement; the remaining statements will not run. REQUIRES: statements\_size() > 0. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Statement` seqno (long): A per-transaction sequence number used to identify this request. This is used in the same space as the seqno in ``ExecuteSqlRequest``. See more details in ``ExecuteSqlRequest``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. 
timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.spanner_v1.types.ExecuteBatchDmlResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid." "def _BuildDelete(self, subject, attribute=None, timestamp=None): """"""Build the DELETE query to be executed."""""" subjects_q = { ""query"": ""DELETE subjects FROM subjects WHERE hash=unhex(md5(%s))"", ""args"": [subject] } aff4_q = { ""query"": ""DELETE aff4 FROM aff4 WHERE subject_hash=unhex(md5(%s))"", ""args"": [subject] } locks_q = { ""query"": ""DELETE locks FROM locks WHERE subject_hash=unhex(md5(%s))"", ""args"": [subject] } if attribute: aff4_q[""query""] += "" AND attribute_hash=unhex(md5(%s))"" aff4_q[""args""].append(attribute) if isinstance(timestamp, (tuple, list)): aff4_q[""query""] += "" AND aff4.timestamp >= %s AND aff4.timestamp <= %s"" aff4_q[""args""].append(int(timestamp[0])) aff4_q[""args""].append(int(timestamp[1])) attributes_q = { ""query"": ""DELETE attributes FROM attributes LEFT JOIN aff4 ON "" ""aff4.attribute_hash=attributes.hash "" ""WHERE attributes.hash=unhex(md5(%s)) "" ""AND aff4.attribute_hash IS NULL"", ""args"": [attribute] } # If only attribute is being deleted we will not check to clean up # subject and lock tables. return [aff4_q, attributes_q] # If a subject is being deleted we clean up the locks and subjects table # but assume it has attributes that are common to other subjects return [aff4_q, locks_q, subjects_q]",Build the DELETE query to be executed. "def append(self, clause): """""" Add one more clause to CNF formula. This method additionally updates the number of variables, i.e. variable ``self.nv``, used in the formula. :param clause: a new clause to add. :type clause: list(int) .. code-block:: python >>> from pysat.formula import CNF >>> cnf = CNF(from_clauses=[[-1, 2], [3]]) >>> cnf.append([-3, 4]) >>> print cnf.clauses [[-1, 2], [3], [-3, 4]] """""" self.nv = max([abs(l) for l in clause] + [self.nv]) self.clauses.append(clause)","Add one more clause to CNF formula. This method additionally updates the number of variables, i.e. variable ``self.nv``, used in the formula. :param clause: a new clause to add. :type clause: list(int) .. code-block:: python >>> from pysat.formula import CNF >>> cnf = CNF(from_clauses=[[-1, 2], [3]]) >>> cnf.append([-3, 4]) >>> print cnf.clauses [[-1, 2], [3], [-3, 4]]" "def join_keys(x, y, by=None): """""" Join keys. Given two data frames, create a unique key for each row. Parameters ----------- x : dataframe y : dataframe by : list-like Column names to join by Returns ------- out : dict Dictionary with keys x and y. The values of both keys are arrays with integer elements. Identical rows in x and y dataframes would have the same key in the output. The key elements start at 1. """""" if by is None: by = slice(None, None, None) if isinstance(by, tuple): by = list(by) joint = x[by].append(y[by], ignore_index=True) keys = ninteraction(joint, drop=True) keys = np.asarray(keys) nx, ny = len(x), len(y) return {'x': keys[np.arange(nx)], 'y': keys[nx + np.arange(ny)]}","Join keys. 
Given two data frames, create a unique key for each row. Parameters ----------- x : dataframe y : dataframe by : list-like Column names to join by Returns ------- out : dict Dictionary with keys x and y. The values of both keys are arrays with integer elements. Identical rows in x and y dataframes would have the same key in the output. The key elements start at 1." "def bgp_time_conversion(bgp_uptime): """""" Convert string time to seconds. Examples 00:14:23 00:13:40 00:00:21 00:00:13 00:00:49 1d11h 1d17h 1w0d 8w5d 1y28w never """""" bgp_uptime = bgp_uptime.strip() uptime_letters = set(['w', 'h', 'd']) if 'never' in bgp_uptime: return -1 elif ':' in bgp_uptime: times = bgp_uptime.split("":"") times = [int(x) for x in times] hours, minutes, seconds = times return (hours * 3600) + (minutes * 60) + seconds # Check if any letters 'w', 'h', 'd' are in the time string elif uptime_letters & set(bgp_uptime): form1 = r'(\d+)d(\d+)h' # 1d17h form2 = r'(\d+)w(\d+)d' # 8w5d form3 = r'(\d+)y(\d+)w' # 1y28w match = re.search(form1, bgp_uptime) if match: days = int(match.group(1)) hours = int(match.group(2)) return (days * DAY_SECONDS) + (hours * 3600) match = re.search(form2, bgp_uptime) if match: weeks = int(match.group(1)) days = int(match.group(2)) return (weeks * WEEK_SECONDS) + (days * DAY_SECONDS) match = re.search(form3, bgp_uptime) if match: years = int(match.group(1)) weeks = int(match.group(2)) return (years * YEAR_SECONDS) + (weeks * WEEK_SECONDS) raise ValueError(""Unexpected value for BGP uptime string: {}"".format(bgp_uptime))","Convert string time to seconds. Examples 00:14:23 00:13:40 00:00:21 00:00:13 00:00:49 1d11h 1d17h 1w0d 8w5d 1y28w never" "def _ncc_c_3dim(x, y): """""" Variant of NCCc that operates with 2 dimensional X arrays and 2 dimensional y vector Returns a 3 dimensional array of normalized fourier transforms """""" den = norm(x, axis=1)[:, None] * norm(y, axis=1) den[den == 0] = np.Inf x_len = x.shape[-1] fft_size = 1 << (2*x_len-1).bit_length() cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size))[:, None]) cc = np.concatenate((cc[:,:,-(x_len-1):], cc[:,:,:x_len]), axis=2) return np.real(cc) / den.T[:, :, None]","Variant of NCCc that operates with 2 dimensional X arrays and 2 dimensional y vector Returns a 3 dimensional array of normalized fourier transforms" "def _flatten_location_translations(location_translations): """"""If location A translates to B, and B to C, then make A translate directly to C. Args: location_translations: dict of Location -> Location, where the key translates to the value. Mutated in place for efficiency and simplicity of implementation. """""" sources_to_process = set(six.iterkeys(location_translations)) def _update_translation(source): """"""Return the proper (fully-flattened) translation for the given location."""""" destination = location_translations[source] if destination not in location_translations: # ""destination"" cannot be translated, no further flattening required. return destination else: # ""destination"" can itself be translated -- do so, # and then flatten ""source"" to the final translation as well. sources_to_process.discard(destination) final_destination = _update_translation(destination) location_translations[source] = final_destination return final_destination while sources_to_process: _update_translation(sources_to_process.pop())","If location A translates to B, and B to C, then make A translate directly to C. Args: location_translations: dict of Location -> Location, where the key translates to the value. 
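A minimal standalone sketch of the flattening rule described just above for _flatten_location_translations, using plain strings in place of Location objects (an illustrative assumption) and a recursive resolver that mutates the mapping in place; already-direct entries are left untouched, mirroring the early return in the original:

# Sketch of the translation-flattening idea: if A -> B and B -> C,
# rewrite the mapping so that A -> C directly. Strings stand in for
# Location objects here, purely for illustration.
def flatten_translations(translations):
    def resolve(source, seen=None):
        seen = seen or {source}
        destination = translations[source]
        if destination not in translations or destination in seen:
            # already a final destination (or a cycle guard) - nothing to flatten
            return destination
        seen.add(destination)
        final = resolve(destination, seen)
        translations[source] = final
        return final

    for source in list(translations):
        resolve(source)
    return translations

if __name__ == "__main__":
    mapping = {"A": "B", "B": "C", "C": "D"}
    print(flatten_translations(mapping))  # {'A': 'D', 'B': 'D', 'C': 'D'}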
Mutated in place for efficiency and simplicity of implementation." "def calc_svd(self, lapack_driver='gesdd'): """""" Return the SVD decomposition of data The input data np.ndarray shall be of dimension 2, with time as the first dimension, and the channels in the second Hence data should be of shape (nt, nch) Uses scipy.linalg.svd(), with: full_matrices = True compute_uv = True overwrite_a = False check_finite = True See scipy online doc for details Return ------ chronos: np.ndarray First arg (u) returned by scipy.linalg.svd() Contains the so-called 'chronos', of shape (nt, nt) i.e.: the time-dependent part of the decomposition s: np.ndarray Second arg (s) returned by scipy.linalg.svd() Contains the singular values, of shape (nch,) i.e.: the channel-dependent part of the decomposition topos: np.ndarray Third arg (v) returned by scipy.linalg.svd() Contains the so-called 'topos', of shape (nch, nch) i.e.: the channel-dependent part of the decomposition """""" if self._isSpectral(): msg = ""svd not implemented yet for spectral data class"" raise Exception(msg) chronos, s, topos = _comp.calc_svd(self.data, lapack_driver=lapack_driver) return chronos, s, topos","Return the SVD decomposition of data The input data np.ndarray shall be of dimension 2, with time as the first dimension, and the channels in the second Hence data should be of shape (nt, nch) Uses scipy.linalg.svd(), with: full_matrices = True compute_uv = True overwrite_a = False check_finite = True See scipy online doc for details Return ------ chronos: np.ndarray First arg (u) returned by scipy.linalg.svd() Contains the so-called 'chronos', of shape (nt, nt) i.e.: the time-dependent part of the decomposition s: np.ndarray Second arg (s) returned by scipy.linalg.svd() Contains the singular values, of shape (nch,) i.e.: the channel-dependent part of the decomposition topos: np.ndarray Third arg (v) returned by scipy.linalg.svd() Contains the so-called 'topos', of shape (nch, nch) i.e.: the channel-dependent part of the decomposition" "def decrypt_message_data(self, body): """""" Inverse of `encrypt_message_data` for incoming server messages. """""" if len(body) < 8: raise InvalidBufferError(body) # TODO Check salt, session_id and sequence_number key_id = struct.unpack(' typing.Union[None, str]: """""" Creates an absolute path in the project source directory from the relative path components. :param args: Relative components for creating a path within the project source directory :return: An absolute path to the specified file or directory within the project source directory. """""" if not self._project: return None return environ.paths.clean(os.path.join( self._project.source_directory, *args ))","Creates an absolute path in the project source directory from the relative path components. :param args: Relative components for creating a path within the project source directory :return: An absolute path to the specified file or directory within the project source directory." "def list_loadbalancers(self, retrieve_all=True, **_params): """"""Fetches a list of all loadbalancers for a project."""""" return self.list('loadbalancers', self.lbaas_loadbalancers_path, retrieve_all, **_params)",Fetches a list of all loadbalancers for a project.
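For the chronos / singular-values / topos decomposition described for calc_svd in the row above, here is a hedged standalone sketch that calls scipy.linalg.svd directly instead of the internal _comp helper, which is not part of this excerpt:

# data has shape (nt, nch); returns chronos (nt, nt), s (nch,), topos (nch, nch)
import numpy as np
from scipy.linalg import svd

def calc_svd_sketch(data, lapack_driver='gesdd'):
    chronos, s, topos = svd(data, full_matrices=True, compute_uv=True,
                            overwrite_a=False, check_finite=True,
                            lapack_driver=lapack_driver)
    return chronos, s, topos

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = rng.normal(size=(50, 8))             # nt=50 time points, nch=8 channels
    chronos, s, topos = calc_svd_sketch(data)
    print(chronos.shape, s.shape, topos.shape)  # (50, 50) (8,) (8, 8)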
"def order_columns_in_row(fields, unordered_row): """"""Ensure columns appear in the same order for every row in table :param fields: :param unordered_row: """""" fields_idx = {f: pos for pos, f in enumerate(fields)} return OrderedDict(sorted(unordered_row.items(), key=lambda i: fields_idx[i[0]]))","Ensure columns appear in the same order for every row in table :param fields: :param unordered_row:" "def getOrCreate(cls, conf=None): """""" Get or instantiate a SparkContext and register it as a singleton object. :param conf: SparkConf (optional) """""" with SparkContext._lock: if SparkContext._active_spark_context is None: SparkContext(conf=conf or SparkConf()) return SparkContext._active_spark_context","Get or instantiate a SparkContext and register it as a singleton object. :param conf: SparkConf (optional)" "def rest(f): """"""Decorator for simple REST endpoints. Functions must return one of these values: - a dict to jsonify - nothing for an empty 204 response - a tuple containing a status code and a dict to jsonify """""" @wraps(f) def wrapper(*args, **kwargs): ret = f(*args, **kwargs) if ret is None: response = '', 204 elif isinstance(ret, current_app.response_class): response = ret elif isinstance(ret, tuple): # code, result_dict|msg_string if isinstance(ret[1], basestring): response = jsonify(msg=ret[1]) else: response = jsonify(**ret[1]) response.status_code = ret[0] else: response = jsonify(**ret) return response return wrapper","Decorator for simple REST endpoints. Functions must return one of these values: - a dict to jsonify - nothing for an empty 204 response - a tuple containing a status code and a dict to jsonify" "def first_ipv6(self) -> Optional[AddressInfo]: '''The first IPV6 address.''' for info in self._address_infos: if info.family == socket.AF_INET6: return info",The first IPV6 address. "def instance_ip_grouping_key(): """"""Grouping key with instance set to the IP Address of this host."""""" with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s: s.connect(('localhost', 0)) return {'instance': s.getsockname()[0]}",Grouping key with instance set to the IP Address of this host. "def first(self): """"""Extract the first AppNexus object present in the response"""""" page = self.get_page(num_elements=1) data = self.extract_data(page) if data: return data[0]",Extract the first AppNexus object present in the response "def get_state(self, as_str=False): """"""Returns user state. See ``UserState``. :param bool as_str: Return human-friendly state name instead of an ID. :rtype: int|str """""" uid = self.user_id if self._iface_user.get_id() == uid: result = self._iface.get_my_state() else: result = self._iface.get_state(uid) if as_str: return UserState.get_alias(result) return result","Returns user state. See ``UserState``. :param bool as_str: Return human-friendly state name instead of an ID. :rtype: int|str" "def check(definition, data, *args, **kwargs): """"""Checks if the input follows the definition"""""" checker = checker_factory(definition) return checker(data, *args, **kwargs)",Checks if the input follows the definition "def keys(self, pattern=None): """"""Returns a list of keys matching ``pattern``. By default return all keys. 
>>> dc = Dictator() >>> dc['l0'] = [1, 2, 3, 4] >>> dc['s0'] = 'string value' >>> dc.keys() ['l0', 's0'] >>> dc.keys('h*') [] >>> dc.clear() :param pattern: key pattern :type pattern: str :return: list of keys in db :rtype: list of str """""" logger.debug('call pop %s', pattern) if pattern is None: pattern = '*' return self._redis.keys(pattern=pattern)","Returns a list of keys matching ``pattern``. By default return all keys. >>> dc = Dictator() >>> dc['l0'] = [1, 2, 3, 4] >>> dc['s0'] = 'string value' >>> dc.keys() ['l0', 's0'] >>> dc.keys('h*') [] >>> dc.clear() :param pattern: key pattern :type pattern: str :return: list of keys in db :rtype: list of str" "def update(self, status): """""" Update the RoomInstance :param RoomInstance.RoomStatus status: Set to completed to end the Room. :returns: Updated RoomInstance :rtype: twilio.rest.video.v1.room.RoomInstance """""" data = values.of({'Status': status, }) payload = self._version.update( 'POST', self._uri, data=data, ) return RoomInstance(self._version, payload, sid=self._solution['sid'], )","Update the RoomInstance :param RoomInstance.RoomStatus status: Set to completed to end the Room. :returns: Updated RoomInstance :rtype: twilio.rest.video.v1.room.RoomInstance" "def run_serial(target, jobs, n=1, **kwargs): """""" Evaluate the given function with each set of arguments, and return a list of results. This function does in series. Parameters ---------- target : function A function to be evaluated. The function must accepts three arguments, which are a list of arguments given as `jobs`, a job and task id (int). jobs : list A list of arguments passed to the function. n : int, optional A number of tasks. Repeat the evaluation `n` times for each job. 1 for default. Returns ------- results : list A list of results. Each element is a list containing `n` results. Examples -------- >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs')) >>> target = lambda args, job_id, task_id: (args[1] * args[0]) >>> run_serial(target, jobs) [['spam'], ['hamham'], ['eggseggseggs']] >>> target = lambda args, job_id, task_id: ""{:d} {}"".format(task_id, args[1] * args[0]) >>> run_serial(target, jobs, n=2) [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']] >>> seeds = genseeds(3) >>> def target(arg, job_id, task_id): ... from ecell4.extra.ensemble import getseed ... return getseed(arg, task_id) >>> run_serial(target, (seeds, ), n=3) # doctest: +SKIP [[127152315, 2028054913, 253611282]] See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure """""" return [[target(copy.copy(job), i + 1, j + 1) for j in range(n)] for i, job in enumerate(jobs)]","Evaluate the given function with each set of arguments, and return a list of results. This function does in series. Parameters ---------- target : function A function to be evaluated. The function must accepts three arguments, which are a list of arguments given as `jobs`, a job and task id (int). jobs : list A list of arguments passed to the function. n : int, optional A number of tasks. Repeat the evaluation `n` times for each job. 1 for default. Returns ------- results : list A list of results. Each element is a list containing `n` results. 
Examples -------- >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs')) >>> target = lambda args, job_id, task_id: (args[1] * args[0]) >>> run_serial(target, jobs) [['spam'], ['hamham'], ['eggseggseggs']] >>> target = lambda args, job_id, task_id: ""{:d} {}"".format(task_id, args[1] * args[0]) >>> run_serial(target, jobs, n=2) [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']] >>> seeds = genseeds(3) >>> def target(arg, job_id, task_id): ... from ecell4.extra.ensemble import getseed ... return getseed(arg, task_id) >>> run_serial(target, (seeds, ), n=3) # doctest: +SKIP [[127152315, 2028054913, 253611282]] See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure" "def _parse_from_incar(filename, key): """""" Helper function to parse a parameter from the INCAR. """""" dirname = os.path.dirname(filename) for f in os.listdir(dirname): if re.search(r""INCAR"", f): warnings.warn(""INCAR found. Using "" + key + "" from INCAR."") incar = Incar.from_file(os.path.join(dirname, f)) if key in incar: return incar[key] else: return None return None",Helper function to parse a parameter from the INCAR. "def _compute_large_distance_term(self, C, dists, rup): """""" Compute and return large distance model term, that is the 8-th term in equation 1, page 74. The calculation of this term is explained in paragraph 'Large Distance Model', page 78. """""" # equation 15, page 79 if rup.mag < 5.5: T6 = 1.0 elif rup.mag >= 5.5 and rup.mag <= 6.5: T6 = 0.5 * (6.5 - rup.mag) + 0.5 else: T6 = 0.5 # equation 14, page 79 large_distance_term = np.zeros_like(dists.rrup) idx = dists.rrup >= 100.0 large_distance_term[idx] = C['a18'] * (dists.rrup[idx] - 100.0) * T6 return large_distance_term","Compute and return large distance model term, that is the 8-th term in equation 1, page 74. The calculation of this term is explained in paragraph 'Large Distance Model', page 78." "def serve_http(self): """""" serve_http serves the Prometheus endpoint. """""" start_http_server(port=self.options.port, addr=str(self.options.address))",serve_http serves the Prometheus endpoint. "def build_action(self, runnable, regime, action): """""" Build event handler action code. @param action: Event handler action object @type action: lems.model.dynamics.Action @return: Generated action code @rtype: string """""" if isinstance(action, StateAssignment): return self.build_state_assignment(runnable, regime, action) if isinstance(action, EventOut): return self.build_event_out(action) if isinstance(action, Transition): return self.build_transition(action) else: return ['pass']","Build event handler action code. @param action: Event handler action object @type action: lems.model.dynamics.Action @return: Generated action code @rtype: string" "def get_processed_path(self): """"""Returns the processed file path from the storage backend. :returns: File path from the storage backend. :rtype: :py:class:`unicode` """""" location = self.get_storage().location return self.get_processed_key_name()[len(location):]","Returns the processed file path from the storage backend. :returns: File path from the storage backend. :rtype: :py:class:`unicode`" "def array_to_base64_png(array): """"""Convert an array into base64-enoded PNG image. Args: array: A 2D np.ndarray or nested list of items. Returns: A base64-encoded string the image. The image is grayscale if the array is 2D. 
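The min-max scaling step that array_to_base64_png performs on the finite values can be illustrated on its own; the PNG/base64 encoding and the colour-coding of infinities and NaNs are omitted here because the encoder helper used by the original function is not part of this excerpt:

# Rescale finite values to 0..255 and repeat across 3 RGB channels,
# mirroring the normalization described for array_to_base64_png.
import numpy as np

def scale_to_rgb(array):
    array = np.array(array, dtype=np.float32)
    finite = np.isfinite(array)
    if not finite.any():
        return np.zeros(array.shape + (3,), dtype=np.uint8)
    minval = array[finite].min()
    maxval = array[finite].max()
    span = (maxval - minval) or 1.0   # avoid division by zero for constant arrays
    scaled = ((array - minval) / span * 255).astype(np.uint8)
    return np.repeat(scaled[..., np.newaxis], 3, axis=-1)

if __name__ == "__main__":
    rgb = scale_to_rgb([[0.0, 0.5], [1.0, 0.25]])
    print(rgb.shape)    # (2, 2, 3)
    print(rgb[..., 0])  # [[  0 127] [255  63]]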
The image is RGB color if the image is 3D with lsat dimension equal to 3. Raises: ValueError: If the input `array` is not rank-2, or if the rank-2 `array` is empty. """""" # TODO(cais): Deal with 3D case. # TODO(cais): If there are None values in here, replace them with all NaNs. array = np.array(array, dtype=np.float32) if len(array.shape) != 2: raise ValueError( ""Expected rank-2 array; received rank-%d array."" % len(array.shape)) if not np.size(array): raise ValueError( ""Cannot encode an empty array (size: %s) as image."" % (array.shape,)) is_infinity = np.isinf(array) is_positive = array > 0.0 is_positive_infinity = np.logical_and(is_infinity, is_positive) is_negative_infinity = np.logical_and(is_infinity, np.logical_not(is_positive)) is_nan = np.isnan(array) finite_indices = np.where(np.logical_and(np.logical_not(is_infinity), np.logical_not(is_nan))) if np.size(finite_indices): # Finite subset is not empty. minval = np.min(array[finite_indices]) maxval = np.max(array[finite_indices]) scaled = np.array((array - minval) / (maxval - minval) * 255, dtype=np.uint8) rgb = np.repeat(np.expand_dims(scaled, -1), IMAGE_COLOR_CHANNELS, axis=-1) else: rgb = np.zeros(array.shape + (IMAGE_COLOR_CHANNELS,), dtype=np.uint8) # Color-code pixels that correspond to infinities and nans. rgb[is_positive_infinity] = POSITIVE_INFINITY_RGB rgb[is_negative_infinity] = NEGATIVE_INFINITY_RGB rgb[is_nan] = NAN_RGB image_encoded = base64.b64encode(encoder.encode_png(rgb)) return image_encoded","Convert an array into base64-enoded PNG image. Args: array: A 2D np.ndarray or nested list of items. Returns: A base64-encoded string the image. The image is grayscale if the array is 2D. The image is RGB color if the image is 3D with lsat dimension equal to 3. Raises: ValueError: If the input `array` is not rank-2, or if the rank-2 `array` is empty." "def set_permissions( self, owner, file_perms=PERMS_FILE_DEFAULT, dir_perms=PERMS_DIR_DEFAULT, use_sudo=True): """"""Set the owner and permissions of the code deploy. The owner will be set recursively for the entire code deploy. The directory permissions will be set on only the base of the code deploy and the releases directory. The file permissions will be set recursively for the entire code deploy. If not specified default values will be used for file or directory permissions. By default the Fabric ``sudo`` function will be used for changing the owner and permissions of the code deploy. Optionally, you can pass the ``use_sudo=False`` argument to skip trying to change the owner of the code deploy and to use the ``run`` function to change permissions. This method performs between three and four network operations depending on if ``use_sudo`` is false or true, respectively. :param str owner: User and group in the form 'owner:group' to set for the code deploy. :param str file_perms: Permissions to set for all files in the code deploy in the form 'u+perms,g+perms,o+perms'. Default is ``u+rw,g+rw,o+r``. :param str dir_perms: Permissions to set for the base and releases directories in the form 'u+perms,g+perms,o+perms'. Default is ``u+rwx,g+rws,o+rx``. :param bool use_sudo: If ``True``, use ``sudo()`` to change ownership and permissions of the code deploy. If ``False`` try to change permissions using the ``run()`` command, do not change ownership. .. versionchanged:: 0.2.0 ``use_sudo=False`` will no longer attempt to change ownership of the code deploy since this will just be a no-op or fail. 
"""""" runner = self._runner.sudo if use_sudo else self._runner.run if use_sudo: runner(""chown -R '{0}' '{1}'"".format(owner, self._base)) for path in (self._base, self._releases): runner(""chmod '{0}' '{1}'"".format(dir_perms, path)) runner(""chmod -R '{0}' '{1}'"".format(file_perms, self._base))","Set the owner and permissions of the code deploy. The owner will be set recursively for the entire code deploy. The directory permissions will be set on only the base of the code deploy and the releases directory. The file permissions will be set recursively for the entire code deploy. If not specified default values will be used for file or directory permissions. By default the Fabric ``sudo`` function will be used for changing the owner and permissions of the code deploy. Optionally, you can pass the ``use_sudo=False`` argument to skip trying to change the owner of the code deploy and to use the ``run`` function to change permissions. This method performs between three and four network operations depending on if ``use_sudo`` is false or true, respectively. :param str owner: User and group in the form 'owner:group' to set for the code deploy. :param str file_perms: Permissions to set for all files in the code deploy in the form 'u+perms,g+perms,o+perms'. Default is ``u+rw,g+rw,o+r``. :param str dir_perms: Permissions to set for the base and releases directories in the form 'u+perms,g+perms,o+perms'. Default is ``u+rwx,g+rws,o+rx``. :param bool use_sudo: If ``True``, use ``sudo()`` to change ownership and permissions of the code deploy. If ``False`` try to change permissions using the ``run()`` command, do not change ownership. .. versionchanged:: 0.2.0 ``use_sudo=False`` will no longer attempt to change ownership of the code deploy since this will just be a no-op or fail." "def codepoint_included(self, codepoint): """"""Check if codepoint matches any of the defined codepoints."""""" if self.codepoints == None: return True for cp in self.codepoints: mismatch = False for i in range(len(cp)): if (cp[i] is not None) and (cp[i] != codepoint[i]): mismatch = True break if not mismatch: return True return False",Check if codepoint matches any of the defined codepoints. "def excepthook(type, value, traceback): # pylint: disable=unused-argument """"""Log exceptions instead of printing a traceback to stderr."""""" try: six.reraise(type, value, traceback) except type: _LOGGER.exception(str(value)) if isinstance(value, KeyboardInterrupt): message = ""Cancelling at the user's request."" else: message = handle_unexpected_exception(value) print(message, file=sys.stderr)",Log exceptions instead of printing a traceback to stderr. "def make_general(basis, use_copy=True): """""" Makes one large general contraction for each angular momentum If use_copy is True, the input basis set is not modified. The output of this function is not pretty. If you want to make it nicer, use sort_basis afterwards. 
"""""" zero = '0.00000000' basis = uncontract_spdf(basis, 0, use_copy) for k, el in basis['elements'].items(): if not 'electron_shells' in el: continue # See what we have all_am = [] for sh in el['electron_shells']: if not sh['angular_momentum'] in all_am: all_am.append(sh['angular_momentum']) all_am = sorted(all_am) newshells = [] for am in all_am: newsh = { 'angular_momentum': am, 'exponents': [], 'coefficients': [], 'region': '', 'function_type': None, } # Do exponents first for sh in el['electron_shells']: if sh['angular_momentum'] != am: continue newsh['exponents'].extend(sh['exponents']) # Number of primitives in the new shell nprim = len(newsh['exponents']) cur_prim = 0 for sh in el['electron_shells']: if sh['angular_momentum'] != am: continue if newsh['function_type'] is None: newsh['function_type'] = sh['function_type'] # Make sure the shells we are merging have the same function types ft1 = newsh['function_type'] ft2 = sh['function_type'] # Check if one function type is the subset of another # (should handle gto/gto_spherical, etc) if ft1 not in ft2 and ft2 not in ft1: raise RuntimeError(""Cannot make general contraction of different function types"") ngen = len(sh['coefficients']) for g in range(ngen): coef = [zero] * cur_prim coef.extend(sh['coefficients'][g]) coef.extend([zero] * (nprim - len(coef))) newsh['coefficients'].append(coef) cur_prim += len(sh['exponents']) newshells.append(newsh) el['electron_shells'] = newshells return basis","Makes one large general contraction for each angular momentum If use_copy is True, the input basis set is not modified. The output of this function is not pretty. If you want to make it nicer, use sort_basis afterwards." "def dispatch_write(self, buf): """"""There is new stuff to write when possible"""""" if self.state != STATE_DEAD and self.enabled: super().dispatch_write(buf) return True return False",There is new stuff to write when possible "def _get_coarse_dataset(self, key, info): """"""Get the coarse dataset refered to by `key` from the XML data."""""" angles = self.root.find('.//Tile_Angles') if key in ['solar_zenith_angle', 'solar_azimuth_angle']: elts = angles.findall(info['xml_tag'] + '/Values_List/VALUES') return np.array([[val for val in elt.text.split()] for elt in elts], dtype=np.float) elif key in ['satellite_zenith_angle', 'satellite_azimuth_angle']: arrays = [] elts = angles.findall(info['xml_tag'] + '[@bandId=""1""]') for elt in elts: items = elt.findall(info['xml_item'] + '/Values_List/VALUES') arrays.append(np.array([[val for val in item.text.split()] for item in items], dtype=np.float)) return np.nanmean(np.dstack(arrays), -1) else: return",Get the coarse dataset refered to by `key` from the XML data. "def display_matrix(self, matrix, interval=2.0, brightness=1.0, fading=False, ignore_duplicates=False): """""" Displays an LED matrix on Nuimo's LED matrix display. :param matrix: the matrix to display :param interval: interval in seconds until the matrix disappears again :param brightness: led brightness between 0..1 :param fading: if True, the previous matrix fades into the new matrix :param ignore_duplicates: if True, the matrix is not sent again if already being displayed """""" self._matrix_writer.write( matrix=matrix, interval=interval, brightness=brightness, fading=fading, ignore_duplicates=ignore_duplicates )","Displays an LED matrix on Nuimo's LED matrix display. 
:param matrix: the matrix to display :param interval: interval in seconds until the matrix disappears again :param brightness: led brightness between 0..1 :param fading: if True, the previous matrix fades into the new matrix :param ignore_duplicates: if True, the matrix is not sent again if already being displayed" "def remove_intersecting(self, division, symm=True): """"""Removes paired relationships between intersecting divisions"""""" IntersectRelationship.objects.filter( from_division=self, to_division=division ).delete() if symm: division.remove_intersecting(self, False)",Removes paired relationships between intersecting divisions "async def _async_get_data(self, resource, id=None): """"""Get the data from the resource."""""" if id: url = urljoin(self._api_url, ""spc/{}/{}"".format(resource, id)) else: url = urljoin(self._api_url, ""spc/{}"".format(resource)) data = await async_request(self._session.get, url) if not data: return False if id and isinstance(data['data'][resource], list): # for some reason the gateway returns an array with a single # element for areas but not for zones... return data['data'][resource][0] elif id: return data['data'][resource] return [item for item in data['data'][resource]]",Get the data from the resource. "def get_bgcolor(self, index): """"""Background color depending on value."""""" column = index.column() if not self.bgcolor_enabled: return value = self.get_value(index.row(), column) if self.max_min_col[column] is None or isna(value): color = QColor(BACKGROUND_NONNUMBER_COLOR) if is_text_string(value): color.setAlphaF(BACKGROUND_STRING_ALPHA) else: color.setAlphaF(BACKGROUND_MISC_ALPHA) else: if isinstance(value, COMPLEX_NUMBER_TYPES): color_func = abs else: color_func = float vmax, vmin = self.return_max(self.max_min_col, column) hue = (BACKGROUND_NUMBER_MINHUE + BACKGROUND_NUMBER_HUERANGE * (vmax - color_func(value)) / (vmax - vmin)) hue = float(abs(hue)) if hue > 1: hue = 1 color = QColor.fromHsvF(hue, BACKGROUND_NUMBER_SATURATION, BACKGROUND_NUMBER_VALUE, BACKGROUND_NUMBER_ALPHA) return color",Background color depending on value. "def sync(self, resource_types='[""all""]'): """"""Synchronize the user's data with the Todoist server. This function will pull data from the Todoist server and update the state of the user object such that they match. It does not *push* data to Todoist. If you want to do that use :func:`pytodoist.todoist.User.update`. :param resource_types: A JSON-encoded list of Todoist resources which should be synced. By default this is everything, but you can choose to sync only selected resources. See `here `_ for a list of resources. """""" response = API.sync(self.api_token, '*', resource_types) _fail_if_contains_errors(response) response_json = response.json() self.sync_token = response_json['sync_token'] if 'projects' in response_json: self._sync_projects(response_json['projects']) if 'items' in response_json: self._sync_tasks(response_json['items']) if 'notes' in response_json: self._sync_notes(response_json['notes']) if 'labels' in response_json: self._sync_labels(response_json['labels']) if 'filters' in response_json: self._sync_filters(response_json['filters']) if 'reminders' in response_json: self._sync_reminders(response_json['reminders'])","Synchronize the user's data with the Todoist server. This function will pull data from the Todoist server and update the state of the user object such that they match. It does not *push* data to Todoist. If you want to do that use :func:`pytodoist.todoist.User.update`. 
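The hue interpolation used by get_bgcolor in the row above can be checked in isolation; the two constants below are illustrative assumptions, not the originals:

# Values near the column maximum get the base hue; values near the minimum
# drift toward base hue + range, clamped to 1.0, as in get_bgcolor.
BACKGROUND_NUMBER_MINHUE = 0.66    # assumed value for illustration
BACKGROUND_NUMBER_HUERANGE = 0.33  # assumed value for illustration

def value_to_hue(value, vmin, vmax):
    hue = (BACKGROUND_NUMBER_MINHUE
           + BACKGROUND_NUMBER_HUERANGE * (vmax - float(value)) / (vmax - vmin))
    return min(abs(hue), 1.0)

if __name__ == "__main__":
    print(value_to_hue(10.0, 0.0, 10.0))  # 0.66 (maximum value -> base hue)
    print(value_to_hue(0.0, 0.0, 10.0))   # 0.99 (minimum value -> base hue + range)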
:param resource_types: A JSON-encoded list of Todoist resources which should be synced. By default this is everything, but you can choose to sync only selected resources. See `here `_ for a list of resources." "def build(self): """"""Only build and create Slackware package """""" pkg_security([self.name]) self.error_uns() if self.FAULT: print("""") self.msg.template(78) print(""| Package {0} {1} {2} {3}"".format(self.prgnam, self.red, self.FAULT, self.endc)) self.msg.template(78) else: sources = [] if not os.path.exists(self.meta.build_path): os.makedirs(self.meta.build_path) if not os.path.exists(self._SOURCES): os.makedirs(self._SOURCES) os.chdir(self.meta.build_path) Download(self.meta.build_path, self.sbo_dwn.split(), repo=""sbo"").start() Download(self._SOURCES, self.source_dwn, repo=""sbo"").start() script = self.sbo_dwn.split(""/"")[-1] for src in self.source_dwn: sources.append(src.split(""/"")[-1]) BuildPackage(script, sources, self.meta.build_path, auto=False).build() slack_package(self.prgnam)",Only build and create Slackware package "def process_results(self): """""" function that is called when a stage is completed and needs to be analyzed befor further computations. The code here implements the original SH algorithms by advancing the k-best (lowest loss) configurations at the current budget. k is defined by the num_configs list (see __init__) and the current stage value. For more advanced methods like resampling after each stage, overload this function only. """""" self.stage += 1 # collect all config_ids that need to be compared config_ids = list(filter(lambda cid: self.data[cid].status == 'REVIEW', self.data.keys())) if (self.stage >= len(self.num_configs)): self.finish_up() return budgets = [self.data[cid].budget for cid in config_ids] if len(set(budgets)) > 1: raise RuntimeError('Not all configurations have the same budget!') budget = self.budgets[self.stage-1] losses = np.array([self.data[cid].results[budget]['loss'] for cid in config_ids]) advance = self._advance_to_next_stage(config_ids, losses) for i, a in enumerate(advance): if a: self.logger.debug('ITERATION: Advancing config %s to next budget %f'%(config_ids[i], self.budgets[self.stage])) for i, cid in enumerate(config_ids): if advance[i]: self.data[cid].status = 'QUEUED' self.data[cid].budget = self.budgets[self.stage] self.actual_num_configs[self.stage] += 1 else: self.data[cid].status = 'TERMINATED'","function that is called when a stage is completed and needs to be analyzed befor further computations. The code here implements the original SH algorithms by advancing the k-best (lowest loss) configurations at the current budget. k is defined by the num_configs list (see __init__) and the current stage value. For more advanced methods like resampling after each stage, overload this function only." "def rwishart_cov(n, C): """""" Return a Wishart random matrix. :Parameters: n : int Degrees of freedom, > 0. C : matrix Symmetric and positive definite """""" # return rwishart(n, np.linalg.inv(C)) p = np.shape(C)[0] # Need cholesky decomposition of precision matrix C^-1? sig = np.linalg.cholesky(C) if n <= (p-1): raise ValueError('Wishart parameter n must be greater ' 'than size of matrix.') norms = np.random.normal(size=(p * (p - 1)) // 2) chi_sqs = np.sqrt(np.random.chisquare(df=np.arange(n, n - p, -1))) A = flib.expand_triangular(chi_sqs, norms) flib.dtrmm_wrap(sig, A, side='L', uplo='L', transa='N', alpha=1.) w = np.asmatrix(np.dot(A, A.T)) flib.symmetrize(w) return w","Return a Wishart random matrix. 
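A hedged NumPy-only sketch of the Bartlett-style construction described above for rwishart_cov: a lower-triangular factor with chi-distributed diagonal and standard-normal off-diagonal entries is scaled by the Cholesky factor of C. This replaces the flib helpers used by the original, so it is an approximation of that code path rather than a drop-in substitute:

import numpy as np

def rwishart_cov_sketch(n, C, rng=None):
    rng = rng or np.random.default_rng()
    C = np.asarray(C, dtype=float)
    p = C.shape[0]
    if n <= p - 1:
        raise ValueError("Wishart parameter n must be greater than size of matrix.")
    L = np.linalg.cholesky(C)
    A = np.zeros((p, p))
    # diagonal: sqrt of chi-square draws with degrees of freedom n, n-1, ..., n-p+1
    A[np.diag_indices(p)] = np.sqrt(rng.chisquare(df=np.arange(n, n - p, -1)))
    # strictly lower triangle: standard normals
    A[np.tril_indices(p, k=-1)] = rng.normal(size=(p * (p - 1)) // 2)
    LA = L @ A
    return LA @ LA.T

if __name__ == "__main__":
    C = np.array([[2.0, 0.3], [0.3, 1.0]])
    W = rwishart_cov_sketch(10, C, rng=np.random.default_rng(1))
    print(W.shape)  # (2, 2); E[W] is n * C under this construction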
:Parameters: n : int Degrees of freedom, > 0. C : matrix Symmetric and positive definite" "def anonymize(self, value): """""" Should this recipe be anonymized"""""" assert isinstance(value, bool) if self._anonymize != value: self.dirty = True self._anonymize = value # Builder pattern must return the recipe return self.recipe",Should this recipe be anonymized "def create_asset_accesspolicy(access_token, name, duration, permission=""1""): '''Create Media Service Asset Access Policy. Args: access_token (str): A valid Azure authentication token. name (str): A Media Service Asset Access Policy Name. duration (str): A Media Service duration. permission (str): A Media Service permission. Returns: HTTP response. JSON body. ''' path = '/AccessPolicies' endpoint = ''.join([ams_rest_endpoint, path]) body = '{ \ ""Name"": ""' + str(name) + '"", \ ""DurationInMinutes"": ""' + duration + '"", \ ""Permissions"": ""' + permission + '"" \ }' return do_ams_post(endpoint, path, body, access_token)","Create Media Service Asset Access Policy. Args: access_token (str): A valid Azure authentication token. name (str): A Media Service Asset Access Policy Name. duration (str): A Media Service duration. permission (str): A Media Service permission. Returns: HTTP response. JSON body." "def shell(cmd, *args, **kwargs): # type: (Union[str, unicode], *Union[str, unicode], **Any) ->Tuple[int, str] """""" Execute shell command and return output Args: cmd (str): the command itself, i.e. part until the first space *args: positional arguments, i.e. other space-separated parts rel_path (bool): execute relative to the path (default: `False`) raise_on_status(bool): bool, raise exception if command exited with non-zero status (default: `True`) stderr (file-like): file-like object to collect stderr output, None by default Returns: Tuple[int, str]: status, shell output """""" if kwargs.get('rel_path') and not cmd.startswith(""/""): cmd = os.path.join(kwargs['rel_path'], cmd) status = 0 try: output = subprocess.check_output( (cmd,) + args, stderr=kwargs.get('stderr')) except subprocess.CalledProcessError as e: if kwargs.get('raise_on_status', True): raise e output = e.output status = e.returncode except OSError as e: # command not found if kwargs.get('raise_on_status', True): raise e if 'stderr' in kwargs: kwargs['stderr'].write(e.message) return -1, """" if six.PY3: output = output.decode('utf8') return status, output","Execute shell command and return output Args: cmd (str): the command itself, i.e. part until the first space *args: positional arguments, i.e. other space-separated parts rel_path (bool): execute relative to the path (default: `False`) raise_on_status(bool): bool, raise exception if command exited with non-zero status (default: `True`) stderr (file-like): file-like object to collect stderr output, None by default Returns: Tuple[int, str]: status, shell output" "def do_GET_body(self): """"""Create body of GET."""""" iiif = self.iiif if (len(self.path) > 1024): raise IIIFError(code=414, text=""URI Too Long: Max 1024 chars, got %d\n"" % len(self.path)) try: # self.path has leading / then identifier/params... 
self.path = self.path.lstrip('/') sys.stderr.write(""path = %s"" % (self.path)) iiif.parse_url(self.path) except Exception as e: # Something completely unexpected => 500 raise IIIFError(code=500, text=""Internal Server Error: unexpected exception parsing request ("" + str(e) + "")"") # Now we have a full iiif request if (re.match('[\w\.\-]+$', iiif.identifier)): file = os.path.join(TESTIMAGE_DIR, iiif.identifier) if (not os.path.isfile(file)): images_available = """" for image_file in os.listdir(TESTIMAGE_DIR): if (os.path.isfile(os.path.join(TESTIMAGE_DIR, image_file))): images_available += "" "" + image_file + ""\n"" raise IIIFError(code=404, parameter=""identifier"", text=""Image resource '"" + iiif.identifier + ""' not found. Local image files available:\n"" + images_available) else: raise IIIFError(code=404, parameter=""identifier"", text=""Image resource '"" + iiif.identifier + ""' not found. Only local test images and http: URIs for images are supported.\n"") # Now know image is OK manipulator = IIIFRequestHandler.manipulator_class() # Stash manipulator object so we can cleanup after reading file self.manipulator = manipulator self.compliance_uri = manipulator.compliance_uri if (iiif.info): # get size manipulator.srcfile = file manipulator.do_first() # most of info.json comes from config, a few things # specific to image i = IIIFInfo() i.identifier = self.iiif.identifier i.width = manipulator.width i.height = manipulator.height import io return(io.StringIO(i.as_json()), ""application/json"") else: (outfile, mime_type) = manipulator.derive(file, iiif) return(open(outfile, 'r'), mime_type)",Create body of GET. "def run_process(cwd, args): """"""Executes an external process via subprocess.Popen"""""" try: process = check_output(args, cwd=cwd, stderr=STDOUT) return process except CalledProcessError as e: log('Uh oh, the teapot broke again! Error:', e, type(e), lvl=verbose, pretty=True) log(e.cmd, e.returncode, e.output, lvl=verbose) return e.output",Executes an external process via subprocess.Popen "def _design_matrix( model: pd.DataFrame, batch_key: str, batch_levels: Collection[str], ) -> pd.DataFrame: """""" Computes a simple design matrix. Parameters -------- model Contains the batch annotation batch_key Name of the batch column batch_levels Levels of the batch annotation Returns -------- The design matrix for the regression problem """""" import patsy design = patsy.dmatrix( ""~ 0 + C(Q('{}'), levels=batch_levels)"".format(batch_key), model, return_type=""dataframe"", ) model = model.drop([batch_key], axis=1) numerical_covariates = model.select_dtypes('number').columns.values logg.info(""Found {} batches\n"".format(design.shape[1])) other_cols = [c for c in model.columns.values if c not in numerical_covariates] if other_cols: col_repr = "" + "".join(""Q('{}')"".format(x) for x in other_cols) factor_matrix = patsy.dmatrix(""~ 0 + {}"".format(col_repr), model[other_cols], return_type=""dataframe"") design = pd.concat((design, factor_matrix), axis=1) logg.info(""Found {} categorical variables:"".format(len(other_cols))) logg.info(""\t"" + "", "".join(other_cols) + '\n') if numerical_covariates is not None: logg.info(""Found {} numerical variables:"".format(len(numerical_covariates))) logg.info(""\t"" + "", "".join(numerical_covariates) + '\n') for nC in numerical_covariates: design[nC] = model[nC] return design","Computes a simple design matrix. 
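The one-hot batch encoding that _design_matrix builds with patsy (described just below in its docstring) can be reproduced on a toy frame; the column names are left exactly as patsy generates them, and the simpler formula string here is an illustrative stand-in for the C(Q('batch'), levels=...) term used by the original:

import pandas as pd
import patsy

model = pd.DataFrame({
    "batch": ["b1", "b1", "b2", "b3"],
    "n_counts": [1200.0, 800.0, 950.0, 1100.0],  # a numerical covariate
})

# one indicator column per batch, no intercept
design = patsy.dmatrix("~ 0 + C(batch)", model, return_type="dataframe")
design["n_counts"] = model["n_counts"].values
print(design)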
Parameters -------- model Contains the batch annotation batch_key Name of the batch column batch_levels Levels of the batch annotation Returns -------- The design matrix for the regression problem" "def add_to_cache(self, cache_id, inputs, output): """"""This adds cache_id to the cache, with inputs and output"""""" self.inputs_changed[cache_id] = False self.cached_outputs[cache_id] = output self.order.append(cache_id) self.cached_inputs[cache_id] = inputs for a in inputs: if a is not None and not isinstance(a, Number) and not isinstance(a, str): ind_id = self.id(a) v = self.cached_input_ids.get(ind_id, [weakref.ref(a), []]) v[1].append(cache_id) if len(v[1]) == 1: a.add_observer(self, self.on_cache_changed) self.cached_input_ids[ind_id] = v","This adds cache_id to the cache, with inputs and output" "def insert_into(self, table=None, field_names=None, values=None, **kwargs): """""" Bulk inserts a list of values into a table :type table: str or dict or :class:`Table ` or :class:`Query ` or :class:`ModelBase ` :param table: The table to select fields from. This can be a string of the table name, a dict of {'alias': table}, a ``Table`` instance, a Query instance, or a django Model instance :type field_names: list :param field_names: A list of ordered field names that relate to the data in the values list :type values: list of list :param values: A list each values list with the values in the same order as the field names :param kwargs: Any additional parameters to be passed into the constructor of ``TableFactory`` :return: self :rtype: :class:`Query ` """""" table = TableFactory( table=table, **kwargs ) self.tables.append(table) self.field_names = field_names self.values = values return self","Bulk inserts a list of values into a table :type table: str or dict or :class:`Table ` or :class:`Query ` or :class:`ModelBase ` :param table: The table to select fields from. This can be a string of the table name, a dict of {'alias': table}, a ``Table`` instance, a Query instance, or a django Model instance :type field_names: list :param field_names: A list of ordered field names that relate to the data in the values list :type values: list of list :param values: A list each values list with the values in the same order as the field names :param kwargs: Any additional parameters to be passed into the constructor of ``TableFactory`` :return: self :rtype: :class:`Query `" "def __remove_surrogates(self, s, method='replace'): """""" Remove surrogates in the specified string """""" if type(s) == list and len(s) == 1: if self.__is_surrogate_escaped(s[0]): return s[0].encode('utf-8', method).decode('utf-8') else: return """" if type(s) == list: return """" if type(s) != str: return """" if self.__is_surrogate_escaped(s): return s.encode('utf-8', method).decode('utf-8') return s",Remove surrogates in the specified string "def parse(s): """"""Parse full s-expr from bytes."""""" if s.startswith(b'('): s = s[1:] name, s = parse_term(s) values = [name] while not s.startswith(b')'): value, s = parse(s) values.append(value) return values, s[1:] return parse_term(s)",Parse full s-expr from bytes. "def eval_condition(self, event): """""" Evaluates the breakpoint condition, if any was set. @type event: L{Event} @param event: Debug event triggered by the breakpoint. @rtype: bool @return: C{True} to dispatch the event, C{False} otherwise. 
"""""" condition = self.get_condition() if condition is True: # shortcut for unconditional breakpoints return True if callable(condition): try: return bool( condition(event) ) except Exception: e = sys.exc_info()[1] msg = (""Breakpoint condition callback %r"" "" raised an exception: %s"") msg = msg % (condition, traceback.format_exc(e)) warnings.warn(msg, BreakpointCallbackWarning) return False return bool( condition )","Evaluates the breakpoint condition, if any was set. @type event: L{Event} @param event: Debug event triggered by the breakpoint. @rtype: bool @return: C{True} to dispatch the event, C{False} otherwise." "def get_surrogate_id(self): """""" This is responsible for building the surrogate id from the model """""" surrogate_id = ""%s,%s,%s,%s,%s"" % (self.year, self.quarter, self.curriculum_abbr.lower(), self.course_number, self.section_id.lower()) return surrogate_id",This is responsible for building the surrogate id from the model "def parseArgs(): """"""Parses arguments passed in via the command line"""""" parser = argparse.ArgumentParser() parser.add_argument(""name"", help=""the file you want to split"") parser.add_argument(""out1"", help=""the name of the first file you want to output"") parser.add_argument(""out2"", help=""the name of the second file you want to output"") return parser.parse_args()",Parses arguments passed in via the command line "def db_create(database, containment='NONE', new_database_options=None, **kwargs): ''' Creates a new database. Does not update options of existing databases. new_database_options can only be a list of strings CLI Example: .. code-block:: bash salt minion mssql.db_create DB_NAME ''' if containment not in ['NONE', 'PARTIAL']: return 'CONTAINMENT can be one of NONE and PARTIAL' sql = ""CREATE DATABASE [{0}] CONTAINMENT = {1} "".format(database, containment) if new_database_options: sql += ' WITH ' + ', '.join(new_database_options) conn = None try: conn = _get_connection(**kwargs) conn.autocommit(True) # cur = conn.cursor() # cur.execute(sql) conn.cursor().execute(sql) except Exception as e: return 'Could not create the login: {0}'.format(e) finally: if conn: conn.autocommit(False) conn.close() return True","Creates a new database. Does not update options of existing databases. new_database_options can only be a list of strings CLI Example: .. code-block:: bash salt minion mssql.db_create DB_NAME" "def each(iterable = None, *, name = None, metric = call_default): """"""Measure time elapsed to produce each item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric """""" if iterable is None: return _each_decorator(name, metric) else: return _do_each(iterable, name, metric)","Measure time elapsed to produce each item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric" "def default_token_implementation(self, user_id): """""" Default JWT token implementation This is used by default for generating user tokens if custom implementation was not configured. The token will contain user_id and expiration date. If you need more information added to the token, register your custom implementation. It will load a user to see if token is already on file. If it is, the existing token will be checked for expiration and returned if valid. Otherwise a new token will be generated and persisted. This can be used to perform token revocation. 
:param user_id: int, user id :return: string """""" user = self.get(user_id) if not user: msg = 'No user with such id [{}]' raise x.JwtNoUser(msg.format(user_id)) # return token if exists and valid if user._token: try: self.decode_token(user._token) return user._token except jwt.exceptions.ExpiredSignatureError: pass from_now = datetime.timedelta(seconds=self.jwt_lifetime) expires = datetime.datetime.utcnow() + from_now issued = datetime.datetime.utcnow() not_before = datetime.datetime.utcnow() data = dict( exp=expires, nbf=not_before, iat=issued, user_id=user_id ) token = jwt.encode(data, self.jwt_secret, algorithm=self.jwt_algo) string_token = token.decode('utf-8') user._token = string_token self.save(user) return string_token","Default JWT token implementation This is used by default for generating user tokens if custom implementation was not configured. The token will contain user_id and expiration date. If you need more information added to the token, register your custom implementation. It will load a user to see if token is already on file. If it is, the existing token will be checked for expiration and returned if valid. Otherwise a new token will be generated and persisted. This can be used to perform token revocation. :param user_id: int, user id :return: string" "def on_message(self, client, userdata, msg): ''' Callback for when a ``PUBLISH`` message is received from the broker. ''' if msg.topic == 'serial_device/refresh_comports': self.refresh_comports() return match = CRE_MANAGER.match(msg.topic) if match is None: logger.debug('Topic NOT matched: `%s`', msg.topic) else: logger.debug('Topic matched: `%s`', msg.topic) # Message topic matches command. Handle request. command = match.group('command') port = match.group('port') # serial_device//send # Bytes to send if command == 'send': self._serial_send(port, msg.payload) elif command == 'connect': # serial_device//connect # Request connection try: request = json.loads(msg.payload) except ValueError as exception: logger.error('Error decoding ""%s (%s)"" request: %s', command, port, exception) return self._serial_connect(port, request) elif command == 'close': self._serial_close(port)",Callback for when a ``PUBLISH`` message is received from the broker. "def build_pyfile_path_from_docname(self, docfile): """"""Build the expected Python file name based on the given documentation file name. :param str docfile: The documentation file name from which to build the Python file name. :rtype: str """""" name, ext = os.path.splitext(docfile) expected_py_name = name.replace('.', '/') + '.py' return expected_py_name","Build the expected Python file name based on the given documentation file name. :param str docfile: The documentation file name from which to build the Python file name. :rtype: str" "def federated_query(self, environment_id, filter=None, query=None, natural_language_query=None, passages=None, aggregation=None, count=None, return_fields=None, offset=None, sort=None, highlight=None, passages_fields=None, passages_count=None, passages_characters=None, deduplicate=None, deduplicate_field=None, collection_ids=None, similar=None, similar_document_ids=None, similar_fields=None, bias=None, logging_opt_out=None, **kwargs): """""" Long environment queries. Complex queries might be too long for a standard method query. By using this method, you can construct longer queries. However, these queries may take longer to complete than the standard method. 
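The claim layout described earlier for default_token_implementation (exp, nbf, iat and user_id) can be sketched with PyJWT alone; the user lookup and persistence are omitted, and the secret, algorithm and lifetime below are illustrative assumptions:

import datetime
import jwt  # PyJWT

JWT_SECRET = "change-me"   # assumed value
JWT_ALGO = "HS256"         # assumed value
JWT_LIFETIME = 3600        # seconds, assumed value

def issue_token(user_id):
    now = datetime.datetime.utcnow()
    claims = {
        "exp": now + datetime.timedelta(seconds=JWT_LIFETIME),
        "nbf": now,
        "iat": now,
        "user_id": user_id,
    }
    token = jwt.encode(claims, JWT_SECRET, algorithm=JWT_ALGO)
    # PyJWT < 2.0 returns bytes, >= 2.0 returns str
    return token.decode("utf-8") if isinstance(token, bytes) else token

def verify_token(token):
    # raises jwt.exceptions.ExpiredSignatureError once the token has expired
    return jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGO])

if __name__ == "__main__":
    token = issue_token(42)
    print(verify_token(token)["user_id"])  # 42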
For details, see the [Discovery service documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-query-concepts#query-concepts). :param str environment_id: The ID of the environment. :param str filter: A cacheable query that excludes documents that don't mention the query content. Filter searches are better for metadata-type searches and for assessing the concepts in the data set. :param str query: A query search returns all documents in your data set with full enrichments and full text, but with the most relevant documents listed first. Use a query search when you want to find the most relevant search results. You cannot use **natural_language_query** and **query** at the same time. :param str natural_language_query: A natural language query that returns relevant documents by utilizing training data and natural language understanding. You cannot use **natural_language_query** and **query** at the same time. :param bool passages: A passages query that returns the most relevant passages from the results. :param str aggregation: An aggregation search that returns an exact answer by combining query search with filters. Useful for applications to build lists, tables, and time series. For a full list of possible aggregations, see the Query reference. :param int count: Number of results to return. :param str return_fields: A comma-separated list of the portion of the document hierarchy to return. :param int offset: The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10 and the offset is 8, it returns the last two results. :param str sort: A comma-separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending is the default sort direction if no prefix is specified. This parameter cannot be used in the same query as the **bias** parameter. :param bool highlight: When true, a highlight field is returned for each result which contains the fields which match the query with `` tags around the matching query terms. :param str passages_fields: A comma-separated list of fields that passages are drawn from. If this parameter not specified, then all top-level fields are included. :param int passages_count: The maximum number of passages to return. The search returns fewer passages if the requested total is not found. The default is `10`. The maximum is `100`. :param int passages_characters: The approximate number of characters that any one passage will have. :param bool deduplicate: When `true`, and used with a Watson Discovery News collection, duplicate results (based on the contents of the **title** field) are removed. Duplicate comparison is limited to the current query only; **offset** is not considered. This parameter is currently Beta functionality. :param str deduplicate_field: When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, **offset** is not considered. This parameter is currently Beta functionality. :param str collection_ids: A comma-separated list of collection IDs to be queried against. Required when querying multiple collections, invalid when performing a single collection query. :param bool similar: When `true`, results are returned based on their similarity to the document IDs specified in the **similar.document_ids** parameter. 
:param str similar_document_ids: A comma-separated list of document IDs to find similar documents. **Tip:** Include the **natural_language_query** parameter to expand the scope of the document similarity search with the natural language query. Other query parameters, such as **filter** and **query**, are subsequently applied and reduce the scope. :param str similar_fields: A comma-separated list of field names that are used as a basis for comparison to identify similar documents. If not specified, the entire document is used for comparison. :param str bias: Field which the returned results will be biased against. The specified field must be either a **date** or **number** format. When a **date** type field is specified returned results are biased towards field values closer to the current date. When a **number** type field is specified, returned results are biased towards higher field values. This parameter cannot be used in the same query as the **sort** parameter. :param bool logging_opt_out: If `true`, queries are not stored in the Discovery **Logs** endpoint. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """""" if environment_id is None: raise ValueError('environment_id must be provided') headers = {'X-Watson-Logging-Opt-Out': logging_opt_out} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('discovery', 'V1', 'federated_query') headers.update(sdk_headers) params = {'version': self.version} data = { 'filter': filter, 'query': query, 'natural_language_query': natural_language_query, 'passages': passages, 'aggregation': aggregation, 'count': count, 'return': return_fields, 'offset': offset, 'sort': sort, 'highlight': highlight, 'passages.fields': passages_fields, 'passages.count': passages_count, 'passages.characters': passages_characters, 'deduplicate': deduplicate, 'deduplicate.field': deduplicate_field, 'collection_ids': collection_ids, 'similar': similar, 'similar.document_ids': similar_document_ids, 'similar.fields': similar_fields, 'bias': bias } url = '/v1/environments/{0}/query'.format( *self._encode_path_vars(environment_id)) response = self.request( method='POST', url=url, headers=headers, params=params, json=data, accept_json=True) return response","Long environment queries. Complex queries might be too long for a standard method query. By using this method, you can construct longer queries. However, these queries may take longer to complete than the standard method. For details, see the [Discovery service documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-query-concepts#query-concepts). :param str environment_id: The ID of the environment. :param str filter: A cacheable query that excludes documents that don't mention the query content. Filter searches are better for metadata-type searches and for assessing the concepts in the data set. :param str query: A query search returns all documents in your data set with full enrichments and full text, but with the most relevant documents listed first. Use a query search when you want to find the most relevant search results. You cannot use **natural_language_query** and **query** at the same time. :param str natural_language_query: A natural language query that returns relevant documents by utilizing training data and natural language understanding. You cannot use **natural_language_query** and **query** at the same time. 
:param bool passages: A passages query that returns the most relevant passages from the results. :param str aggregation: An aggregation search that returns an exact answer by combining query search with filters. Useful for applications to build lists, tables, and time series. For a full list of possible aggregations, see the Query reference. :param int count: Number of results to return. :param str return_fields: A comma-separated list of the portion of the document hierarchy to return. :param int offset: The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10 and the offset is 8, it returns the last two results. :param str sort: A comma-separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending is the default sort direction if no prefix is specified. This parameter cannot be used in the same query as the **bias** parameter. :param bool highlight: When true, a highlight field is returned for each result which contains the fields which match the query with `` tags around the matching query terms. :param str passages_fields: A comma-separated list of fields that passages are drawn from. If this parameter not specified, then all top-level fields are included. :param int passages_count: The maximum number of passages to return. The search returns fewer passages if the requested total is not found. The default is `10`. The maximum is `100`. :param int passages_characters: The approximate number of characters that any one passage will have. :param bool deduplicate: When `true`, and used with a Watson Discovery News collection, duplicate results (based on the contents of the **title** field) are removed. Duplicate comparison is limited to the current query only; **offset** is not considered. This parameter is currently Beta functionality. :param str deduplicate_field: When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, **offset** is not considered. This parameter is currently Beta functionality. :param str collection_ids: A comma-separated list of collection IDs to be queried against. Required when querying multiple collections, invalid when performing a single collection query. :param bool similar: When `true`, results are returned based on their similarity to the document IDs specified in the **similar.document_ids** parameter. :param str similar_document_ids: A comma-separated list of document IDs to find similar documents. **Tip:** Include the **natural_language_query** parameter to expand the scope of the document similarity search with the natural language query. Other query parameters, such as **filter** and **query**, are subsequently applied and reduce the scope. :param str similar_fields: A comma-separated list of field names that are used as a basis for comparison to identify similar documents. If not specified, the entire document is used for comparison. :param str bias: Field which the returned results will be biased against. The specified field must be either a **date** or **number** format. When a **date** type field is specified returned results are biased towards field values closer to the current date. When a **number** type field is specified, returned results are biased towards higher field values. This parameter cannot be used in the same query as the **sort** parameter. 
:param bool logging_opt_out: If `true`, queries are not stored in the Discovery **Logs** endpoint. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse" "def _ibis_sqlite_regex_extract(string, pattern, index): """"""Extract match of regular expression `pattern` from `string` at `index`. Parameters ---------- string : str pattern : str index : int Returns ------- result : str or None """""" result = re.search(pattern, string) if result is not None and 0 <= index <= (result.lastindex or -1): return result.group(index) return None","Extract match of regular expression `pattern` from `string` at `index`. Parameters ---------- string : str pattern : str index : int Returns ------- result : str or None" "def increment(self, conn, seqName, transaction = False, incCount=1): """""" increments the sequence `seqName` by default `Incremented by one` and returns its value """""" try: seqTable = ""%sS"" %seqName tlock = ""lock tables %s write"" %seqTable self.dbi.processData(tlock, [], conn, transaction) sql = ""select ID from %s"" % seqTable result = self.dbi.processData(sql, [], conn, transaction) resultlist = self.formatDict(result) newSeq = resultlist[0]['id']+incCount sql = ""UPDATE %s SET ID=:seq_count"" % seqTable seqparms={""seq_count"" : newSeq} self.dbi.processData(sql, seqparms, conn, transaction) tunlock = ""unlock tables"" self.dbi.processData(tunlock, [], conn, transaction) return newSeq except: #FIXME tunlock = ""unlock tables"" self.dbi.processData(tunlock, [], conn, transaction) raise","increments the sequence `seqName` by default `Incremented by one` and returns its value" "def text_to_vector(sent_str): """"""Given a string, get its deflated vector, inflate it, then return the inflated vector"""""" r = requests.get(""{}/sva/vector"".format(VECTORIZE_API), params={'s':sent_str}) return inflate(r.text)","Given a string, get its deflated vector, inflate it, then return the inflated vector" "def afni_wf(name='AFNISkullStripWorkflow', unifize=False, n4_nthreads=1): """""" Skull-stripping workflow Originally derived from the `codebase of the QAP `_. Now, this workflow includes :abbr:`INU (intensity non-uniformity)` correction using the N4 algorithm and (optionally) intensity harmonization using AFNI's ``3dUnifize``. """""" workflow = pe.Workflow(name=name) inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['bias_corrected', 'out_file', 'out_mask', 'bias_image']), name='outputnode') inu_n4 = pe.Node( ants.N4BiasFieldCorrection(dimension=3, save_bias=True, num_threads=n4_nthreads, copy_header=True), n_procs=n4_nthreads, name='inu_n4') sstrip = pe.Node(afni.SkullStrip(outputtype='NIFTI_GZ'), name='skullstrip') sstrip_orig_vol = pe.Node(afni.Calc( expr='a*step(b)', outputtype='NIFTI_GZ'), name='sstrip_orig_vol') binarize = pe.Node(fsl.Threshold(args='-bin', thresh=1.e-3), name='binarize') if unifize: # Add two unifize steps, pre- and post- skullstripping.
inu_uni_0 = pe.Node(afni.Unifize(outputtype='NIFTI_GZ'), name='unifize_pre_skullstrip') inu_uni_1 = pe.Node(afni.Unifize(gm=True, outputtype='NIFTI_GZ'), name='unifize_post_skullstrip') workflow.connect([ (inu_n4, inu_uni_0, [('output_image', 'in_file')]), (inu_uni_0, sstrip, [('out_file', 'in_file')]), (inu_uni_0, sstrip_orig_vol, [('out_file', 'in_file_a')]), (sstrip_orig_vol, inu_uni_1, [('out_file', 'in_file')]), (inu_uni_1, outputnode, [('out_file', 'out_file')]), (inu_uni_0, outputnode, [('out_file', 'bias_corrected')]), ]) else: workflow.connect([ (inputnode, sstrip_orig_vol, [('in_file', 'in_file_a')]), (inu_n4, sstrip, [('output_image', 'in_file')]), (sstrip_orig_vol, outputnode, [('out_file', 'out_file')]), (inu_n4, outputnode, [('output_image', 'bias_corrected')]), ]) # Remaining connections workflow.connect([ (sstrip, sstrip_orig_vol, [('out_file', 'in_file_b')]), (inputnode, inu_n4, [('in_file', 'input_image')]), (sstrip_orig_vol, binarize, [('out_file', 'in_file')]), (binarize, outputnode, [('out_file', 'out_mask')]), (inu_n4, outputnode, [('bias_image', 'bias_image')]), ]) return workflow","Skull-stripping workflow Originally derived from the `codebase of the QAP `_. Now, this workflow includes :abbr:`INU (intensity non-uniformity)` correction using the N4 algorithm and (optionally) intensity harmonization using AFNI's ``3dUnifize``." "def get_replies_to(self, msg): """""" returns all replies to the given message contained in this thread. :param msg: parent message to look up :type msg: :class:`~alot.db.message.Message` :returns: list of :class:`~alot.db.message.Message` or `None` """""" mid = msg.get_message_id() msg_hash = self.get_messages() for m in msg_hash.keys(): if m.get_message_id() == mid: return msg_hash[m] return None","returns all replies to the given message contained in this thread. :param msg: parent message to look up :type msg: :class:`~alot.db.message.Message` :returns: list of :class:`~alot.db.message.Message` or `None`" "def emit_code_from_ir(sql_query_tree, compiler_metadata): """"""Return a SQLAlchemy Query from a passed SqlQueryTree. Args: sql_query_tree: SqlQueryTree, tree representation of the query to emit. compiler_metadata: SqlMetadata, SQLAlchemy specific metadata. Returns: SQLAlchemy Query """""" context = CompilationContext( query_path_to_selectable=dict(), query_path_to_location_info=sql_query_tree.query_path_to_location_info, query_path_to_output_fields=sql_query_tree.query_path_to_output_fields, query_path_to_filters=sql_query_tree.query_path_to_filters, query_path_to_node=sql_query_tree.query_path_to_node, compiler_metadata=compiler_metadata, ) return _query_tree_to_query(sql_query_tree.root, context)","Return a SQLAlchemy Query from a passed SqlQueryTree. Args: sql_query_tree: SqlQueryTree, tree representation of the query to emit. compiler_metadata: SqlMetadata, SQLAlchemy specific metadata. Returns: SQLAlchemy Query" "def _enhance_keys(cls, keys=None, *args, **kwargs): """""" Enhance the given keys by groups Parameters ---------- keys: list of str or None If None, all the formatoptions of the given class are used.
Group names from the :attr:`psyplot.plotter.groups` mapping are replaced by the formatoptions Other Parameters ---------------- %(check_key.parameters.kwargs)s Returns ------- list of str The enhanced list of the formatoptions"""""" all_keys = list(cls._get_formatoptions()) if isinstance(keys, six.string_types): keys = [keys] else: keys = list(keys or sorted(all_keys)) fmto_groups = defaultdict(list) for key in all_keys: fmto_groups[getattr(cls, key).group].append(key) new_i = 0 for i, key in enumerate(keys[:]): if key in fmto_groups: del keys[new_i] for key2 in fmto_groups[key]: if key2 not in keys: keys.insert(new_i, key2) new_i += 1 else: valid, similar, message = check_key( key, all_keys, False, 'formatoption keyword', *args, **kwargs) if not valid: keys.remove(key) new_i -= 1 warn(message) new_i += 1 return keys","Enhance the given keys by groups Parameters ---------- keys: list of str or None If None, all the formatoptions of the given class are used. Group names from the :attr:`psyplot.plotter.groups` mapping are replaced by the formatoptions Other Parameters ---------------- %(check_key.parameters.kwargs)s Returns ------- list of str The enhanced list of the formatoptions" "def endpoint_create(**kwargs): """""" Executor for `globus endpoint create` """""" client = get_client() # get endpoint type, ensure unambiguous. personal = kwargs.pop(""personal"") server = kwargs.pop(""server"") shared = kwargs.pop(""shared"") if personal and (not server) and (not shared): endpoint_type = ""personal"" elif server and (not personal) and (not shared): endpoint_type = ""server"" elif shared and (not personal) and (not server): endpoint_type = ""shared"" else: raise click.UsageError( ""Exactly one of --personal, --server, or --shared is required."" ) # validate options kwargs[""is_globus_connect""] = personal or None validate_endpoint_create_and_update_params(endpoint_type, False, kwargs) # shared endpoint creation if shared: endpoint_id, host_path = shared kwargs[""host_endpoint""] = endpoint_id kwargs[""host_path""] = host_path ep_doc = assemble_generic_doc(""shared_endpoint"", **kwargs) autoactivate(client, endpoint_id, if_expires_in=60) res = client.create_shared_endpoint(ep_doc) # non shared endpoint creation else: # omit `is_globus_connect` key if not GCP, otherwise include as `True` ep_doc = assemble_generic_doc(""endpoint"", **kwargs) res = client.create_endpoint(ep_doc) # output formatted_print( res, fields=(COMMON_FIELDS + GCP_FIELDS if personal else COMMON_FIELDS), text_format=FORMAT_TEXT_RECORD, )",Executor for `globus endpoint create` "def select_entry(self, *arguments): """""" Select a password from the available choices. :param arguments: Refer to :func:`smart_search()`. :returns: The name of a password (a string) or :data:`None` (when no password matched the given `arguments`). """""" matches = self.smart_search(*arguments) if len(matches) > 1: logger.info(""More than one match, prompting for choice .."") labels = [entry.name for entry in matches] return matches[labels.index(prompt_for_choice(labels))] else: logger.info(""Matched one entry: %s"", matches[0].name) return matches[0]","Select a password from the available choices. :param arguments: Refer to :func:`smart_search()`. :returns: The name of a password (a string) or :data:`None` (when no password matched the given `arguments`)."
"def run_migration(name, major_version, minor_version, db, mod, conf={}): """""" Run migration script :param major_version: major version number of the migration :param minor_version: minor version number of the migration :param db: database connection object :param path: path of the migration script :param conf: application configuration (if any) """""" with db.transaction(): mod.up(db, conf) set_version(db, name, major_version, minor_version)","Run migration script :param major_version: major version number of the migration :param minor_version: minor version number of the migration :param db: database connection object :param path: path of the migration script :param conf: application configuration (if any)" "def _apply_scratch_as_phase(args: Dict[str, Any]): """"""Takes scratch shards and applies them as exponentiated phase to state. """""" state = _state_shard(args) state *= np.exp(I_PI_OVER_2 * _scratch_shard(args))",Takes scratch shards and applies them as exponentiated phase to state. "async def get_in_tree_template(link): """"""Get the in-tree json-e template for a given link. By convention, this template is SOURCE_REPO/.taskcluster.yml. Args: link (LinkOfTrust): the parent link to get the source url from. Raises: CoTError: on non-yaml `source_url` KeyError: on non-well-formed source template Returns: dict: the first task in the template. """""" context = link.context source_url = get_source_url(link) if not source_url.endswith(('.yml', '.yaml')): raise CoTError(""{} source url {} doesn't end in .yml or .yaml!"".format( link.name, source_url )) tmpl = await load_json_or_yaml_from_url( context, source_url, os.path.join( context.config[""work_dir""], ""{}_taskcluster.yml"".format(link.name) ) ) return tmpl","Get the in-tree json-e template for a given link. By convention, this template is SOURCE_REPO/.taskcluster.yml. Args: link (LinkOfTrust): the parent link to get the source url from. Raises: CoTError: on non-yaml `source_url` KeyError: on non-well-formed source template Returns: dict: the first task in the template." "def parse_cache_control_header(value, on_update=None, cls=None): """"""Parse a cache control header. The RFC differs between response and request cache control, this method does not. It's your responsibility to not use the wrong control statements. .. versionadded:: 0.5 The `cls` was added. If not specified an immutable :class:`~werkzeug.datastructures.RequestCacheControl` is returned. :param value: a cache control header to be parsed. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.CacheControl` object is changed. :param cls: the class for the returned object. By default :class:`~werkzeug.datastructures.RequestCacheControl` is used. :return: a `cls` object. """""" if cls is None: cls = RequestCacheControl if not value: return cls(None, on_update) return cls(parse_dict_header(value), on_update)","Parse a cache control header. The RFC differs between response and request cache control, this method does not. It's your responsibility to not use the wrong control statements. .. versionadded:: 0.5 The `cls` was added. If not specified an immutable :class:`~werkzeug.datastructures.RequestCacheControl` is returned. :param value: a cache control header to be parsed. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.CacheControl` object is changed. :param cls: the class for the returned object. 
By default :class:`~werkzeug.datastructures.RequestCacheControl` is used. :return: a `cls` object." "def create(self): """""" Update with current tools for each branch at the version chosen """""" self.add_handlers({'^Q': self.quit}) self.add(npyscreen.TitleText, name='Select which tools to add from each branch selected:', editable=False) self.add(npyscreen.Textfield, value='NOTE tools you have already installed will be ignored', color='STANDOUT', editable=False) i = 6 for branch in self.parentApp.repo_value['versions']: self.tools_tc[branch] = {} self.add(npyscreen.TitleText, name='Branch: ' + branch, editable=False, rely=i, relx=5, max_width=25) tools = self.repo_tools(branch) i += 1 for tool in tools: value = True if tool.startswith('/dev'): value = False # tool in base directory if tool == '' or tool.startswith(':'): tool = '/' + tool self.tools_tc[branch][tool] = self.add(npyscreen.CheckBox, name=tool, value=value, relx=10) i += 1 i += 2",Update with current tools for each branch at the version chosen "def virtual_interface_create(provider, names, **kwargs): ''' Attach private interfaces to a server CLI Example: .. code-block:: bash salt minionname cloud.virtual_interface_create my-nova names=['salt-master'] net_name='salt' ''' client = _get_client() return client.extra_action(provider=provider, names=names, action='virtual_interface_create', **kwargs)","Attach private interfaces to a server CLI Example: .. code-block:: bash salt minionname cloud.virtual_interface_create my-nova names=['salt-master'] net_name='salt'" "def handleErrorEvents(self, msg): """""" logs error messages """""" # https://www.interactivebrokers.com/en/software/api/apiguide/tables/api_message_codes.htm if msg.errorCode is not None and msg.errorCode != -1 and \ msg.errorCode not in dataTypes[""BENIGN_ERROR_CODES""]: log = True # log disconnect errors only once if msg.errorCode in dataTypes[""DISCONNECT_ERROR_CODES""]: log = False if msg.errorCode not in self.connection_tracking[""errors""]: self.connection_tracking[""errors""].append(msg.errorCode) log = True if log: self.log.error(""[#%s] %s"" % (msg.errorCode, msg.errorMsg)) self.ibCallback(caller=""handleError"", msg=msg)",logs error messages "def abup_se_plot(mod,species): """""" plot species from one ABUPP file and the se file. You must use this function in the directory where the ABP files are and an ABUP file for model mod must exist. Parameters ---------- mod : integer Model to plot, you need to have an ABUPP file for that model. species : string The species to plot. Notes ----- The species is set to 'C-12'. """""" # Marco, you have already implemented finding headers and columns in # ABUP files. You may want to transplant that into here? species='C-12' filename = 'ABUPP%07d0000.DAT' % mod print(filename) mass,c12=np.loadtxt(filename,skiprows=4,usecols=[1,18],unpack=True) c12_se=self.se.get(mod,'iso_massf','C-12') mass_se=self.se.get(mod,'mass') pyl.plot(mass,c12) pyl.plot(mass_se,c12_se,'o',label='cycle '+str(mod)) pyl.legend()","plot species from one ABUPP file and the se file. You must use this function in the directory where the ABP files are and an ABUP file for model mod must exist. Parameters ---------- mod : integer Model to plot, you need to have an ABUPP file for that model. species : string The species to plot. Notes ----- The species is set to 'C-12'." "def start_health_check(self, recipient): """""" Starts a task for healthchecking `recipient` if there is not one yet. 
It also whitelists the address """""" if recipient not in self.addresses_events: self.whitelist(recipient) # noop for now, for compatibility ping_nonce = self.nodeaddresses_to_nonces.setdefault( recipient, {'nonce': 0}, # HACK: Allows the task to mutate the object ) events = healthcheck.HealthEvents( event_healthy=Event(), event_unhealthy=Event(), ) self.addresses_events[recipient] = events greenlet_healthcheck = gevent.spawn( healthcheck.healthcheck, self, recipient, self.event_stop, events.event_healthy, events.event_unhealthy, self.nat_keepalive_retries, self.nat_keepalive_timeout, self.nat_invitation_timeout, ping_nonce, ) greenlet_healthcheck.name = f'Healthcheck for {pex(recipient)}' greenlet_healthcheck.link_exception(self.on_error) self.greenlets.append(greenlet_healthcheck)","Starts a task for healthchecking `recipient` if there is not one yet. It also whitelists the address" "def range(self, location, distance): """"""Test whether locations are within a given range of ``location``. Args: location (Point): Location to test range against distance (float): Distance to test location is within Returns: list of list of Point: Groups of points in range per segment """""" return (segment.range(location, distance) for segment in self)","Test whether locations are within a given range of ``location``. Args: location (Point): Location to test range against distance (float): Distance to test location is within Returns: list of list of Point: Groups of points in range per segment" "def get_fba_obj_flux(self, objective): """"""Return the maximum objective flux solved by FBA."""""" flux_result = self.solve_fba(objective) return flux_result.get_value(self._v_wt[objective])",Return the maximum objective flux solved by FBA. "def _get_interfaces(self): """"""Get a list of interfaces on this hosting device. :return: List of the interfaces """""" ios_cfg = self._get_running_config() parse = HTParser(ios_cfg) itfcs_raw = parse.find_lines(""^interface GigabitEthernet"") itfcs = [raw_if.strip().split(' ')[1] for raw_if in itfcs_raw] LOG.debug(""Interfaces on hosting device: %s"", itfcs) return itfcs","Get a list of interfaces on this hosting device. :return: List of the interfaces" "def humanize_bytes(b, precision=1): """"""Return a humanized string representation of a number of b. Assumes `from __future__ import division`. >>> humanize_bytes(1) '1 byte' >>> humanize_bytes(1024) '1.0 kB' >>> humanize_bytes(1024*123) '123.0 kB' >>> humanize_bytes(1024*12342) '12.1 MB' >>> humanize_bytes(1024*12342,2) '12.05 MB' >>> humanize_bytes(1024*1234,2) '1.21 MB' >>> humanize_bytes(1024*1234*1111,2) '1.31 GB' >>> humanize_bytes(1024*1234*1111,1) '1.3 GB' """""" # abbrevs = ( # (1 << 50L, 'PB'), # (1 << 40L, 'TB'), # (1 << 30L, 'GB'), # (1 << 20L, 'MB'), # (1 << 10L, 'kB'), # (1, 'b') # ) abbrevs = ( (1 << 50, 'PB'), (1 << 40, 'TB'), (1 << 30, 'GB'), (1 << 20, 'MB'), (1 << 10, 'kB'), (1, 'b') ) if b == 1: return '1 byte' for factor, suffix in abbrevs: if b >= factor: break # return '%.*f %s' % (precision, old_div(b, factor), suffix) return '%.*f %s' % (precision, b // factor, suffix)","Return a humanized string representation of a number of b. Assumes `from __future__ import division`. 
>>> humanize_bytes(1) '1 byte' >>> humanize_bytes(1024) '1.0 kB' >>> humanize_bytes(1024*123) '123.0 kB' >>> humanize_bytes(1024*12342) '12.1 MB' >>> humanize_bytes(1024*12342,2) '12.05 MB' >>> humanize_bytes(1024*1234,2) '1.21 MB' >>> humanize_bytes(1024*1234*1111,2) '1.31 GB' >>> humanize_bytes(1024*1234*1111,1) '1.3 GB'" "def cmd_dns_lookup_reverse(ip_address, verbose): """"""Perform a reverse lookup of a given IP address. Example: \b $ $ habu.dns.lookup.reverse 8.8.8.8 { ""hostname"": ""google-public-dns-a.google.com"" } """""" if verbose: logging.basicConfig(level=logging.INFO, format='%(message)s') print(""Looking up %s..."" % ip_address, file=sys.stderr) answer = lookup_reverse(ip_address) if answer: print(json.dumps(answer, indent=4)) else: print(""[X] %s is not valid IPv4/IPV6 address"" % ip_address) return True","Perform a reverse lookup of a given IP address. Example: \b $ $ habu.dns.lookup.reverse 8.8.8.8 { ""hostname"": ""google-public-dns-a.google.com"" }" "def add_argument(self, *args, **kwargs): """"""Add an argument. This method adds a new argument to the current parser. The function is the same as ``argparse.ArgumentParser.add_argument``. However, this method tries to determine help messages for the argument being added from some docstrings. If the new argument belongs to a sub command, and the docstring of the function implementing that sub command has an ``Args:`` section that defines a variable with the same name, this function uses that definition as the help message. Positional Args: same positional arguments as argparse.ArgumentParser.add_argument. Keyword Args: same keyword arguments as argparse.ArgumentParser.add_argument. """""" if _HELP not in kwargs: for name in args: name = name.replace(""-"", """") if name in self.__argmap: kwargs[_HELP] = self.__argmap[name] break return super(ArgumentParser, self).add_argument(*args, **kwargs)","Add an argument. This method adds a new argument to the current parser. The function is the same as ``argparse.ArgumentParser.add_argument``. However, this method tries to determine help messages for the argument being added from some docstrings. If the new argument belongs to a sub command, and the docstring of the function implementing that sub command has an ``Args:`` section that defines a variable with the same name, this function uses that definition as the help message. Positional Args: same positional arguments as argparse.ArgumentParser.add_argument. Keyword Args: same keyword arguments as argparse.ArgumentParser.add_argument." "def download(self): """"""Download SRA files. Returns: :obj:`list` of :obj:`str`: List of downloaded files.
"""""" self.downloaded_paths = list() for path in self.paths_for_download: downloaded_path = list() utils.mkdir_p(os.path.abspath(self.directory)) sra_run = path.split(""/"")[-1] logger.info(""Analysing %s"" % sra_run) url = type(self).FTP_ADDRESS_TPL.format( range_subdir=sra_run[:6], file_dir=sra_run) logger.debug(""URL: %s"", url) filepath = os.path.abspath( os.path.join(self.directory, ""%s.sra"" % sra_run)) utils.download_from_url( url, filepath, aspera=self.aspera, silent=self.silent, force=self.force) if self.filetype in (""fasta"", ""fastq""): if utils.which('fastq-dump') is None: logger.error(""fastq-dump command not found"") ftype = """" if self.filetype == ""fasta"": ftype = "" --fasta "" cmd = ""fastq-dump"" if utils.which('parallel-fastq-dump') is None: cmd += "" %s --outdir %s %s"" else: logger.debug(""Using parallel fastq-dump"") cmd = "" parallel-fastq-dump --threads %s"" cmd = cmd % self.threads cmd += "" %s --outdir %s -s %s"" cmd = cmd % (ftype, self.directory, filepath) for fqoption, fqvalue in iteritems(self.fastq_dump_options): if fqvalue: cmd += ("" --%s %s"" % (fqoption, fqvalue)) elif fqvalue is None: cmd += ("" --%s"" % fqoption) logger.debug(cmd) process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) logger.info(""Converting to %s/%s*.%s.gz\n"" % ( self.directory, sra_run, self.filetype)) pout, perr = process.communicate() downloaded_path = glob.glob(os.path.join( self.directory, ""%s*.%s.gz"" % (sra_run, self.filetype))) elif self.filetype == 'sra': downloaded_path = glob.glob(os.path.join( self.directory, ""%s*.%s"" % (sra_run, self.filetype))) else: downloaded_path = glob.glob(os.path.join( self.directory, ""%s*"" % sra_run)) logger.error(""Filetype %s not supported."" % self.filetype) if not self.keep_sra and self.filetype != 'sra': # Delete sra file os.unlink(filepath) self.downloaded_paths += downloaded_path return self.downloaded_paths","Download SRA files. Returns: :obj:`list` of :obj:`str`: List of downloaded files." "def copy(self) : ""creates a copy of this Message."" result = dbus.dbus_message_copy(self._dbobj) if result == None : raise CallFailed(""dbus_message_copy"") #end if return \ type(self)(result)",creates a copy of this Message. "def handle_packet(self, packet): """"""Lets librtmp look at a packet and send a response if needed."""""" if not isinstance(packet, RTMPPacket): raise ValueError(""A RTMPPacket argument is required"") return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)","Lets librtmp look at a packet and send a response if needed." 
"def _init(): """""" Create global Config object, parse command flags """""" global config, _data_path, _allowed_config_keys app_dir = _get_vispy_app_dir() if app_dir is not None: _data_path = op.join(app_dir, 'data') _test_data_path = op.join(app_dir, 'test_data') else: _data_path = _test_data_path = None # All allowed config keys and the types they may have _allowed_config_keys = { 'data_path': string_types, 'default_backend': string_types, 'gl_backend': string_types, 'gl_debug': (bool,), 'glir_file': string_types+file_types, 'include_path': list, 'logging_level': string_types, 'qt_lib': string_types, 'dpi': (int, type(None)), 'profile': string_types + (type(None),), 'audit_tests': (bool,), 'test_data_path': string_types + (type(None),), } # Default values for all config options default_config_options = { 'data_path': _data_path, 'default_backend': '', 'gl_backend': 'gl2', 'gl_debug': False, 'glir_file': '', 'include_path': [], 'logging_level': 'info', 'qt_lib': 'any', 'dpi': None, 'profile': None, 'audit_tests': False, 'test_data_path': _test_data_path, } config = Config(**default_config_options) try: config.update(**_load_config()) except Exception as err: raise Exception('Error while reading vispy config file ""%s"":\n %s' % (_get_config_fname(), err.message)) set_log_level(config['logging_level']) _parse_command_line_arguments()","Create global Config object, parse command flags" "def logspace(self,bins=None,units=None,conversion_function=convert_time,resolution=None,end_at_end=True): """""" bins overwrites resolution """""" if type(bins) in [list, np.ndarray]: return bins min = conversion_function(self.min,from_units=self.units,to_units=units) max = conversion_function(self.max,from_units=self.units,to_units=units) if units is None: units = self.units if resolution is None: resolution = 1.0 if bins is None: bins = self.len(resolution=resolution,units=units,conversion_function=conversion_function)# + 1 if units != '1' and end_at_end: # continuous variable behaviour: # we end with the last valid value at the outer edge return np.logspace(np.log10(min),np.log10(max),bins+1)[:-1] # discrete variable behaviour: # we end with the last valid value as its own bin return np.logspace(np.log10(min),np.log10(max),bins)",bins overwrites resolution "def times(x, y): """""" Do something a random amount of times between x & y """""" def decorator(fn): def wrapped(*args, **kwargs): n = random.randint(x, y) for z in range(1, n): fn(*args, **kwargs) return wrapped return decorator","Do something a random amount of times between x & y" "def strip_size(self, location='top', num_lines=None): """""" Breadth of the strip background in inches Parameters ---------- location : str in ``['top', 'right']`` Location of the strip text num_lines : int Number of text lines """""" dpi = 72 theme = self.theme get_property = theme.themeables.property if location == 'right': strip_name = 'strip_text_y' num_lines = num_lines or self.num_vars_y else: strip_name = 'strip_text_x' num_lines = num_lines or self.num_vars_x if not num_lines: return 0 # The facet labels are placed onto the figure using # transAxes dimensions. 
The line height and line # width are mapped to the same [0, 1] range # i.e (pts) * (inches / pts) * (1 / inches) try: fontsize = get_property(strip_name, 'size') except KeyError: fontsize = float(theme.rcParams.get('font.size', 10)) try: linespacing = get_property(strip_name, 'linespacing') except KeyError: linespacing = 1 # margins on either side of the strip text m1, m2 = self.inner_strip_margins(location) # Using figure.dpi value here does not workout well! breadth = (linespacing*fontsize) * num_lines / dpi breadth = breadth + (m1 + m2) / dpi return breadth","Breadth of the strip background in inches Parameters ---------- location : str in ``['top', 'right']`` Location of the strip text num_lines : int Number of text lines" "def head(self, wg_uuid, uuid): """""" Get one workgroup node."""""" url = ""%(base)s/%(wg_uuid)s/nodes/%(uuid)s"" % { 'base': self.local_base_url, 'wg_uuid': wg_uuid, 'uuid': uuid } # return self.core.head(url) try: # workaround return self.core.get(url) except LinShareException: return False",Get one workgroup node. "def get_zonefile_data( self, zonefile_hash, zonefile_dir ): """""" Get a zonefile by hash Return the serialized zonefile on success Return None on error """""" # check cache atlas_zonefile_data = get_atlas_zonefile_data( zonefile_hash, zonefile_dir, check=False ) if atlas_zonefile_data is not None: # check hash zfh = get_zonefile_data_hash( atlas_zonefile_data ) if zfh != zonefile_hash: log.debug(""Invalid local zonefile %s"" % zonefile_hash ) remove_atlas_zonefile_data( zonefile_hash, zonefile_dir ) else: log.debug(""Zonefile %s is local"" % zonefile_hash) return atlas_zonefile_data return None","Get a zonefile by hash Return the serialized zonefile on success Return None on error" "def _serialize_object(self, response_data, request): """""" Create a python datatype from the given python object. This will use ``self.factory`` object's ``serialize()`` function to convert the object into dictionary. If no factory is defined, this will simply return the same data that was given. :param response_data: data returned by the resource """""" if not self.factory: return response_data if isinstance(response_data, (list, tuple)): return map( lambda item: self.factory.serialize(item, request), response_data) else: return self.factory.serialize(response_data, request)","Create a python datatype from the given python object. This will use ``self.factory`` object's ``serialize()`` function to convert the object into dictionary. If no factory is defined, this will simply return the same data that was given. :param response_data: data returned by the resource" "def analyse_action(func): """"""Analyse a function."""""" description = inspect.getdoc(func) or 'undocumented action' arguments = [] args, varargs, kwargs, defaults = inspect.getargspec(func) if varargs or kwargs: raise TypeError('variable length arguments for action not allowed.') if len(args) != len(defaults or ()): raise TypeError('not all arguments have proper definitions') for idx, (arg, definition) in enumerate(zip(args, defaults or ())): if arg.startswith('_'): raise TypeError('arguments may not start with an underscore') if not isinstance(definition, tuple): shortcut = None default = definition else: shortcut, default = definition argument_type = argument_types[type(default)] if isinstance(default, bool) and default is True: arg = 'no-' + arg arguments.append((arg.replace('_', '-'), shortcut, default, argument_type)) return func, description, arguments",Analyse a function. 
"def text_to_speech(self, text, file, voice_name=None, language=None): """""" Saves given text synthesized audio file, via 'CreateSpeech' endpoint Docs: http://developer.ivona.com/en/speechcloud/actions.html#CreateSpeech :param text: text to synthesize :type text: str :param file: file that will be used to save the audio :type file: file :param voice_name: voice name :type voice_name: str :param language: voice language :type language: str """""" endpoint = 'CreateSpeech' data = { 'Input': { 'Data': text, }, 'OutputFormat': { 'Codec': self.codec.upper(), }, 'Parameters': { 'Rate': self.rate, 'Volume': self.volume, 'SentenceBreak': self.sentence_break, 'ParagraphBreak': self.paragraph_break, }, 'Voice': { 'Name': voice_name or self.voice_name, 'Language': language or self.language, }, } response = self._get_response('post', endpoint, data) file.write(response.content)","Saves given text synthesized audio file, via 'CreateSpeech' endpoint Docs: http://developer.ivona.com/en/speechcloud/actions.html#CreateSpeech :param text: text to synthesize :type text: str :param file: file that will be used to save the audio :type file: file :param voice_name: voice name :type voice_name: str :param language: voice language :type language: str" "def load_stl_binary(file_obj): """""" Load a binary STL file from a file object. Parameters ---------- file_obj: open file- like object Returns ---------- loaded: kwargs for a Trimesh constructor with keys: vertices: (n,3) float, vertices faces: (m,3) int, indexes of vertices face_normals: (m,3) float, normal vector of each face """""" # the header is always 84 bytes long, we just reference the dtype.itemsize # to be explicit about where that magical number comes from header_length = _stl_dtype_header.itemsize header_data = file_obj.read(header_length) if len(header_data) < header_length: raise HeaderError('Binary STL shorter than a fixed header!') try: header = np.frombuffer(header_data, dtype=_stl_dtype_header) except BaseException: raise HeaderError('Binary header incorrect type') try: # save the header block as a string # there could be any garbage in there so wrap in try metadata = {'header': bytes(header['header'][0]).decode('utf-8').strip()} except BaseException: metadata = {} # now we check the length from the header versus the length of the file # data_start should always be position 84, but hard coding that felt ugly data_start = file_obj.tell() # this seeks to the end of the file # position 0, relative to the end of the file 'whence=2' file_obj.seek(0, 2) # we save the location of the end of the file and seek back to where we # started from data_end = file_obj.tell() file_obj.seek(data_start) # the binary format has a rigidly defined structure, and if the length # of the file doesn't match the header, the loaded version is almost # certainly going to be garbage. len_data = data_end - data_start len_expected = header['face_count'] * _stl_dtype.itemsize # this check is to see if this really is a binary STL file. # if we don't do this and try to load a file that isn't structured properly # we will be producing garbage or crashing hard # so it's much better to raise an exception here. if len_data != len_expected: raise HeaderError('Binary STL has incorrect length in header!') blob = np.frombuffer(file_obj.read(), dtype=_stl_dtype) # all of our vertices will be loaded in order # so faces are just sequential indices reshaped. 
faces = np.arange(header['face_count'] * 3).reshape((-1, 3)) result = {'vertices': blob['vertices'].reshape((-1, 3)), 'face_normals': blob['normals'].reshape((-1, 3)), 'faces': faces, 'metadata': metadata} return result","Load a binary STL file from a file object. Parameters ---------- file_obj: open file- like object Returns ---------- loaded: kwargs for a Trimesh constructor with keys: vertices: (n,3) float, vertices faces: (m,3) int, indexes of vertices face_normals: (m,3) float, normal vector of each face" "def _conv(self,v): """"""Convert Python values to MySQL values"""""" if isinstance(v,str): return '""%s""' %v.replace(""'"",""''"") elif isinstance(v,datetime.datetime): if v.tzinfo is not None: raise ValueError,\ ""datetime instances with tzinfo not supported"" return '""%s""' %self.db_module.Timestamp(v.year,v.month,v.day, v.hour,v.minute,v.second) elif isinstance(v,datetime.date): return '""%s""' %self.db_module.Date(v.year,v.month,v.day) else: return v",Convert Python values to MySQL values "def sr1(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs): """"""Send packets at layer 3 and return only the first answer"""""" s = conf.L3socket(promisc=promisc, filter=filter, nofilter=nofilter, iface=iface) ans, _ = sndrcv(s, x, *args, **kargs) s.close() if len(ans) > 0: return ans[0][1] else: return None",Send packets at layer 3 and return only the first answer "def move_group(self, group = None, parent = None): """"""Append group to a new parent. group and parent must be v1Group-instances. """""" if group is None or type(group) is not v1Group: raise KPError(""A valid group must be given."") elif parent is not None and type(parent) is not v1Group: raise KPError(""parent must be a v1Group."") elif group is parent: raise KPError(""group and parent must not be the same group"") if parent is None: parent = self.root_group if group in self.groups: self.groups.remove(group) group.parent.children.remove(group) group.parent = parent if parent.children: if parent.children[-1] is self.groups[-1]: self.groups.append(group) else: new_index = self.groups.index(parent.children[-1]) + 1 self.groups.insert(new_index, group) else: new_index = self.groups.index(parent) + 1 self.groups.insert(new_index, group) parent.children.append(group) if parent is self.root_group: group.level = 0 else: group.level = parent.level + 1 if group.children: self._move_group_helper(group) group.last_mod = datetime.now().replace(microsecond=0) return True else: raise KPError(""Didn't find given group."")","Append group to a new parent. group and parent must be v1Group-instances." "def _annotate_bifurcated_extend_data(self, row, smaller, larger, tokenize, join): """"""Returns `row` annotated with whether it should be deleted or not. An n-gram is marked for deletion if: * its label count is 1 and its constituent (n-1)-grams also have a label count of 1; or * there is a containing (n+1)-gram that has the same label count. 
:param row: row of witness n-grams to annotate :type row: `pandas.Series` :param smaller: rows of (n-1)-grams for this witness :type smaller: `pandas.DataFrame` :param larger: rows of (n+1)-grams for this witness :type larger: `pandas.DataFrame` :param tokenize: function to tokenize an n-gram :param join: function to join tokens :rtype: `pandas.Series` """""" lcf = constants.LABEL_COUNT_FIELDNAME nf = constants.NGRAM_FIELDNAME ngram = row[constants.NGRAM_FIELDNAME] label_count = row[constants.LABEL_COUNT_FIELDNAME] if label_count == 1 and not smaller.empty: # Keep a result with a label count of 1 if its # constituents do not also have a count of 1. ngram_tokens = tokenize(ngram) sub_ngram1 = join(ngram_tokens[:-1]) sub_ngram2 = join(ngram_tokens[1:]) pattern = FilteredWitnessText.get_filter_ngrams_pattern( [sub_ngram1, sub_ngram2]) if smaller[smaller[constants.NGRAM_FIELDNAME].str.match(pattern)][ constants.LABEL_COUNT_FIELDNAME].max() == 1: row[DELETE_FIELDNAME] = True elif not larger.empty and larger[larger[nf].str.contains( ngram, regex=False)][lcf].max() == label_count: # Remove a result if the label count of a containing # n-gram is equal to its label count. row[DELETE_FIELDNAME] = True return row","Returns `row` annotated with whether it should be deleted or not. An n-gram is marked for deletion if: * its label count is 1 and its constituent (n-1)-grams also have a label count of 1; or * there is a containing (n+1)-gram that has the same label count. :param row: row of witness n-grams to annotate :type row: `pandas.Series` :param smaller: rows of (n-1)-grams for this witness :type smaller: `pandas.DataFrame` :param larger: rows of (n+1)-grams for this witness :type larger: `pandas.DataFrame` :param tokenize: function to tokenize an n-gram :param join: function to join tokens :rtype: `pandas.Series`" "def _combine_eq_sets(eq_sets, operations): """"""Combines the dicts of _get_equivalent_atom_dicts into one Args: eq_sets (dict) operations (dict) Returns: dict: The returned dictionary has two possible keys: ``eq_sets``: A dictionary of indices mapping to sets of indices, each key maps to indices of all equivalent atoms. The keys are guaranteed to be not equivalent. ``sym_ops``: Twofold nested dictionary. ``operations[i][j]`` gives the symmetry operation that maps atom ``i`` unto ``j``. """""" UNIT = np.eye(3) def all_equivalent_atoms_of_i(i, eq_sets, ops): """"""WORKS INPLACE on operations """""" visited = set([i]) tmp_eq_sets = {j: (eq_sets[j] - visited) for j in eq_sets[i]} while tmp_eq_sets: new_tmp_eq_sets = {} for j in tmp_eq_sets: if j in visited: continue visited.add(j) for k in tmp_eq_sets[j]: new_tmp_eq_sets[k] = eq_sets[k] - visited if i not in ops[k]: ops[k][i] = (np.dot(ops[j][i], ops[k][j]) if k != i else UNIT) ops[i][k] = ops[k][i].T tmp_eq_sets = new_tmp_eq_sets return visited, ops eq_sets = copy.deepcopy(eq_sets) new_eq_sets = {} ops = copy.deepcopy(operations) to_be_deleted = set() for i in eq_sets: if i in to_be_deleted: continue visited, ops = all_equivalent_atoms_of_i(i, eq_sets, ops) to_be_deleted |= visited - {i} for k in to_be_deleted: eq_sets.pop(k, None) return {'eq_sets': eq_sets, 'sym_ops': ops}","Combines the dicts of _get_equivalent_atom_dicts into one Args: eq_sets (dict) operations (dict) Returns: dict: The returned dictionary has two possible keys: ``eq_sets``: A dictionary of indices mapping to sets of indices, each key maps to indices of all equivalent atoms. The keys are guaranteed to be not equivalent. ``sym_ops``: Twofold nested dictionary. 
``operations[i][j]`` gives the symmetry operation that maps atom ``i`` unto ``j``." "def query(self, x, k=1, p=2, distance_upper_bound=np.inf): """"""Query the kd-tree for nearest neighbors Parameters ---------- x : array_like, last dimension self.m An array of points to query. k : int, optional The number of nearest neighbors to return. eps : nonnegative float, optional Return approximate nearest neighbors; the kth returned value is guaranteed to be no further than (1+eps) times the distance to the real kth nearest neighbor. p : float, 1<=p<=infinity, optional Which Minkowski p-norm to use. 1 is the sum-of-absolute-values ""Manhattan"" distance 2 is the usual Euclidean distance infinity is the maximum-coordinate-difference distance distance_upper_bound : nonnegative float, optional Return only neighbors within this distance. This is used to prune tree searches, so if you are doing a series of nearest-neighbor queries, it may help to supply the distance to the nearest neighbor of the most recent point. Returns ------- d : float or array of floats The distances to the nearest neighbors. If x has shape tuple+(self.m,), then d has shape tuple if k is one, or tuple+(k,) if k is larger than one. Missing neighbors (e.g. when k > n or distance_upper_bound is given) are indicated with infinite distances. If k is None, then d is an object array of shape tuple, containing lists of distances. In either case the hits are sorted by distance (nearest first). i : tuple(int, int) or array of tuple(int, int) The locations of the neighbors in self.data. Locations are given by tuples of (traj_i, frame_i) Examples -------- >>> from msmbuilder.utils import KDTree >>> X1 = 0.3 * np.random.RandomState(0).randn(500, 2) >>> X2 = 0.3 * np.random.RandomState(1).randn(1000, 2) + 10 >>> tree = KDTree([X1, X2]) >>> pts = np.array([[0, 0], [10, 10]]) >>> tree.query(pts) (array([ 0.0034, 0.0102]), array([[ 0, 410], [ 1, 670]])) >>> tree.query(pts[0]) (0.0034, array([ 0, 410])) """""" cdists, cinds = self._kdtree.query(x, k, p, distance_upper_bound) return cdists, self._split_indices(cinds)","Query the kd-tree for nearest neighbors Parameters ---------- x : array_like, last dimension self.m An array of points to query. k : int, optional The number of nearest neighbors to return. eps : nonnegative float, optional Return approximate nearest neighbors; the kth returned value is guaranteed to be no further than (1+eps) times the distance to the real kth nearest neighbor. p : float, 1<=p<=infinity, optional Which Minkowski p-norm to use. 1 is the sum-of-absolute-values ""Manhattan"" distance 2 is the usual Euclidean distance infinity is the maximum-coordinate-difference distance distance_upper_bound : nonnegative float, optional Return only neighbors within this distance. This is used to prune tree searches, so if you are doing a series of nearest-neighbor queries, it may help to supply the distance to the nearest neighbor of the most recent point. Returns ------- d : float or array of floats The distances to the nearest neighbors. If x has shape tuple+(self.m,), then d has shape tuple if k is one, or tuple+(k,) if k is larger than one. Missing neighbors (e.g. when k > n or distance_upper_bound is given) are indicated with infinite distances. If k is None, then d is an object array of shape tuple, containing lists of distances. In either case the hits are sorted by distance (nearest first). i : tuple(int, int) or array of tuple(int, int) The locations of the neighbors in self.data. 
Locations are given by tuples of (traj_i, frame_i) Examples -------- >>> from msmbuilder.utils import KDTree >>> X1 = 0.3 * np.random.RandomState(0).randn(500, 2) >>> X2 = 0.3 * np.random.RandomState(1).randn(1000, 2) + 10 >>> tree = KDTree([X1, X2]) >>> pts = np.array([[0, 0], [10, 10]]) >>> tree.query(pts) (array([ 0.0034, 0.0102]), array([[ 0, 410], [ 1, 670]])) >>> tree.query(pts[0]) (0.0034, array([ 0, 410]))" "def body(self, master): """"""Create dialog body. Return widget that should have initial focus. Inherited from tkinter.simpledialog.Dialog """""" self.frame = ttk.Frame(master, padding=(5, 5, 10, 10)) self.lbl_message = ttk.Label( self.frame, text='Select User Type: ', ) self.rb_student = ttk.Radiobutton( self.frame, text='Student', variable=self.rb_choice, value='student', ) self.rb_tutor = ttk.Radiobutton( self.frame, text='Tutor', variable=self.rb_choice, value='tutor', ) self.btn_ok = ttk.Button( self.frame, text='OK', command=self.ok, ) self.btn_cancel = ttk.Button( self.frame, text='Cancel', command=self.cancel, ) # assemble grid self.frame.grid(column=0, row=0, sticky=(N, S, E, W)) self.lbl_message.grid(column=0, row=0, columnspan=2, sticky=(W, E)) self.rb_student.grid(column=0, row=1, columnspan=2, sticky=W) self.rb_tutor.grid(column=0, row=2, columnspan=2, sticky=W) self.btn_ok.grid(column=0, row=3) self.btn_cancel.grid(column=1, row=3) # key bindings self.bind('', self.ok) self.bind('', self.ok) self.bind('', self.cancel) self.rb_tutor.invoke() return self.btn_ok","Create dialog body. Return widget that should have initial focus. Inherited from tkinter.simpledialog.Dialog" "def get_inst_type(xml_node: XmlNode): '''Returns type by xml node''' (module_path, class_name) = (xml_node.namespace, xml_node.name) try: return import_module(module_path).__dict__[class_name] except (KeyError, ImportError, ModuleNotFoundError): message = 'Import ""{0}.{1}"" is failed.'.format(module_path, class_name) raise RenderingError(message, xml_node.view_info)",Returns type by xml node "def user_exists_p(login, connector): """""" Determine if user exists in specified environment. """""" url = '/users/' + login + '/' _r = connector.get(url) return (_r.status_code == Constants.PULP_GET_OK)",Determine if user exists in specified environment. "def normalize(self, max_order=MAX_ORDER): """"""Ensure that the MOC is ""well-formed"". This structures the MOC as is required for the FITS and JSON representation. This method is invoked automatically when writing to these formats. The number of cells in the MOC will be minimized, so that no area of the sky is covered multiple times by cells at different orders, and if all four neighboring cells are present at an order (other than order 0), they are merged into their parent cell at the next lower order. >>> m = MOC(1, (0, 1, 2, 3)) >>> m.cells 4 >>> m.normalize() >>> m.cells 1 """""" max_order = self._validate_order(max_order) # If the MOC is already normalized and we are not being asked # to reduce the order, then do nothing. if self.normalized and max_order >= self.order: return # Group the pixels by iterating down from the order. At each # order, where all 4 adjacent pixels are present (or we are above # the maximum order) they are replaced with a single pixel in the # next lower order. Otherwise the pixel should appear in the MOC # unless it is already represented at a lower order. 
for order in range(self.order, 0, -1): pixels = self._orders[order] next_pixels = self._orders[order - 1] new_pixels = set() while pixels: pixel = pixels.pop() # Look to lower orders to ensure this pixel isn't # already covered. check_pixel = pixel already_contained = True for check_order in range(order - 1, -1, -1): check_pixel >>= 2 if check_pixel in self._orders[check_order]: break else: already_contained = False # Check whether this order is above the maximum, or # if we have all 4 adjacent pixels. Also do this if # the pixel was already contained at a lower level # so that we can avoid checking the adjacent pixels. if (already_contained or (order > max_order) or (((pixel ^ 1) in pixels) and ((pixel ^ 2) in pixels) and ((pixel ^ 3) in pixels))): pixels.discard(pixel ^ 1) pixels.discard(pixel ^ 2) pixels.discard(pixel ^ 3) if not already_contained: # Group these pixels by placing the equivalent pixel # for the next order down in the set. next_pixels.add(pixel >> 2) else: new_pixels.add(pixel) if new_pixels: self._orders[order].update(new_pixels) self._normalized = True","Ensure that the MOC is ""well-formed"". This structures the MOC as is required for the FITS and JSON representation. This method is invoked automatically when writing to these formats. The number of cells in the MOC will be minimized, so that no area of the sky is covered multiple times by cells at different orders, and if all four neighboring cells are present at an order (other than order 0), they are merged into their parent cell at the next lower order. >>> m = MOC(1, (0, 1, 2, 3)) >>> m.cells 4 >>> m.normalize() >>> m.cells 1" "def find_entries(self, users, start, *args, **kwargs): """""" Find all entries for all users, from a given starting point. If no starting point is provided, all entries are returned. """""" forever = kwargs.get('all', False) for user in users: if forever: entries = Entry.objects.filter(user=user).order_by('start_time') else: entries = Entry.objects.filter( user=user, start_time__gte=start).order_by( 'start_time') yield entries","Find all entries for all users, from a given starting point. If no starting point is provided, all entries are returned." "def get_cpus_by_arch(cls, arch): """""" Get all CPUs info by arch Args: arch(str): CPU architecture Returns: lxml.etree.element: CPUs by arch XML Raises: :exc:`~LagoException`: If no such ARCH is found """""" with open('/usr/share/libvirt/cpu_map.xml', 'r') as cpu_map: cpu_xml = ET.parse(cpu_map) try: return cpu_xml.xpath('/cpus/arch[@name=""{0}""]'.format(arch))[0] except IndexError: raise LagoException('No such arch: {0}'.format(arch))","Get all CPUs info by arch Args: arch(str): CPU architecture Returns: lxml.etree.element: CPUs by arch XML Raises: :exc:`~LagoException`: If no such ARCH is found" "def _parse_settings_bond_2(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '2'} valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except ValueError: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond","Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting." "def agents(): """"""Lists the currently active agents"""""" print 'The following LiveSync agents are active:' agent_list = LiveSyncAgent.find().order_by(LiveSyncAgent.backend_name, db.func.lower(LiveSyncAgent.name)).all() table_data = [['ID', 'Name', 'Backend', 'Initial Export', 'Queue']] for agent in agent_list: initial = (cformat('%{green!}done%{reset}') if agent.initial_data_exported else cformat('%{yellow!}pending%{reset}')) if agent.backend is None: backend_title = cformat('%{red!}invalid backend ({})%{reset}').format(agent.backend_name) else: backend_title = agent.backend.title table_data.append([unicode(agent.id), agent.name, backend_title, initial, unicode(agent.queue.filter_by(processed=False).count())]) table = AsciiTable(table_data) table.justify_columns[4] = 'right' print table.table if not all(a.initial_data_exported for a in agent_list): print print ""You need to perform the initial data export for some agents."" print cformat(""To do so, run "" ""%{yellow!}indico livesync initial_export %{reset}%{yellow}%{reset} for those agents."")",Lists the currently active agents "def _wait_for_machine_booted(name, suffictinet_texts=None): """""" Internal method wait until machine is ready, in common case means there is running systemd-logind :param name: str with machine name :param suffictinet_texts: alternative text to check in output :return: True or exception """""" # TODO: rewrite it using probes module in utils suffictinet_texts = suffictinet_texts or [""systemd-logind""] # optionally use: ""Unit: machine"" for foo in range(constants.DEFAULT_RETRYTIMEOUT): time.sleep(constants.DEFAULT_SLEEP) out = run_cmd( [""machinectl"", ""--no-pager"", ""status"", name], ignore_status=True, return_output=True) for restr in suffictinet_texts: if restr in out: time.sleep(constants.DEFAULT_SLEEP) return True raise ConuException( ""Unable to start machine %s within %d (machinectl status command dos not contain %s)"" % (name, constants.DEFAULT_RETRYTIMEOUT, suffictinet_texts))","Internal method wait until machine is ready, in common case means there is running systemd-logind :param name: str with machine name :param suffictinet_texts: alternative text to check in output :return: True or exception" "def generate_url(self, 
expires_in, method='GET', headers=None, query_auth=True, force_http=False, response_headers=None, expires_in_absolute=False): """""" Generate a URL to access this key. :type expires_in: int :param expires_in: How long the url is valid for, in seconds :type method: string :param method: The method to use for retrieving the file (default is GET) :type headers: dict :param headers: Any headers to pass along in the request :type query_auth: bool :param query_auth: :rtype: string :return: The URL to access the key """""" return self.bucket.connection.generate_url(expires_in, method, self.bucket.name, self.name, headers, query_auth, force_http, response_headers, expires_in_absolute)","Generate a URL to access this key. :type expires_in: int :param expires_in: How long the url is valid for, in seconds :type method: string :param method: The method to use for retrieving the file (default is GET) :type headers: dict :param headers: Any headers to pass along in the request :type query_auth: bool :param query_auth: :rtype: string :return: The URL to access the key" "def _interleave(self): """""" Return ndarray from blocks with specified item order Items must be contained in the blocks """""" from pandas.core.dtypes.common import is_sparse dtype = _interleaved_dtype(self.blocks) # TODO: https://github.com/pandas-dev/pandas/issues/22791 # Give EAs some input on what happens here. Sparse needs this. if is_sparse(dtype): dtype = dtype.subtype elif is_extension_array_dtype(dtype): dtype = 'object' result = np.empty(self.shape, dtype=dtype) itemmask = np.zeros(self.shape[0]) for blk in self.blocks: rl = blk.mgr_locs result[rl.indexer] = blk.get_values(dtype) itemmask[rl.indexer] = 1 if not itemmask.all(): raise AssertionError('Some items were not contained in blocks') return result","Return ndarray from blocks with specified item order Items must be contained in the blocks" "def _log_progress(self, bytes_downloaded): """"""Logs progress information about ongoing module download. Args: bytes_downloaded: Number of bytes downloaded. """""" self._total_bytes_downloaded += bytes_downloaded now = time.time() if (self._interactive_mode() or now - self._last_progress_msg_print_time > 15): # Print progress message every 15 secs or if interactive progress # tracking is enabled. self._print_download_progress_msg( ""Downloading %s: %s"" % (self._url, tf_utils.bytes_to_readable_str( self._total_bytes_downloaded, True))) self._last_progress_msg_print_time = now","Logs progress information about ongoing module download. Args: bytes_downloaded: Number of bytes downloaded." "def calculate_assigned_hours(semester=None, profiles=None): """""" Utility function to recalculate the assigned workshift hours. This function is meant to only be called from the manager shell, it is not referenced anywhere else in the workshift module. 
Parameters ---------- semester : workshift.models.Semester, optional profiles : list of workshift.models.WorkshiftProfile, optional """""" if semester is None: try: semester = Semester.objects.get(current=True) except (Semester.DoesNotExist, Semester.MultipleObjectsReturned): return if profiles is None: profiles = WorkshiftProfile.objects.filter(semester=semester) for profile in profiles: for pool_hours in profile.pool_hours.all(): shifts = RegularWorkshift.objects.filter( current_assignees=profile, pool=pool_hours.pool, active=True, ) pool_hours.assigned_hours = sum(shift.hours for shift in shifts) pool_hours.save(update_fields=[""assigned_hours""])","Utility function to recalculate the assigned workshift hours. This function is meant to only be called from the manager shell, it is not referenced anywhere else in the workshift module. Parameters ---------- semester : workshift.models.Semester, optional profiles : list of workshift.models.WorkshiftProfile, optional" "def get_bids_examples(data_dir=None, url=None, resume=True, verbose=1, variant='BIDS-examples-1-1.0.0-rc3u5'): """"""Download BIDS-examples-1"""""" warn(DEPRECATION_MSG) variant = 'BIDS-examples-1-1.0.0-rc3u5' if variant not in BIDS_EXAMPLES else variant if url is None: url = BIDS_EXAMPLES[variant][0] md5 = BIDS_EXAMPLES[variant][1] return fetch_file(variant, url, data_dir, resume=resume, verbose=verbose, md5sum=md5)",Download BIDS-examples-1 "def get_connection_params(self): """"""Returns a dict of parameters suitable for get_new_connection."""""" from django.conf import settings settings_dict = self.settings_dict options = settings_dict.get('OPTIONS', {}) autocommit = options.get('autocommit', False) conn_params = { 'server': settings_dict['HOST'], 'database': settings_dict['NAME'], 'user': settings_dict['USER'], 'port': settings_dict.get('PORT', '1433'), 'password': settings_dict['PASSWORD'], 'timeout': self.command_timeout, 'autocommit': autocommit, 'use_mars': options.get('use_mars', False), 'load_balancer': options.get('load_balancer', None), 'failover_partner': options.get('failover_partner', None), 'use_tz': utc if getattr(settings, 'USE_TZ', False) else None, } for opt in _SUPPORTED_OPTIONS: if opt in options: conn_params[opt] = options[opt] self.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None return conn_params",Returns a dict of parameters suitable for get_new_connection. "def is_displayed(self): """"""Whether the element is visible to a user."""""" # Only go into this conditional for browsers that don't use the atom themselves if self._w3c: return self.parent.execute_script( ""return (%s).apply(null, arguments);"" % isDisplayed_js, self) else: return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']",Whether the element is visible to a user. "def show_report(self, report_path): """"""Show batch report file in batchReportFileName using an external app. This method uses QDesktop services to display the report (typically using gedit or similar text editor). :param report_path: Path to the file of batch report. :type report_path: str """""" if self.show_results_popup: url = QtCore.QUrl.fromLocalFile(report_path) # noinspection PyTypeChecker,PyCallByClass,PyArgumentList QtGui.QDesktopServices.openUrl(url) else: # report = open(report_path).read() # LOGGER.info(report) pass","Show batch report file in batchReportFileName using an external app. This method uses QDesktop services to display the report (typically using gedit or similar text editor). :param report_path: Path to the file of batch report. 
:type report_path: str" "def popen(self, stdout, stderr): """"""Build popen object to run :rtype: subprocess.Popen """""" self.logger.info('Executing command: %s', self.command_str) return subprocess.Popen([self._executor_script], stdout=stdout, stderr=stderr)","Build popen object to run :rtype: subprocess.Popen" "def template_delete(call=None, kwargs=None): ''' Deletes the given template from OpenNebula. Either a name or a template_id must be supplied. .. versionadded:: 2016.3.0 name The name of the template to delete. Can be used instead of ``template_id``. template_id The ID of the template to delete. Can be used instead of ``name``. CLI Example: .. code-block:: bash salt-cloud -f template_delete opennebula name=my-template salt-cloud --function template_delete opennebula template_id=5 ''' if call != 'function': raise SaltCloudSystemExit( 'The template_delete function must be called with -f or --function.' ) if kwargs is None: kwargs = {} name = kwargs.get('name', None) template_id = kwargs.get('template_id', None) if template_id: if name: log.warning( 'Both the \'template_id\' and \'name\' arguments were provided. ' '\'template_id\' will take precedence.' ) elif name: template_id = get_template_id(kwargs={'name': name}) else: raise SaltCloudSystemExit( 'The template_delete function requires either a \'name\' or a \'template_id\' ' 'to be provided.' ) server, user, password = _get_xml_rpc() auth = ':'.join([user, password]) response = server.one.template.delete(auth, int(template_id)) data = { 'action': 'template.delete', 'deleted': response[0], 'template_id': response[1], 'error_code': response[2], } return data","Deletes the given template from OpenNebula. Either a name or a template_id must be supplied. .. versionadded:: 2016.3.0 name The name of the template to delete. Can be used instead of ``template_id``. template_id The ID of the template to delete. Can be used instead of ``name``. CLI Example: .. code-block:: bash salt-cloud -f template_delete opennebula name=my-template salt-cloud --function template_delete opennebula template_id=5" "def _setbitpos(self, pos): """"""Move to absolute postion bit in bitstream."""""" if pos < 0: raise ValueError(""Bit position cannot be negative."") if pos > self.len: raise ValueError(""Cannot seek past the end of the data."") self._pos = pos",Move to absolute postion bit in bitstream. "def from_packets(packets, sequence=0, default_size=4096, wiggle_room=2048): """"""Construct a list of Ogg pages from a list of packet data. The algorithm will generate pages of approximately default_size in size (rounded down to the nearest multiple of 255). However, it will also allow pages to increase to approximately default_size + wiggle_room if allowing the wiggle room would finish a packet (only one packet will be finished in this way per page; if the next packet would fit into the wiggle room, it still starts on a new page). This method reduces packet fragmentation when packet sizes are slightly larger than the default page size, while still ensuring most pages are of the average size. Pages are numbered started at 'sequence'; other information is uninitialized. """""" chunk_size = (default_size // 255) * 255 pages = [] page = OggPage() page.sequence = sequence for packet in packets: page.packets.append(b"""") while packet: data, packet = packet[:chunk_size], packet[chunk_size:] if page.size < default_size and len(page.packets) < 255: page.packets[-1] += data else: # If we've put any packet data into this page yet, # we need to mark it incomplete. 
However, we can # also have just started this packet on an already # full page, in which case, just start the new # page with this packet. if page.packets[-1]: page.complete = False if len(page.packets) == 1: page.position = -1 else: page.packets.pop(-1) pages.append(page) page = OggPage() page.continued = not pages[-1].complete page.sequence = pages[-1].sequence + 1 page.packets.append(data) if len(packet) < wiggle_room: page.packets[-1] += packet packet = b"""" if page.packets: pages.append(page) return pages","Construct a list of Ogg pages from a list of packet data. The algorithm will generate pages of approximately default_size in size (rounded down to the nearest multiple of 255). However, it will also allow pages to increase to approximately default_size + wiggle_room if allowing the wiggle room would finish a packet (only one packet will be finished in this way per page; if the next packet would fit into the wiggle room, it still starts on a new page). This method reduces packet fragmentation when packet sizes are slightly larger than the default page size, while still ensuring most pages are of the average size. Pages are numbered started at 'sequence'; other information is uninitialized." "def _remove_n(self): '''Remove terminal Ns from sequencing results.''' for i, result in enumerate(self.results): largest = max(str(result).split('N'), key=len) start = result.locate(largest)[0][0] stop = start + len(largest) if start != stop: self.results[i] = self.results[i][start:stop]",Remove terminal Ns from sequencing results. "def formatTime(self, record, datefmt=None): """"""Format the log timestamp."""""" _seconds_fraction = record.created - int(record.created) _datetime_utc = time.mktime(time.gmtime(record.created)) _datetime_utc += _seconds_fraction _created = self.converter(_datetime_utc) if datefmt: time_string = _created.strftime(datefmt) else: time_string = _created.strftime('%Y-%m-%dT%H:%M:%S.%fZ') time_string = ""%s,%03d"" % (time_string, record.msecs) return time_string",Format the log timestamp. "def _get_attribute_value_for_node(self, record): """""" Gets the closest value for the current node's attribute matching the given record. """""" # Abort if this node has not get split on an attribute. if self.attr_name is None: return # Otherwise, lookup the attribute value for this node in the # given record. attr = self.attr_name attr_value = record[attr] attr_values = self.get_values(attr) if attr_value in attr_values: return attr_value else: # The value of the attribute in the given record does not directly # map to any previously known values, so apply a missing value # policy. policy = self.tree.missing_value_policy.get(attr) assert policy, \ (""No missing value policy specified for attribute %s."") \ % (attr,) if policy == USE_NEAREST: # Use the value that the tree has seen that's also has the # smallest Euclidean distance to the actual value. assert self.tree.data.header_types[attr] \ in (ATTR_TYPE_DISCRETE, ATTR_TYPE_CONTINUOUS), \ ""The use-nearest policy is invalid for nominal types."" nearest = (1e999999, None) for _value in attr_values: nearest = min( nearest, (abs(_value - attr_value), _value)) _, nearest_value = nearest return nearest_value else: raise Exception(""Unknown missing value policy: %s"" % (policy,))","Gets the closest value for the current node's attribute matching the given record." "def validate_host_parameters(self, host_list, remote_user): ''' Validate and set the host list and remote user parameters. 
''' if host_list is None: host_list = self.host_list if remote_user is None: remote_user = self.remote_user if host_list is None or remote_user is None: print ""Host list [%s], remote user [%s] are required"" % \ (host_list, remote_user) return (None, None) return (host_list, remote_user)",Validate and set the host list and remote user parameters. "def make_generic_copy(self, deeply=False): """"""Make a new matching Statement with no provenance. All agents and other attributes besides evidence, belief, supports, and supported_by will be copied over, and a new uuid will be assigned. Thus, the new Statement will satisfy `new_stmt.matches(old_stmt)`. If `deeply` is set to True, all the attributes will be deep-copied, which is comparatively slow. Otherwise, attributes of this statement may be altered by changes to the new matching statement. """""" if deeply: kwargs = deepcopy(self.__dict__) else: kwargs = self.__dict__.copy() for attr in ['evidence', 'belief', 'uuid', 'supports', 'supported_by', 'is_activation']: kwargs.pop(attr, None) for attr in ['_full_hash', '_shallow_hash']: my_hash = kwargs.pop(attr, None) my_shallow_hash = kwargs.pop(attr, None) for attr in self._agent_order: attr_value = kwargs.get(attr) if isinstance(attr_value, list): kwargs[attr] = sorted_agents(attr_value) new_instance = self.__class__(**kwargs) new_instance._full_hash = my_hash new_instance._shallow_hash = my_shallow_hash return new_instance","Make a new matching Statement with no provenance. All agents and other attributes besides evidence, belief, supports, and supported_by will be copied over, and a new uuid will be assigned. Thus, the new Statement will satisfy `new_stmt.matches(old_stmt)`. If `deeply` is set to True, all the attributes will be deep-copied, which is comparatively slow. Otherwise, attributes of this statement may be altered by changes to the new matching statement." "def augmented_dickey_fuller(x, param): """""" The Augmented Dickey-Fuller test is a hypothesis test which checks whether a unit root is present in a time series sample. This feature calculator returns the value of the respective test statistic. See the statsmodels implementation for references and more details. :param x: the time series to calculate the feature of :type x: numpy.ndarray :param param: contains dictionaries {""attr"": x} with x str, either ""teststat"", ""pvalue"" or ""usedlag"" :type param: list :return: the value of this feature :return type: float """""" res = None try: res = adfuller(x) except LinAlgError: res = np.NaN, np.NaN, np.NaN except ValueError: # occurs if sample size is too small res = np.NaN, np.NaN, np.NaN except MissingDataError: # is thrown for e.g. inf or nan in the data res = np.NaN, np.NaN, np.NaN return [('attr_""{}""'.format(config[""attr""]), res[0] if config[""attr""] == ""teststat"" else res[1] if config[""attr""] == ""pvalue"" else res[2] if config[""attr""] == ""usedlag"" else np.NaN) for config in param]","The Augmented Dickey-Fuller test is a hypothesis test which checks whether a unit root is present in a time series sample. This feature calculator returns the value of the respective test statistic. See the statsmodels implementation for references and more details. 
:param x: the time series to calculate the feature of :type x: numpy.ndarray :param param: contains dictionaries {""attr"": x} with x str, either ""teststat"", ""pvalue"" or ""usedlag"" :type param: list :return: the value of this feature :return type: float" "def getK1(self, type='simu'): """""" get quad k1 value :param type: 'simu' or 'online' :return: quad strength,i.e. k1 """""" if type == 'ctrl': pv = self.ctrlinfo.get('k1')['pv'] rval = epics.caget(pv) if rval is None: val = self.getConfig(type='simu')['k1'] else: val = self.unitTrans(rval, direction='+') return val else: return self.getConfig(type='simu')['k1']","get quad k1 value :param type: 'simu' or 'online' :return: quad strength,i.e. k1" "def parse(v, country_code=None): """"""Try parse `v` to currencies; filter by country_code If `v` is a number, try `by_code_num()`; otherwise try: 1) if `v` is 3 character uppercase: `by_alpha3()` 2) Exact symbol match: `by_symbol()` 3) Exact country code match: `by_country()` 4) Fuzzy by symbol match heuristic: `by_symbol_match()` Parameters: v: Union[unicode, int] Either a iso4217 numeric code or some string country_code: Optional[unicode] Iso3166 alpha2 country code. Returns: List[Currency]: found Currency objects. """""" if isinstance(v, int): res = by_code_num(v) return [] if not res else [res] if not isinstance(v, (str, unicode)): raise ValueError('`v` of incorrect type {}. Only accepts str, bytes, unicode and int.') # check alpha3 if re.match('^[A-Z]{3}$', v): res = by_alpha3(v) if res: return [res] # check by symbol res = by_symbol(v, country_code) if res: return res # check by country code res = by_country(v) if res: return res # more or less fuzzy match by symbol res = by_symbol_match(v, country_code) if res: return res","Try parse `v` to currencies; filter by country_code If `v` is a number, try `by_code_num()`; otherwise try: 1) if `v` is 3 character uppercase: `by_alpha3()` 2) Exact symbol match: `by_symbol()` 3) Exact country code match: `by_country()` 4) Fuzzy by symbol match heuristic: `by_symbol_match()` Parameters: v: Union[unicode, int] Either a iso4217 numeric code or some string country_code: Optional[unicode] Iso3166 alpha2 country code. Returns: List[Currency]: found Currency objects." "def mesh(mesh, T_mesh_world=RigidTransform(from_frame='obj', to_frame='world'), style='surface', smooth=False, color=(0.5,0.5,0.5), name=None): """"""Visualize a 3D triangular mesh. Parameters ---------- mesh : trimesh.Trimesh The mesh to visualize. T_mesh_world : autolab_core.RigidTransform The pose of the mesh, specified as a transformation from mesh frame to world frame. style : str Triangular mesh style, either 'surface' or 'wireframe'. smooth : bool If true, the mesh is smoothed before rendering. color : 3-tuple Color tuple. name : str A name for the object to be added. """""" if not isinstance(mesh, trimesh.Trimesh): raise ValueError('Must provide a trimesh.Trimesh object') mp = MaterialProperties( color = np.array(color), k_a = 0.5, k_d = 0.3, k_s = 0.1, alpha = 10.0, smooth=smooth, wireframe=(style == 'wireframe') ) obj = SceneObject(mesh, T_mesh_world, mp) if name is None: name = str(uuid.uuid4()) Visualizer3D._scene.add_object(name, obj)","Visualize a 3D triangular mesh. Parameters ---------- mesh : trimesh.Trimesh The mesh to visualize. T_mesh_world : autolab_core.RigidTransform The pose of the mesh, specified as a transformation from mesh frame to world frame. style : str Triangular mesh style, either 'surface' or 'wireframe'. 
smooth : bool If true, the mesh is smoothed before rendering. color : 3-tuple Color tuple. name : str A name for the object to be added." "def chunks(iterable, size=50): """"""Break an iterable into lists of size"""""" batch = [] for n in iterable: batch.append(n) if len(batch) % size == 0: yield batch batch = [] if batch: yield batch",Break an iterable into lists of size "def _initial_broks(self, broker_name): """"""Get initial_broks from the scheduler This is used by the brokers to prepare the initial status broks This do not send broks, it only makes scheduler internal processing. Then the broker must use the *_broks* API to get all the stuff :param broker_name: broker name, used to filter broks :type broker_name: str :return: None """""" with self.app.conf_lock: logger.info(""A new broker just connected : %s"", broker_name) return self.app.sched.fill_initial_broks(broker_name)","Get initial_broks from the scheduler This is used by the brokers to prepare the initial status broks This do not send broks, it only makes scheduler internal processing. Then the broker must use the *_broks* API to get all the stuff :param broker_name: broker name, used to filter broks :type broker_name: str :return: None" "def download_file(save_path, file_url): """""" Download file from http url link """""" r = requests.get(file_url) # create HTTP response object with open(save_path, 'wb') as f: f.write(r.content) return save_path",Download file from http url link "def create_attachment(cls, session, attachment): """"""Create an attachment. An attachment must be sent to the API before it can be used in a thread. Use this method to create the attachment, then use the resulting hash when creating a thread. Note that HelpScout only supports attachments of 10MB or lower. Args: session (requests.sessions.Session): Authenticated session. attachment (helpscout.models.Attachment): The attachment to be created. Returns: helpscout.models.Attachment: The newly created attachment (hash property only). Use this hash when associating the attachment with a new thread. """""" return super(Conversations, cls).create( session, attachment, endpoint_override='/attachments.json', out_type=Attachment, )","Create an attachment. An attachment must be sent to the API before it can be used in a thread. Use this method to create the attachment, then use the resulting hash when creating a thread. Note that HelpScout only supports attachments of 10MB or lower. Args: session (requests.sessions.Session): Authenticated session. attachment (helpscout.models.Attachment): The attachment to be created. Returns: helpscout.models.Attachment: The newly created attachment (hash property only). Use this hash when associating the attachment with a new thread." "def pending(): """"""Show the number of pending signals by signal type."""""" signalbus = current_app.extensions['signalbus'] pending = [] total_pending = 0 for signal_model in signalbus.get_signal_models(): count = signal_model.query.count() if count > 0: pending.append((count, signal_model.__name__)) total_pending += count if pending: pending.sort() max_chars = len(str(pending[-1][0])) for n, signal_name in pending: click.echo('{} of type ""{}""'.format(str(n).rjust(max_chars), signal_name)) click.echo(25 * '-') click.echo('Total pending: {} '.format(total_pending))",Show the number of pending signals by signal type. "def order_properties(self, property_list=None): """""" Order the properties of a part model using a list of property objects or property names or property id's. 
:param property_list: ordered list of property names (basestring) or property id's (uuid) :type property_list: list(basestring) :returns: the :class:`Part` with the reordered list of properties :raises APIError: when an Error occurs :raises IllegalArgumentError: When provided a wrong argument Examples -------- >>> front_fork = client.scope('Bike Project').model('Front Fork') >>> front_fork.order_properties(['Material', 'Height (mm)', 'Color']) >>> front_fork = client.scope('Bike Project').model('Front Fork') >>> material = front_fork.property('Material') >>> height = front_fork.property('Height (mm)') >>> color = front_fork.property('Color') >>> front_fork.order_properties([material, height, color]) """""" if self.category != Category.MODEL: raise APIError(""Part should be of category MODEL"") if not isinstance(property_list, list): raise IllegalArgumentError('Expected a list of strings or Property() objects, got a {} object'. format(type(property_list))) order_dict = dict() for prop in property_list: if isinstance(prop, (str, text_type)): order_dict[self.property(name=prop).id] = property_list.index(prop) else: order_dict[prop.id] = property_list.index(prop) r = self._client._request('PUT', self._client._build_url('part', part_id=self.id), data=dict( property_order=json.dumps(order_dict) )) if r.status_code != requests.codes.ok: # pragma: no cover raise APIError(""Could not reorder properties"")","Order the properties of a part model using a list of property objects or property names or property id's. :param property_list: ordered list of property names (basestring) or property id's (uuid) :type property_list: list(basestring) :returns: the :class:`Part` with the reordered list of properties :raises APIError: when an Error occurs :raises IllegalArgumentError: When provided a wrong argument Examples -------- >>> front_fork = client.scope('Bike Project').model('Front Fork') >>> front_fork.order_properties(['Material', 'Height (mm)', 'Color']) >>> front_fork = client.scope('Bike Project').model('Front Fork') >>> material = front_fork.property('Material') >>> height = front_fork.property('Height (mm)') >>> color = front_fork.property('Color') >>> front_fork.order_properties([material, height, color])" "def put(self, *items) -> ""AttrIndexedDict"": ""Add items to the dict that will be indexed by self.attr."" for item in items: self.data[getattr(item, self.attr)] = item return self",Add items to the dict that will be indexed by self.attr. "def register_subclass(cls): """"""Register a subclass for a given interface type and resource class. """""" key = cls.interface_type, cls.resource_class if key in _SUBCLASSES: raise ValueError('Class already registered for %s and %s' % key) _SUBCLASSES[(cls.interface_type, cls.resource_class)] = cls _INTERFACE_TYPES.add(cls.interface_type) _RESOURCE_CLASSES[cls.interface_type].add(cls.resource_class) if cls.is_rc_optional: if cls.interface_type in _DEFAULT_RC: raise ValueError('Default already specified for %s' % cls.interface_type) _DEFAULT_RC[cls.interface_type] = cls.resource_class return cls",Register a subclass for a given interface type and resource class. "def _set_function_node_output(self, node_id, node_attr, no_call, next_nds=None, **kw): """""" Set the function node output from node inputs. :param node_id: Function node id. :type node_id: str :param node_attr: Dictionary of node attributes. :type node_attr: dict[str, T] :param no_call: If True data node estimation function is not used. 
:type no_call: bool :return: If the output have been evaluated correctly. :rtype: bool """""" # Namespace shortcuts for speed. o_nds, dist = node_attr['outputs'], self.dist # List of nodes that can still be estimated by the function node. output_nodes = next_nds or set(self._succ[node_id]).difference(dist) if not output_nodes: # This function is not needed. self.workflow.remove_node(node_id) # Remove function node. return False wf_add_edge = self._wf_add_edge # Namespace shortcuts for speed. if no_call: for u in output_nodes: # Set workflow out. wf_add_edge(node_id, u) return True args = self._wf_pred[node_id] # List of the function's arguments. args = [args[k]['value'] for k in node_attr['inputs']] try: self._check_function_domain(args, node_attr, node_id) res = async_thread(self, args, node_attr, node_id, **kw) # noinspection PyUnresolvedReferences self.workflow.node[node_id]['results'] = res except SkipNode: return False # Set workflow. for k, v in zip(o_nds, res if len(o_nds) > 1 else [res]): if k in output_nodes and v is not NONE: wf_add_edge(node_id, k, value=v) return True","Set the function node output from node inputs. :param node_id: Function node id. :type node_id: str :param node_attr: Dictionary of node attributes. :type node_attr: dict[str, T] :param no_call: If True data node estimation function is not used. :type no_call: bool :return: If the output have been evaluated correctly. :rtype: bool" "def capture_moves(self, position): """""" Finds out all possible capture moves :rtype: list """""" try: right_diagonal = self.square_in_front(self.location.shift_right()) for move in self._one_diagonal_capture_square(right_diagonal, position): yield move except IndexError: pass try: left_diagonal = self.square_in_front(self.location.shift_left()) for move in self._one_diagonal_capture_square(left_diagonal, position): yield move except IndexError: pass","Finds out all possible capture moves :rtype: list" "def export_context(target_zip): """""" Append context.json to target_zip """""" from django_productline import utils context_file = tasks.get_context_path() return utils.create_or_append_to_zip(context_file, target_zip, 'context.json')",Append context.json to target_zip "def in_string(objet, pattern): """""" abstractSearch dans une chaine, sans tenir compte de la casse. """""" return bool(re.search(pattern, str(objet), flags=re.I)) if objet else False","abstractSearch dans une chaine, sans tenir compte de la casse." "def get(self, key: str, *, prompt: Optional[Message_T] = None, arg_filters: Optional[List[Filter_T]] = None, **kwargs) -> Any: """""" Get an argument with a given key. If the argument does not exist in the current session, a pause exception will be raised, and the caller of the command will know it should keep the session for further interaction with the user. :param key: argument key :param prompt: prompt to ask the user :param arg_filters: argument filters for the next user input :return: the argument value """""" if key in self.state: return self.state[key] self.current_key = key self.current_arg_filters = arg_filters self._current_send_kwargs = kwargs self.pause(prompt, **kwargs)","Get an argument with a given key. If the argument does not exist in the current session, a pause exception will be raised, and the caller of the command will know it should keep the session for further interaction with the user. 
:param key: argument key :param prompt: prompt to ask the user :param arg_filters: argument filters for the next user input :return: the argument value" "def getTotalDiscountedPrice(self): """"""Compute total discounted price """""" price = self.getDiscountedPrice() vat = self.getVAT() price = price and price or 0 vat = vat and vat or 0 return float(price) + (float(price) * float(vat)) / 100",Compute total discounted price "def _concrete_acl_list(self, acl_docs): """"""Concretize a list of ACL documents. :param list acl_docs: A list of ACL documents. Should come from the API. :returns: A list of :py:class:`ACL` objects. :rtype: list """""" if not acl_docs: return [] return list(filter(None, [self._concrete_acl(acl_doc=doc) for doc in acl_docs]))","Concretize a list of ACL documents. :param list acl_docs: A list of ACL documents. Should come from the API. :returns: A list of :py:class:`ACL` objects. :rtype: list" "def process_entries( self, omimids, transform, included_fields=None, graph=None, limit=None, globaltt=None ): """""" Given a list of omim ids, this will use the omim API to fetch the entries, according to the ```included_fields``` passed as a parameter. If a transformation function is supplied, this will iterate over each entry, and either add the results to the supplied ```graph``` or will return a set of processed entries that the calling function can further iterate. If no ```included_fields``` are provided, this will simply fetch the basic entry from omim, which includes an entry's: prefix, mimNumber, status, and titles. :param omimids: the set of omim entry ids to fetch using their API :param transform: Function to transform each omim entry when looping :param included_fields: A set of what fields are required to retrieve from the API :param graph: the graph to add the transformed data into :return: """""" omimparams = {} # add the included_fields as parameters if included_fields is not None and included_fields: omimparams['include'] = ','.join(included_fields) processed_entries = list() # scrub any omim prefixes from the omimids before processing # cleanomimids = set() # for omimid in omimids: # scrubbed = str(omimid).split(':')[-1] # if re.match(r'^\d+$', str(scrubbed)): # cleanomimids.update(scrubbed) # omimids = list(cleanomimids) cleanomimids = [o.split(':')[-1] for o in omimids] diff = set(omimids) - set(cleanomimids) if diff: LOG.warning('OMIM has %i dirty bits see""\n %s', len(diff), str(diff)) omimids = cleanomimids else: cleanomimids = list() acc = 0 # for counting # note that you can only do request batches of 20 # see info about ""Limits"" at http://omim.org/help/api # TODO 2017 May seems a majority of many groups of 20 # are producing python None for RDF triple Objects groupsize = 20 if not self.test_mode and limit is not None: # just in case the limit is larger than the number of records, maxit = limit if limit > len(omimids): maxit = len(omimids) else: maxit = len(omimids) while acc < maxit: end = min((maxit, acc + groupsize)) # iterate through the omim ids list, # and fetch from the OMIM api in batches of 20 if self.test_mode: intersect = list( set([str(i) for i in self.test_ids]) & set(omimids[acc:end])) # some of the test ids are in the omimids if intersect: LOG.info(""found test ids: %s"", intersect) omimparams.update({'mimNumber': ','.join(intersect)}) else: acc += groupsize continue else: omimparams.update({'mimNumber': ','.join(omimids[acc:end])}) url = OMIMAPI + urllib.parse.urlencode(omimparams) try: req = urllib.request.urlopen(url) except HTTPError 
as e: # URLError? LOG.warning('fetching: %s', url) error_msg = e.read() if re.search(r'The API key: .* is invalid', str(error_msg)): msg = ""API Key not valid"" raise HTTPError(url, e.code, msg, e.hdrs, e.fp) LOG.error(""Failed with: %s"", str(error_msg)) break resp = req.read().decode() acc += groupsize myjson = json.loads(resp) # snag a copy with open('./raw/omim/_' + str(acc) + '.json', 'w') as fp: json.dump(myjson, fp) entries = myjson['omim']['entryList'] for e in entries: # apply the data transformation, and save it to the graph processed_entry = transform(e, graph, globaltt) if processed_entry is not None: processed_entries.append(processed_entry) # ### end iterating over batch of entries return processed_entries","Given a list of omim ids, this will use the omim API to fetch the entries, according to the ```included_fields``` passed as a parameter. If a transformation function is supplied, this will iterate over each entry, and either add the results to the supplied ```graph``` or will return a set of processed entries that the calling function can further iterate. If no ```included_fields``` are provided, this will simply fetch the basic entry from omim, which includes an entry's: prefix, mimNumber, status, and titles. :param omimids: the set of omim entry ids to fetch using their API :param transform: Function to transform each omim entry when looping :param included_fields: A set of what fields are required to retrieve from the API :param graph: the graph to add the transformed data into :return:" "async def generate_credentials(self): """"""Create new credentials for authentication. Credentials that have been authenticated shall be saved and loaded with load_credentials before playing anything. If credentials are lost, authentication must be performed again. """""" identifier, seed = new_credentials() return '{0}:{1}'.format(identifier, seed.decode().upper())","Create new credentials for authentication. Credentials that have been authenticated shall be saved and loaded with load_credentials before playing anything. If credentials are lost, authentication must be performed again." 
"def pivot_table(self, index, columns, values='value', aggfunc='count', fill_value=None, style=None): """"""Returns a pivot table Parameters ---------- index: str or list of strings rows for Pivot table columns: str or list of strings columns for Pivot table values: str, default 'value' dataframe column to aggregate or count aggfunc: str or function, default 'count' function used for aggregation, accepts 'count', 'mean', and 'sum' fill_value: scalar, default None value to replace missing values with style: str, default None output style for pivot table formatting accepts 'highlight_not_max', 'heatmap' """""" index = [index] if isstr(index) else index columns = [columns] if isstr(columns) else columns df = self.data # allow 'aggfunc' to be passed as string for easier user interface if isstr(aggfunc): if aggfunc == 'count': df = self.data.groupby(index + columns, as_index=False).count() fill_value = 0 elif aggfunc == 'mean': df = self.data.groupby(index + columns, as_index=False).mean()\ .round(2) aggfunc = np.sum fill_value = 0 if style == 'heatmap' else """" elif aggfunc == 'sum': aggfunc = np.sum fill_value = 0 if style == 'heatmap' else """" df = df.pivot_table(values=values, index=index, columns=columns, aggfunc=aggfunc, fill_value=fill_value) return df","Returns a pivot table Parameters ---------- index: str or list of strings rows for Pivot table columns: str or list of strings columns for Pivot table values: str, default 'value' dataframe column to aggregate or count aggfunc: str or function, default 'count' function used for aggregation, accepts 'count', 'mean', and 'sum' fill_value: scalar, default None value to replace missing values with style: str, default None output style for pivot table formatting accepts 'highlight_not_max', 'heatmap'" "def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0): """"""Display the system as points. :param float size: the size of the points. """""" if colorlist is None: colorlist = [get_atom_color(t) for t in self.topology['atom_types']] if highlight is not None: if isinstance(highlight, int): colorlist[highlight] = 0xff0000 if isinstance(highlight, (list, np.ndarray)): for i in highlight: colorlist[i] = 0xff0000 sizes = [size] * len(self.topology['atom_types']) points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'), 'colors': colorlist, 'sizes': sizes, 'opacity': opacity}) # Update closure def update(self=self, points=points): self.update_representation(points, {'coordinates': self.coordinates.astype('float32')}) self.update_callbacks.append(update) self.autozoom(self.coordinates)","Display the system as points. :param float size: the size of the points." "def str2hashalgo(description): '''Convert the name of a hash algorithm as described in the OATH specifications, to a python object handling the digest algorithm interface, PEP-xxx. :param description the name of the hash algorithm, example :rtype: a hash algorithm class constructor ''' algo = getattr(hashlib, description.lower(), None) if not callable(algo): raise ValueError('Unknown hash algorithm %s' % description) return algo","Convert the name of a hash algorithm as described in the OATH specifications, to a python object handling the digest algorithm interface, PEP-xxx. 
:param description the name of the hash algorithm, example :rtype: a hash algorithm class constructor" "def execute(self, jvm_options=None, args=None, executor=None, workunit_factory=None, workunit_name=None, workunit_labels=None): """"""Executes the ivy commandline client with the given args. Raises Ivy.Error if the command fails for any reason. :param executor: Java executor to run ivy with. """""" # NB(gmalmquist): It should be OK that we can't declare a subsystem_dependency in this file # (because it's just a plain old object), because Ivy is only constructed by Bootstrapper, which # makes an explicit call to IvySubsystem.global_instance() in its constructor, which in turn has # a declared dependency on DistributionLocator. executor = executor or SubprocessExecutor(DistributionLocator.cached()) runner = self.runner(jvm_options=jvm_options, args=args, executor=executor) try: with self.resolution_lock: result = util.execute_runner(runner, workunit_factory, workunit_name, workunit_labels) if result != 0: raise self.Error('Ivy command failed with exit code {}{}'.format( result, ': ' + ' '.join(args) if args else '')) except executor.Error as e: raise self.Error('Problem executing ivy: {}'.format(e))","Executes the ivy commandline client with the given args. Raises Ivy.Error if the command fails for any reason. :param executor: Java executor to run ivy with." "def do_info(self, subcmd, opts, *args): """"""Display information about a file or directory. usage: info [PATH...] Print information about each PATH (default: '.'). ${cmd_option_list} """""" print ""'svn %s' opts: %s"" % (subcmd, opts) print ""'svn %s' args: %s"" % (subcmd, args)","Display information about a file or directory. usage: info [PATH...] Print information about each PATH (default: '.'). ${cmd_option_list}" "def format_datetime(time): """""" Formats a date, converting the time to the user timezone if one is specified """""" user_time_zone = timezone.get_current_timezone() if time.tzinfo is None: time = time.replace(tzinfo=pytz.utc) user_time_zone = pytz.timezone(getattr(settings, 'USER_TIME_ZONE', 'GMT')) time = time.astimezone(user_time_zone) return time.strftime(""%b %d, %Y %H:%M"")","Formats a date, converting the time to the user timezone if one is specified" "def _clamp_string(self, row_item, column_index, delimiter=''): """"""Clamp `row_item` to fit in column referred by column_index. This method considers padding and appends the delimiter if `row_item` needs to be truncated. Parameters ---------- row_item: str String which should be clamped. column_index: int Index of the column `row_item` belongs to. delimiter: str String which is to be appended to the clamped string. Returns ------- str The modified string which fits in it's column. """""" width = (self._table.column_widths[column_index] - self._table.left_padding_widths[column_index] - self._table.right_padding_widths[column_index]) if termwidth(row_item) <= width: return row_item else: if width - len(delimiter) >= 0: clamped_string = (textwrap(row_item, width-len(delimiter))[0] + delimiter) else: clamped_string = delimiter[:width] return clamped_string","Clamp `row_item` to fit in column referred by column_index. This method considers padding and appends the delimiter if `row_item` needs to be truncated. Parameters ---------- row_item: str String which should be clamped. column_index: int Index of the column `row_item` belongs to. delimiter: str String which is to be appended to the clamped string. 
Returns ------- str The modified string which fits in it's column." "def urljoin(domain, path=None, scheme=None): """""" Joins a domain, path and scheme part together, returning a full URL. :param domain: the domain, e.g. ``example.com`` :param path: the path part of the URL, e.g. ``/example/`` :param scheme: the scheme part of the URL, e.g. ``http``, defaulting to the value of ``settings.DEFAULT_URL_SCHEME`` :returns: a full URL """""" if scheme is None: scheme = getattr(settings, 'DEFAULT_URL_SCHEME', 'http') return urlunparse((scheme, domain, path or '', None, None, None))","Joins a domain, path and scheme part together, returning a full URL. :param domain: the domain, e.g. ``example.com`` :param path: the path part of the URL, e.g. ``/example/`` :param scheme: the scheme part of the URL, e.g. ``http``, defaulting to the value of ``settings.DEFAULT_URL_SCHEME`` :returns: a full URL" "def remove_handler(self, handler): """""" Removes a previously added event handler. """""" while handler in self.handlers: self.handlers.remove(handler)",Removes a previously added event handler. "def lint(to_lint): """""" Run all linters against a list of files. :param to_lint: a list of files to lint. """""" exit_code = 0 for linter, options in (('pyflakes', []), ('pep8', [])): try: output = local[linter](*(options + to_lint)) except commands.ProcessExecutionError as e: output = e.stdout if output: exit_code = 1 print ""{0} Errors:"".format(linter) print output output = hacked_pep257(to_lint) if output: exit_code = 1 print ""Docstring Errors:"".format(linter.upper()) print output sys.exit(exit_code)","Run all linters against a list of files. :param to_lint: a list of files to lint." "def make_api_method(func): """""" Provides a single entry point for modifying all API methods. For now this is limited to allowing the client object to be modified with an `extra_params` keyword arg to each method, that is then used as the params for each web service request. Please note that this is an unsupported feature for advanced use only. It's also currently incompatibile with multiple threads, see GH #160. """""" @functools.wraps(func) def wrapper(*args, **kwargs): args[0]._extra_params = kwargs.pop(""extra_params"", None) result = func(*args, **kwargs) try: del args[0]._extra_params except AttributeError: pass return result return wrapper","Provides a single entry point for modifying all API methods. For now this is limited to allowing the client object to be modified with an `extra_params` keyword arg to each method, that is then used as the params for each web service request. Please note that this is an unsupported feature for advanced use only. It's also currently incompatibile with multiple threads, see GH #160." "def put(self, id): """""" Update a revision by ID :param id: BSON id :return: """""" collection_name = self.request.headers.get(""collection"") if not collection_name: self.raise_error(400, ""Missing a collection name header"") self.client = BaseAsyncMotorDocument(""%s_revisions"" % collection_name) super(self.__class__, self).put(id)","Update a revision by ID :param id: BSON id :return:" "def save_prep(cls, instance_or_instances): """"""Preprocess the object before the object is saved. This automatically gets called when the save method gets called. 
"""""" instances = make_obj_list(instance_or_instances) tokens = set(cls.objects.get_available_tokens( count=len(instances), token_length=cls.token_length )) for instance in instances: if not instance.token: instance.token = tokens.pop() super(AbstractTokenModel, cls).save_prep( instance_or_instances=instances )","Preprocess the object before the object is saved. This automatically gets called when the save method gets called." "def codes(self): ''' Get iterator for all unicode code points contained in this range group. :yields: iterator of character index (int) :ytype: int ''' for start, end in self: for item in range(start, end): yield item","Get iterator for all unicode code points contained in this range group. :yields: iterator of character index (int) :ytype: int" "def serialize(self): """""" Generates the messaging-type-related part of the message dictionary. """""" if self.response is not None: return {'messaging_type': 'RESPONSE'} if self.update is not None: return {'messaging_type': 'UPDATE'} if self.tag is not None: return { 'messaging_type': 'MESSAGE_TAG', 'tag': self.tag.value, } if self.subscription is not None: return {'messaging_type': 'NON_PROMOTIONAL_SUBSCRIPTION'}",Generates the messaging-type-related part of the message dictionary. "def filter_genes(self, gene_names : Iterable[str]): """"""Filter the expression matrix against a set of genes. Parameters ---------- gene_names: list of str The genome to filter the genes against. Returns ------- ExpMatrix The filtered expression matrix. """""" filt = self.loc[self.index & gene_names] return filt","Filter the expression matrix against a set of genes. Parameters ---------- gene_names: list of str The genome to filter the genes against. Returns ------- ExpMatrix The filtered expression matrix." "def wrap_parser_error(self, data, renderer_context): """""" Convert parser errors to the JSON API Error format Parser errors have a status code of 400, like field errors, but have the same native format as generic errors. Also, the detail message is often specific to the input, so the error is listed as a 'detail' rather than a 'title'. """""" response = renderer_context.get(""response"", None) status_code = response and response.status_code if status_code != 400: raise WrapperNotApplicable('Status code must be 400.') if list(data.keys()) != ['detail']: raise WrapperNotApplicable('Data must only have ""detail"" key.') # Probably a parser error, unless `detail` is a valid field view = renderer_context.get(""view"", None) model = self.model_from_obj(view) if 'detail' in model._meta.get_all_field_names(): raise WrapperNotApplicable() return self.wrap_error( data, renderer_context, keys_are_fields=False, issue_is_title=False)","Convert parser errors to the JSON API Error format Parser errors have a status code of 400, like field errors, but have the same native format as generic errors. Also, the detail message is often specific to the input, so the error is listed as a 'detail' rather than a 'title'." "def make_button_widget(cls, label, file_path=None, handler=None, style=None, layout=Layout(width='auto')): ""Return a Button widget with specified `handler`."" btn = widgets.Button(description=label, layout=layout) if handler is not None: btn.on_click(handler) if style is not None: btn.button_style = style btn.file_path = file_path btn.flagged_for_delete = False return btn",Return a Button widget with specified `handler`. "def get_bases(definition_dict, loader): """"""Collect dependencies. 
"""""" bases = definition_dict.get('bases', ()) if bases: bases = (loader.get_comp_dict(required_version=SPEC_VERSION_TUPLE[0], **b) for b in bases) return SimpleChainmap(definition_dict, *bases) else: return definition_dict",Collect dependencies. "def quick_layout_settings(self): """"""Layout settings dialog"""""" get = CONF.get set_ = CONF.set section = 'quick_layouts' names = get(section, 'names') order = get(section, 'order') active = get(section, 'active') dlg = self.dialog_layout_settings(self, names, order, active) if dlg.exec_(): set_(section, 'names', dlg.names) set_(section, 'order', dlg.order) set_(section, 'active', dlg.active) self.quick_layout_set_menu()",Layout settings dialog "def find_new_posts_and_pages(db): """"""Walk content dir, put each post and page in the database"""""" Q = Query() for root, dirs, files in os.walk(CONFIG['content_root']): for filename in sorted([f for f in files if f.endswith(('md', 'markdown'))]): fullpath = os.path.join(root, filename) _p = fullpath.split(CONFIG['content_root'])[-1].lstrip('/') new_mtime = int(os.path.getmtime(fullpath)) e, item = None, None for collection in ['posts', 'pages']: item = db[collection].get(Q.filename == _p) if item: if new_mtime > item['mtime']: db[collection].update({'mtime': new_mtime}, doc_ids=[item.doc_id]) e = Entry(fullpath, doc_id=item.doc_id) break if not item: e = Entry(fullpath) if e: yield e, e.id","Walk content dir, put each post and page in the database" "def Handle(self, args, token=None): """"""Renders list of descriptors for all the flows."""""" if data_store.RelationalDBEnabled(): flow_iterator = iteritems(registry.FlowRegistry.FLOW_REGISTRY) else: flow_iterator = iteritems(registry.AFF4FlowRegistry.FLOW_REGISTRY) result = [] for name, cls in sorted(flow_iterator): # Flows without a category do not show up in the GUI. if not getattr(cls, ""category"", None): continue # Only show flows that the user is allowed to start. try: if self.access_check_fn: self.access_check_fn(token.username, name) except access_control.UnauthorizedAccess: continue result.append(ApiFlowDescriptor().InitFromFlowClass(cls, token=token)) return ApiListFlowDescriptorsResult(items=result)",Renders list of descriptors for all the flows. "def set_group_member_orphan(self, member_id): """""" Make a non-orphan member trigger into an orphan. :param member_id: Member Trigger id to be made an orphan. """""" self._put(self._service_url(['triggers', 'groups', 'members', member_id, 'orphan']), data=None, parse_json=False)","Make a non-orphan member trigger into an orphan. :param member_id: Member Trigger id to be made an orphan." "def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. :return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results","Get values for some options and a given section from a config file. 
:param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. :return:" "def sanitize_filename(filename): """""" Make sure filenames are valid paths. Returns: str: """""" sanitized_filename = re.sub(r'[/\\:*?""<>|]', '-', filename) sanitized_filename = sanitized_filename.replace('&', 'and') sanitized_filename = sanitized_filename.replace('""', '') sanitized_filename = sanitized_filename.replace(""'"", '') sanitized_filename = sanitized_filename.replace(""/"", '') sanitized_filename = sanitized_filename.replace(""\\"", '') # Annoying. if sanitized_filename[0] == '.': sanitized_filename = u'dot' + sanitized_filename[1:] return sanitized_filename","Make sure filenames are valid paths. Returns: str:" "def widen(self): """"""Increase the interval size."""""" t, h = self.time, self.half_duration h *= self.scaling_coeff_x self.set_interval((t - h, t + h))",Increase the interval size. "def _get_tag(self, response, tag_name=""html"", encoding=""utf-8""): """""" Iterates response content and returns the tag if found. If not found, the response content is fully consumed so self._html equals response.content, and it returns None. """""" def find_tag(tag_name): tag_start = tag_end = None found = lambda: \ tag_start is not None and tag_end is not None html = self._html.lower() start = html.find(""<%s"" % tag_name) if start >= 0: tag_start = start else: return None # no tag end = html.find(""</%s"" % tag_name) if end > tag_start: tag_end = end+len(tag_name)+3 elif consumed: tag_end = -1 # till the end if found(): return self._html[tag_start:tag_end] return None consumed = getattr(response, 'consumed', False) if not consumed: stream = getattr(response, 'stream', None) if stream is None: stream = response.iter_content(config.CHUNK_SIZE) # , decode_unicode=True response.stream = stream while True: try: chunk = next(stream) self._html += chunk tag = find_tag(tag_name) if tag: return tag if len(self._html) > config.HTML_MAX_BYTESIZE: raise HTMLParseError('Maximum response size reached.') except StopIteration: response.consumed = True tag = find_tag(tag_name) return decode(tag, encoding)","Iterates response content and returns the tag if found. If not found, the response content is fully consumed so self._html equals response.content, and it returns None." "def write_info(self, w): """""" Writes TVP_TYPENAME structure spec: https://msdn.microsoft.com/en-us/library/dd302994.aspx @param w: TdsWriter @return: """""" w.write_b_varchar("""") # db_name, should be empty w.write_b_varchar(self._table_type.typ_schema) w.write_b_varchar(self._table_type.typ_name)","Writes TVP_TYPENAME structure spec: https://msdn.microsoft.com/en-us/library/dd302994.aspx @param w: TdsWriter @return:" "def calculate_offset(percent, original_length, length): """""" Calculates crop offset based on percentage. :param percent: A percentage representing the size of the offset. :param original_length: The length the distance that should be cropped. :param length: The desired length. :return: The offset in pixels :rtype: int """""" return int( max( 0, min(percent * original_length / 100.0, original_length - length / 2) - length / 2) )","Calculates crop offset based on percentage. :param percent: A percentage representing the size of the offset. :param original_length: The length the distance that should be cropped. :param length: The desired length. 
:return: The offset in pixels :rtype: int" "def get(self, name, handler, request=None): """"""Begin Fetch of current value of a PV :param name: A single name string or list of name strings :param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default. :param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled :returns: A object with a method cancel() which may be used to abort the operation. """""" chan = self._channel(name) return _p4p.ClientOperation(chan, handler=unwrapHandler(handler, self._nt), pvRequest=wrapRequest(request), get=True, put=False)","Begin Fetch of current value of a PV :param name: A single name string or list of name strings :param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default. :param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled :returns: A object with a method cancel() which may be used to abort the operation." "def _get_event_cls(view_obj, events_map): """""" Helper function to get event class. :param view_obj: Instance of View that processes the request. :param events_map: Map of events from which event class should be picked. :returns: Found event class. """""" request = view_obj.request view_method = getattr(view_obj, request.action) event_action = ( getattr(view_method, '_event_action', None) or request.action) return events_map[event_action]","Helper function to get event class. :param view_obj: Instance of View that processes the request. :param events_map: Map of events from which event class should be picked. :returns: Found event class." "def get_mosaic_by_name(self, name): '''Get the API representation of a mosaic by name. :param name str: The name of the mosaic :returns: :py:Class:`planet.api.models.Mosaics` :raises planet.api.exceptions.APIException: On API error. ''' params = {'name__is': name} url = self._url('basemaps/v1/mosaics') return self._get(url, models.Mosaics, params=params).get_body()","Get the API representation of a mosaic by name. :param name str: The name of the mosaic :returns: :py:Class:`planet.api.models.Mosaics` :raises planet.api.exceptions.APIException: On API error." "def facter_info(): """"""Returns data from facter. """""" with suppress(FileNotFoundError): # facter may not be installed proc = subprocess.Popen(['facter', '--yaml'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = proc.communicate() if not proc.returncode: data = serializer.load(stdout) return {'facter': data}",Returns data from facter. "def sample_observed_state(self, s: pd.Series) -> Dict: """""" Sample observed state vector. This is the implementation of the emission function. Args: s: Latent state vector. Returns: Observed state vector. """""" return { n[0]: { i.name: np.random.normal(s[n[0]] * i.mean, i.stdev) for i in n[1][""indicators""].values() } for n in self.nodes(data=True) }","Sample observed state vector. This is the implementation of the emission function. Args: s: Latent state vector. Returns: Observed state vector." "def _merge_pool_kwargs(self, override): """""" Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. 
"""""" base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs","Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary." "def list_group_memberships(self, group_id, filter_states=None): """""" List group memberships. List the members of a group. """""" path = {} data = {} params = {} # REQUIRED - PATH - group_id """"""ID"""""" path[""group_id""] = group_id # OPTIONAL - filter_states """"""Only list memberships with the given workflow_states. By default it will return all memberships."""""" if filter_states is not None: self._validate_enum(filter_states, [""accepted"", ""invited"", ""requested""]) params[""filter_states""] = filter_states self.logger.debug(""GET /api/v1/groups/{group_id}/memberships with query params: {params} and form data: {data}"".format(params=params, data=data, **path)) return self.generic_request(""GET"", ""/api/v1/groups/{group_id}/memberships"".format(**path), data=data, params=params, all_pages=True)","List group memberships. List the members of a group." "def cos(cls, x: 'TensorFluent') -> 'TensorFluent': '''Returns a TensorFluent for the cos function. Args: x: The input fluent. Returns: A TensorFluent wrapping the cos function. ''' return cls._unary_op(x, tf.cos, tf.float32)","Returns a TensorFluent for the cos function. Args: x: The input fluent. Returns: A TensorFluent wrapping the cos function." "def to_html(ds: Any) -> str: """""" Return an HTML representation of the loom file or view, showing the upper-left 10x10 corner. """""" rm = min(10, ds.shape[0]) cm = min(10, ds.shape[1]) html = ""
<p>"" if ds.attrs.__contains__(""title""): html += ""<strong>"" + ds.attrs[""title""] + ""</strong> "" html += f""{ds.shape[0]} rows, {ds.shape[1]} columns, {len(ds.layers)} layer{'s' if len(ds.layers) > 1 else ''}<br/>(showing up to 10x10)<br/>"" html += ds.filename + ""<br/>"" for (name, val) in ds.attrs.items(): html += f""name: {val}<br/>"" html += ""<table>"" # Emit column attributes for ca in ds.col_attrs.keys(): html += ""<tr>"" for ra in ds.row_attrs.keys(): html += ""<td>&nbsp;</td>"" # Space for row attrs html += ""<td><strong>"" + ca + ""</strong></td>"" # Col attr name for v in ds.col_attrs[ca][:cm]: html += ""<td>"" + str(v) + ""</td>"" if ds.shape[1] > cm: html += ""<td>...</td>"" html += ""</tr>"" # Emit row attribute names html += ""<tr>"" for ra in ds.row_attrs.keys(): html += ""<td><strong>"" + ra + ""</strong></td>"" # Row attr name html += ""<td>&nbsp;</td>"" # Space for col attrs for v in range(cm): html += ""<td>&nbsp;</td>"" if ds.shape[1] > cm: html += ""<td>...</td>"" html += ""</tr>"" # Emit row attr values and matrix values for row in range(rm): html += ""<tr>"" for ra in ds.row_attrs.keys(): html += ""<td>"" + str(ds.row_attrs[ra][row]) + ""</td>"" html += ""<td>&nbsp;</td>"" # Space for col attrs for v in ds[row, :cm]: html += ""<td>"" + str(v) + ""</td>"" if ds.shape[1] > cm: html += ""<td>...</td>"" html += ""</tr>"" # Emit ellipses if ds.shape[0] > rm: html += ""<tr>"" for v in range(rm + 1 + len(ds.row_attrs.keys())): html += ""<td>...</td>"" if ds.shape[1] > cm: html += ""<td>...</td>"" html += ""</tr>"" html += ""</table>
"" return html","Return an HTML representation of the loom file or view, showing the upper-left 10x10 corner." "def update_extent(self, extent): """"""Update extent value in GUI based from an extent. :param extent: A list in the form [xmin, ymin, xmax, ymax] where all coordinates provided are in Geographic / EPSG:4326. :type extent: list """""" self.x_minimum.setValue(extent[0]) self.y_minimum.setValue(extent[1]) self.x_maximum.setValue(extent[2]) self.y_maximum.setValue(extent[3]) # Updating the country if possible. rectangle = QgsRectangle(extent[0], extent[1], extent[2], extent[3]) center = rectangle.center() for country in self.bbox_countries: for polygon in self.bbox_countries[country]: if polygon.contains(center): index = self.country_comboBox.findText(country) self.country_comboBox.setCurrentIndex(index) break else: # Continue if the inner loop wasn't broken. continue # Inner loop was broken, break the outer. break else: self.country_comboBox.setCurrentIndex(0)","Update extent value in GUI based from an extent. :param extent: A list in the form [xmin, ymin, xmax, ymax] where all coordinates provided are in Geographic / EPSG:4326. :type extent: list" "def _parse_volume_descriptors(self): # type: () -> None ''' An internal method to parse the volume descriptors on an ISO. Parameters: None. Returns: Nothing. ''' # Ecma-119 says that the Volume Descriptor set is a sequence of volume # descriptors recorded in consecutively numbered Logical Sectors # starting with Logical Sector Number 16. Since sectors are 2048 bytes # in length, we start at sector 16 * 2048 # Ecma-119, 6.2.1 says that the Volume Space is divided into a System # Area and a Data Area, where the System Area is in logical sectors 0 # to 15, and whose contents is not specified by the standard. self._cdfp.seek(16 * 2048) while True: # All volume descriptors are exactly 2048 bytes long curr_extent = self._cdfp.tell() // 2048 vd = self._cdfp.read(2048) if len(vd) != 2048: raise pycdlibexception.PyCdlibInvalidISO('Failed to read entire volume descriptor') (desc_type, ident) = struct.unpack_from('=B5s', vd, 0) if desc_type not in (headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY, headervd.VOLUME_DESCRIPTOR_TYPE_SET_TERMINATOR, headervd.VOLUME_DESCRIPTOR_TYPE_BOOT_RECORD, headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY) or ident not in (b'CD001', b'BEA01', b'NSR02', b'TEA01'): # We read the next extent, and it wasn't a descriptor. Abort # the loop, remembering to back up the input file descriptor. self._cdfp.seek(-2048, os.SEEK_CUR) break if desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY: pvd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY) pvd.parse(vd, curr_extent) self.pvds.append(pvd) elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_SET_TERMINATOR: vdst = headervd.VolumeDescriptorSetTerminator() vdst.parse(vd, curr_extent) self.vdsts.append(vdst) elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_BOOT_RECORD: # Both an Ecma-119 Boot Record and a Ecma-TR 071 UDF-Bridge # Beginning Extended Area Descriptor have the first byte as 0, # so we can't tell which it is until we look at the next 5 # bytes (Boot Record will have 'CD001', BEAD will have 'BEA01'). 
if ident == b'CD001': br = headervd.BootRecord() br.parse(vd, curr_extent) self.brs.append(br) elif ident == b'BEA01': self._has_udf = True self.udf_bea.parse(vd, curr_extent) elif ident == b'NSR02': self.udf_nsr.parse(vd, curr_extent) elif ident == b'TEA01': self.udf_tea.parse(vd, curr_extent) else: # This isn't really possible, since we would have aborted # the loop above. raise pycdlibexception.PyCdlibInvalidISO('Invalid volume identification type') elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY: svd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY) svd.parse(vd, curr_extent) self.svds.append(svd) # Since we checked for the valid descriptors above, it is impossible # to see an invalid desc_type here, so no check necessary. # The language in Ecma-119, p.8, Section 6.7.1 says: # # The sequence shall contain one Primary Volume Descriptor (see 8.4) recorded at least once. # # The important bit there is ""at least one"", which means that we have # to accept ISOs with more than one PVD. if not self.pvds: raise pycdlibexception.PyCdlibInvalidISO('Valid ISO9660 filesystems must have at least one PVD') self.pvd = self.pvds[0] # Make sure any other PVDs agree with the first one. for pvd in self.pvds[1:]: if pvd != self.pvd: raise pycdlibexception.PyCdlibInvalidISO('Multiple occurrences of PVD did not agree!') pvd.root_dir_record = self.pvd.root_dir_record if not self.vdsts: raise pycdlibexception.PyCdlibInvalidISO('Valid ISO9660 filesystems must have at least one Volume Descriptor Set Terminator')","An internal method to parse the volume descriptors on an ISO. Parameters: None. Returns: Nothing." "def write_byte(self, address, value): """"""Writes the byte to unaddressed register in a device. """""" LOGGER.debug(""Writing byte %s to device %s!"", bin(value), hex(address)) return self.driver.write_byte(address, value)",Writes the byte to unaddressed register in a device. "def drug_name_to_generic(drug_name: str, unknown_to_default: bool = False, default: str = None, include_categories: bool = False) -> str: """""" Converts a drug name to the name of its generic equivalent. """""" drug = get_drug(drug_name, include_categories=include_categories) if drug is not None: return drug.generic_name return default if unknown_to_default else drug_name",Converts a drug name to the name of its generic equivalent. "def get_total_size(self, *files): """"""Calculate the total size of the given files """""" # Recursive unpack an eventual list of lists def iterate(item): if isinstance(item, (list, tuple)): for i in item: for ii in iterate(i): yield ii else: yield item # Calculate the total size of the given objects starting with an # initial size of 0 return reduce(lambda x, y: x + y, map(self.get_filesize, iterate(files)), 0)",Calculate the total size of the given files "def perm(A, p): """""" Symmetric permutation of a symmetric sparse matrix. :param A: :py:class:`spmatrix` :param p: :py:class:`matrix` or :class:`list` of length `A.size[0]` """""" assert isinstance(A,spmatrix), ""argument must be a sparse matrix"" assert A.size[0] == A.size[1], ""A must be a square matrix"" assert A.size[0] == len(p), ""length of p must be equal to the order of A"" return A[p,p]","Symmetric permutation of a symmetric sparse matrix. 
:param A: :py:class:`spmatrix` :param p: :py:class:`matrix` or :class:`list` of length `A.size[0]`" "def count_mismatches_before_variant(reference_prefix, cdna_prefix): """""" Computes the number of mismatching nucleotides between two cDNA sequences before a variant locus. Parameters ---------- reference_prefix : str cDNA sequence of a reference transcript before a variant locus cdna_prefix : str cDNA sequence detected from RNAseq before a variant locus """""" if len(reference_prefix) != len(cdna_prefix): raise ValueError( ""Expected reference prefix '%s' to be same length as %s"" % ( reference_prefix, cdna_prefix)) return sum(xi != yi for (xi, yi) in zip(reference_prefix, cdna_prefix))","Computes the number of mismatching nucleotides between two cDNA sequences before a variant locus. Parameters ---------- reference_prefix : str cDNA sequence of a reference transcript before a variant locus cdna_prefix : str cDNA sequence detected from RNAseq before a variant locus" "def list_sub_commmands(self, cmd_name, cmd): """"""Return all commands for a group"""""" ret = {} if isinstance(cmd, click.core.Group): for sub_cmd_name in cmd.commands: sub_cmd = cmd.commands[sub_cmd_name] sub = self.list_sub_commmands(sub_cmd_name, sub_cmd) if sub: if isinstance(sub, dict): for n, c in sub.items(): ret['%s %s' % (cmd_name, n)] = c else: ret['%s %s' % (cmd_name, sub[0])] = sub[1] elif isinstance(cmd, click.core.Command): return (cmd.name, cmd) return ret",Return all commands for a group "def geom2localortho(geom): """"""Convert existing geom to local orthographic projection Useful for local cartesian distance/area calculations """""" cx, cy = geom.Centroid().GetPoint_2D() lon, lat, z = cT_helper(cx, cy, 0, geom.GetSpatialReference(), wgs_srs) local_srs = localortho(lon,lat) local_geom = geom_dup(geom) geom_transform(local_geom, local_srs) return local_geom","Convert existing geom to local orthographic projection Useful for local cartesian distance/area calculations" "def maf_somatic_variant_stats(variant, variant_metadata): """""" Parse out the variant calling statistics for a given variant from a MAF file Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777 Parameters ---------- variant : varcode.Variant variant_metadata : dict Dictionary of metadata for this variant Returns ------- SomaticVariantStats """""" tumor_stats = None normal_stats = None if ""t_ref_count"" in variant_metadata: tumor_stats = _maf_variant_stats(variant, variant_metadata, prefix=""t"") if ""n_ref_count"" in variant_metadata: normal_stats = _maf_variant_stats(variant, variant_metadata, prefix=""n"") return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)","Parse out the variant calling statistics for a given variant from a MAF file Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777 Parameters ---------- variant : varcode.Variant variant_metadata : dict Dictionary of metadata for this variant Returns ------- SomaticVariantStats" "def get_filter_kwargs(self): """""" Translates the cleaned data into a dictionary that can used to generate the filter removing blank values. 
"""""" if self.is_valid(): filter_kwargs = {} for field in self.get_filter_fields(): empty_values = EMPTY_VALUES if hasattr(self.fields[field], 'empty_values'): empty_values = self.fields[field].empty_values value = self.cleaned_data.get(field) if not value in empty_values: if self.search_fields and field in self.search_fields: filter_kwargs[""%s__icontains"" % field] = value else: filter_kwargs[field] = value return filter_kwargs else: return {}","Translates the cleaned data into a dictionary that can used to generate the filter removing blank values." "def connect(*args, **kwargs): """"""Connect to the database. Passes arguments along to ``pymongo.connection.Connection`` unmodified. The Connection returned by this proxy method will be used by micromongo for all of its queries. Micromongo will alter the behavior of this conneciton object in some subtle ways; if you want a clean one, call ``micromongo.clean_connection`` after connecting."""""" global __connection, __connection_args __connection_args = (args, dict(kwargs)) # inject our class_router kwargs['class_router'] = class_router __connection = Connection(*args, **kwargs) return __connection","Connect to the database. Passes arguments along to ``pymongo.connection.Connection`` unmodified. The Connection returned by this proxy method will be used by micromongo for all of its queries. Micromongo will alter the behavior of this conneciton object in some subtle ways; if you want a clean one, call ``micromongo.clean_connection`` after connecting." "def get_print_setup(self, print_data): """"""Opens print setup dialog and returns print_data"""""" psd = wx.PageSetupDialogData(print_data) # psd.EnablePrinter(False) psd.CalculatePaperSizeFromId() dlg = wx.PageSetupDialog(self.main_window, psd) dlg.ShowModal() # this makes a copy of the wx.PrintData instead of just saving # a reference to the one inside the PrintDialogData that will # be destroyed when the dialog is destroyed data = dlg.GetPageSetupData() new_print_data = wx.PrintData(data.GetPrintData()) new_print_data.PaperId = data.PaperId new_print_data.PaperSize = data.PaperSize dlg.Destroy() return new_print_data",Opens print setup dialog and returns print_data "def _find_start_time(hdr, s_freq): """"""Find the start time, usually in STC, but if that's not correct, use ERD Parameters ---------- hdr : dict header with stc (and stamps) and erd s_freq : int sampling frequency Returns ------- datetime either from stc or from erd Notes ----- Sometimes, but rather rarely, there is a mismatch between the time in the stc and the time in the erd. For some reason, the time in the stc is way off (by hours), which is clearly not correct. We can try to reconstruct the actual time, but looking at the ERD time (of any file apart from the first one) and compute the original time back based on the offset of the number of samples in stc. For some reason, this is not the same for all the ERD, but the jitter is in the order of 1-2s which is acceptable for our purposes (probably, but be careful about the notes). 
"""""" start_time = hdr['stc']['creation_time'] for one_stamp in hdr['stamps']: if one_stamp['segment_name'].decode() == hdr['erd']['filename']: offset = one_stamp['start_stamp'] break erd_time = (hdr['erd']['creation_time'] - timedelta(seconds=offset / s_freq)).replace(microsecond=0) stc_erd_diff = (start_time - erd_time).total_seconds() if stc_erd_diff > START_TIME_TOL: lg.warn('Time difference between ERD and STC is {} s so using ERD time' ' at {}'.format(stc_erd_diff, erd_time)) start_time = erd_time return start_time","Find the start time, usually in STC, but if that's not correct, use ERD Parameters ---------- hdr : dict header with stc (and stamps) and erd s_freq : int sampling frequency Returns ------- datetime either from stc or from erd Notes ----- Sometimes, but rather rarely, there is a mismatch between the time in the stc and the time in the erd. For some reason, the time in the stc is way off (by hours), which is clearly not correct. We can try to reconstruct the actual time, but looking at the ERD time (of any file apart from the first one) and compute the original time back based on the offset of the number of samples in stc. For some reason, this is not the same for all the ERD, but the jitter is in the order of 1-2s which is acceptable for our purposes (probably, but be careful about the notes)." "def _create_filters(col_params, extractors): """"""Creates filters for the given col_params. Args: col_params: List of ListSessionGroupsRequest.ColParam protobufs. extractors: list of extractor functions of the same length as col_params. Each element should extract the column described by the corresponding element of col_params. Returns: A list of filter functions. Each corresponding to a single col_params.filter oneof field of _request """""" result = [] for col_param, extractor in zip(col_params, extractors): a_filter = _create_filter(col_param, extractor) if a_filter: result.append(a_filter) return result","Creates filters for the given col_params. Args: col_params: List of ListSessionGroupsRequest.ColParam protobufs. extractors: list of extractor functions of the same length as col_params. Each element should extract the column described by the corresponding element of col_params. Returns: A list of filter functions. Each corresponding to a single col_params.filter oneof field of _request" "def get_outfilename(url, domain=None): """"""Construct the output filename from domain and end of path."""""" if domain is None: domain = get_domain(url) path = '{url.path}'.format(url=urlparse(url)) if '.' in path: tail_url = path.split('.')[-2] else: tail_url = path if tail_url: if '/' in tail_url: tail_pieces = [x for x in tail_url.split('/') if x] tail_url = tail_pieces[-1] # Keep length of return string below or equal to max_len max_len = 24 if domain: max_len -= (len(domain) + 1) if len(tail_url) > max_len: if '-' in tail_url: tail_pieces = [x for x in tail_url.split('-') if x] tail_url = tail_pieces.pop(0) if len(tail_url) > max_len: tail_url = tail_url[:max_len] else: # Add as many tail pieces that can fit tail_len = 0 for piece in tail_pieces: tail_len += len(piece) if tail_len <= max_len: tail_url += '-' + piece else: break else: tail_url = tail_url[:max_len] if domain: return '{0}-{1}'.format(domain, tail_url).lower() return tail_url return domain.lower()",Construct the output filename from domain and end of path. "def vupack(v): """""" Unpack three scalar components from a vector. 
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vupack_c.html :param v: Vector :type v: 3-Element Array of floats :return: (x, y, z) :rtype: tuple """""" v1 = stypes.toDoubleVector(v) x = ctypes.c_double() y = ctypes.c_double() z = ctypes.c_double() libspice.vupack_c(v1, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z)) return x.value, y.value, z.value","Unpack three scalar components from a vector. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vupack_c.html :param v: Vector :type v: 3-Element Array of floats :return: (x, y, z) :rtype: tuple" "def _param_deprecation_warning(schema, deprecated, context): """"""Raises warning about using the 'old' names for some parameters. The new naming scheme just has two underscores on each end of the word for consistency """""" for i in deprecated: if i in schema: msg = 'When matching {ctx}, parameter {word} is deprecated, use __{word}__ instead' msg = msg.format(ctx = context, word = i) warnings.warn(msg, Warning)","Raises warning about using the 'old' names for some parameters. The new naming scheme just has two underscores on each end of the word for consistency" "def save_m2m(self, obj, data, using_transactions, dry_run): """""" Saves m2m fields. Model instance need to have a primary key value before a many-to-many relationship can be used. """""" if not using_transactions and dry_run: # we don't have transactions and we want to do a dry_run pass else: for field in self.get_import_fields(): if not isinstance(field.widget, widgets.ManyToManyWidget): continue self.import_field(field, obj, data, True)","Saves m2m fields. Model instance need to have a primary key value before a many-to-many relationship can be used." "def _get_system(model_folder): """"""Return the preprocessing description, the feature description and the model description."""""" # Get model description model_description_file = os.path.join(model_folder, ""info.yml"") if not os.path.isfile(model_description_file): logging.error(""You are probably not in the folder of a model, because "" ""%s is not a file. (-m argument)"", model_description_file) sys.exit(-1) with open(model_description_file, 'r') as ymlfile: model_desc = yaml.load(ymlfile) # Get the feature and the preprocessing description feature_desc = _get_description(model_desc) preprocessing_desc = _get_description(feature_desc) return (preprocessing_desc, feature_desc, model_desc)","Return the preprocessing description, the feature description and the model description."
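
The `calculate_offset` row above reduces a percentage-based crop position to a single clamped integer offset. As a sanity check, here is a minimal standalone sketch of that same arithmetic with a few worked values; the wrapper script and the example numbers are illustrative assumptions by the editor, not part of the dataset row.

```python
# Illustrative sketch only: restates the calculate_offset entry above as a
# standalone function so its behaviour can be checked with concrete numbers.
# The example values below are assumptions chosen for demonstration.

def calculate_offset(percent, original_length, length):
    """Calculate a crop offset (in pixels) from a percentage, clamped to the image edges."""
    return int(
        max(
            0,
            min(percent * original_length / 100.0, original_length - length / 2)
            - length / 2,
        )
    )

if __name__ == "__main__":
    # Cropping a 1000 px edge down to 200 px:
    print(calculate_offset(0, 1000, 200))    # 0   -> keep the leading edge
    print(calculate_offset(50, 1000, 200))   # 400 -> crop window centred
    print(calculate_offset(100, 1000, 200))  # 800 -> keep the trailing edge
```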
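
Likewise, the `count_mismatches_before_variant` row is a position-wise comparison of two equal-length cDNA prefixes. A small self-contained sketch of that logic, using made-up example sequences (an editorial assumption, not data from the row):

```python
# Illustrative sketch: the mismatch count before a variant locus is the number
# of positions where two equal-length prefixes differ.

def count_mismatches_before_variant(reference_prefix: str, cdna_prefix: str) -> int:
    if len(reference_prefix) != len(cdna_prefix):
        raise ValueError(
            "Expected reference prefix '%s' to be same length as %s"
            % (reference_prefix, cdna_prefix))
    return sum(xi != yi for (xi, yi) in zip(reference_prefix, cdna_prefix))

if __name__ == "__main__":
    print(count_mismatches_before_variant("AAGCT", "AAGCT"))  # 0
    print(count_mismatches_before_variant("AAGCT", "AAGTT"))  # 1 (position 3 differs)
```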