Columns: text (string, lengths 78 – 104k) · score (float64, range 0 – 0.18)
def restricted_to_files(self, filenames: List[str]) -> 'Spectra':
    """
    Returns a variant of this spectra that only contains entries for lines
    that appear in any of the files whose names appear in the given list.
    """
    tally_passing = {fn: entries
                     for (fn, entries) in self.__tally_passing.items()
                     if fn in filenames}
    tally_failing = {fn: entries
                     for (fn, entries) in self.__tally_failing.items()
                     if fn in filenames}
    return Spectra(self.__num_passing,
                   self.__num_failing,
                   tally_passing,
                   tally_failing)
0.008065
def get_cpu_info():
    '''
    Returns the CPU info by using the best sources of information for your OS.
    Returns the result in a dict
    '''
    import json

    output = get_cpu_info_json()

    # Convert JSON to Python with non unicode strings
    output = json.loads(output, object_hook=_utf_to_str)

    return output
0.039474
def sendcmd(self, cmd='AT', timeout=1.0):
    """send command, wait for response. returns response from modem."""
    import time
    if self.write(cmd):
        # Poll for a response until one arrives or the timeout expires.
        while self.get_response() == '' and timeout > 0:
            time.sleep(0.1)
            timeout -= 0.1
        return self.get_lines()
0.006231
def check_dimensionless_vertical_coordinate(self, ds): ''' Check the validity of dimensionless coordinates under CF CF §4.3.2 The units attribute is not required for dimensionless coordinates. The standard_name attribute associates a coordinate with its definition from Appendix D, Dimensionless Vertical Coordinates. The definition provides a mapping between the dimensionless coordinate values and dimensional values that can positively and uniquely indicate the location of the data. A new attribute, formula_terms, is used to associate terms in the definitions with variables in a netCDF file. To maintain backwards compatibility with COARDS the use of these attributes is not required, but is strongly recommended. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results ''' ret_val = [] z_variables = cfutil.get_z_variables(ds) deprecated_units = [ 'level', 'layer', 'sigma_level' ] for name in z_variables: variable = ds.variables[name] standard_name = getattr(variable, 'standard_name', None) units = getattr(variable, 'units', None) formula_terms = getattr(variable, 'formula_terms', None) # Skip the variable if it's dimensional if (formula_terms is None and standard_name not in dimless_vertical_coordinates): continue is_not_deprecated = TestCtx(BaseCheck.LOW, self.section_titles["4.3"]) is_not_deprecated.assert_true(units not in deprecated_units, "§4.3.2: units are deprecated by CF in variable {}: {}" "".format(name, units)) ret_val.append(is_not_deprecated.to_result()) ret_val.append(self._check_formula_terms(ds, name)) return ret_val
0.002438
def from_(self, pct_pts): """Reverse of :meth:`to_`.""" pct_pts = np.asarray(pct_pts, dtype=np.float) has_z = (pct_pts.shape[-1] > 2) max_pt = list(self.viewer.get_window_size()) if has_z: max_pt.append(0.0) win_pts = np.multiply(pct_pts, max_pt) # round to pixel units, if asked if self.as_int: win_pts = np.rint(win_pts).astype(np.int, copy=False) return win_pts
0.00431
def normpdf(x, mu, sigma):
    """
    Describes the relative likelihood that a real-valued random variable X
    will take on a given value.
    http://en.wikipedia.org/wiki/Probability_density_function
    """
    u = (x - mu) / abs(sigma)
    y = (1 / (math.sqrt(2 * math.pi) * abs(sigma))) * math.exp(-u * u / 2)
    return y
0.00639
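A minimal usage sketch for normpdf above (illustrative, not part of the dataset row; assumes the function and the math module are in scope): the standard normal density peaks at 1/sqrt(2*pi) ≈ 0.3989.

import math

# Peak of the standard normal density sits at 1/sqrt(2*pi).
assert abs(normpdf(0, 0, 1) - 1 / math.sqrt(2 * math.pi)) < 1e-12
# The density is symmetric about the mean.
assert abs(normpdf(2.0, 1.0, 0.5) - normpdf(0.0, 1.0, 0.5)) < 1e-12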
def split_full_path(path):
    """Return pair of bucket without protocol and path

    Arguments:
    path - valid S3 path, such as s3://somebucket/events

    >>> split_full_path('s3://mybucket/path-to-events')
    ('mybucket', 'path-to-events/')
    >>> split_full_path('s3://mybucket')
    ('mybucket', None)
    >>> split_full_path('s3n://snowplow-bucket/some/prefix/')
    ('snowplow-bucket', 'some/prefix/')
    """
    if path.startswith('s3://'):
        path = path[5:]
    elif path.startswith('s3n://'):
        path = path[6:]
    elif path.startswith('s3a://'):
        path = path[6:]
    else:
        raise ValueError("S3 path should start with s3://, s3n:// or "
                         "s3a:// prefix")
    parts = path.split('/')
    bucket = parts[0]
    path = '/'.join(parts[1:])
    return bucket, normalize_prefix(path)
0.001185
def check(self):
    """
    Checks the status of the stop exposure command
    This is run in background and can take a few seconds
    """
    g = get_root(self).globals
    if self.stopped_ok:
        # Exposure stopped OK; modify buttons
        self.disable()

        # try and write FITS table before enabling start button, otherwise
        # a new start will clear table
        try:
            insertFITSHDU(g)
        except Exception as err:
            g.clog.warn('Could not add FITS Table to run')
            g.clog.warn(str(err))

        g.observe.start.enable()
        g.setup.powerOn.disable()
        g.setup.powerOff.enable()

        # Report that run has stopped
        g.clog.info('Run stopped')

        # enable idle mode now run has stopped
        g.clog.info('Setting chips to idle')
        idle = {'appdata': {'app': 'Idle'}}
        try:
            success = postJSON(g, idle)
            if not success:
                raise Exception('postJSON returned false')
        except Exception as err:
            g.clog.warn('Failed to enable idle mode')
            g.clog.warn(str(err))

        g.clog.info('Stopping offsets (if running)')
        try:
            success = stopNodding(g)
            if not success:
                raise Exception('Failed to stop dithering: response was false')
        except Exception as err:
            g.clog.warn('Failed to stop GTC offset script')
            g.clog.warn(str(err))

        return True
    elif self.stopping:
        # Exposure in process of stopping
        # Disable lots of buttons
        self.disable()
        g.observe.start.disable()
        g.setup.powerOn.disable()
        g.setup.powerOff.disable()

        # wait half a second before trying again
        self.after(500, self.check)
    else:
        self.enable()
        g.observe.start.disable()
        g.setup.powerOn.disable()
        g.setup.powerOff.disable()

        # Start exposure meter
        g.info.timer.start()

    return False
0.001346
def update(self):
    """Update |WZ| based on |RelWZ| and |NFk|.

    >>> from hydpy.models.lland import *
    >>> parameterstep('1d')
    >>> nhru(2)
    >>> lnk(ACKER)
    >>> relwz(0.8)
    >>> nfk(100.0, 200.0)
    >>> derived.wz.update()
    >>> derived.wz
    wz(80.0, 160.0)
    """
    con = self.subpars.pars.control
    self(con.relwz * con.nfk)
0.004938
def make_wrapper(self, callable_):
    """Given a free-standing function 'callable', return a new callable
    that will call 'callable' and report all exceptions, using
    'call_and_report_errors'."""
    assert callable(callable_)

    def wrapper(*args, **kw):
        return self.call_and_report_errors(callable_, *args, **kw)
    return wrapper
0.007979
def restart(self, key): """Restart a previously finished entry.""" if key in self.queue: if self.queue[key]['status'] in ['failed', 'done']: new_entry = {'command': self.queue[key]['command'], 'path': self.queue[key]['path']} self.add_new(new_entry) self.write() return True return False
0.004808
def is_valid_vpnv4_prefix(prefix):
    """Returns True if the given prefix is a string representing a vpnv4
    prefix.

    A vpnv4 prefix is made up of RD:IPv4, where RD represents the route
    distinguisher and IPv4 is a valid dotted-decimal IPv4 notation string.
    """
    if not isinstance(prefix, str):
        return False

    # Split the prefix into route distinguisher and IP
    tokens = prefix.split(':', 2)
    if len(tokens) != 3:
        return False

    # Validate route distinguisher
    if not is_valid_route_dist(':'.join([tokens[0], tokens[1]])):
        return False

    # Validate IPv4 prefix and return
    return is_valid_ipv4_prefix(tokens[2])
0.001515
def _read_extensions(self, context): """Return list of extensions as str to be passed on to the Jinja2 env. If context does not contain the relevant info, return an empty list instead. """ try: extensions = context['cookiecutter']['_extensions'] except KeyError: return [] else: return [str(ext) for ext in extensions]
0.004866
def _program_dcnm_static_route(self, tenant_id, tenant_name): """Program DCNM Static Route. """ in_ip_dict = self.get_in_ip_addr(tenant_id) in_gw = in_ip_dict.get('gateway') in_ip = in_ip_dict.get('subnet') if in_gw is None: LOG.error("No FW service GW present") return False out_ip_dict = self.get_out_ip_addr(tenant_id) out_ip = out_ip_dict.get('subnet') # Program DCNM to update profile's static IP address on OUT part excl_list = [] excl_list.append(in_ip) excl_list.append(out_ip) subnet_lst = self.os_helper.get_subnet_nwk_excl(tenant_id, excl_list, excl_part=True) # This count is for telling DCNM to insert the static route in a # particular position. Total networks created - exclusive list as # above - the network that just got created. srvc_node_ip = self.get_out_srvc_node_ip_addr(tenant_id) ret = self.dcnm_obj.update_partition_static_route( tenant_name, fw_const.SERV_PART_NAME, subnet_lst, vrf_prof=self.cfg.firewall.fw_service_part_vrf_profile, service_node_ip=srvc_node_ip) if not ret: LOG.error("Unable to update DCNM ext profile with static " "route") return False return True
0.001417
def sample_stats_to_xarray(self): """Extract sample_stats from tfp trace.""" if self.model_fn is None or self.observed is None: return None log_likelihood = [] sample_size = self.posterior[0].shape[0] for i in range(sample_size): variables = {} for var_i, var_name in enumerate(self.var_names): variables[var_name] = self.posterior[var_i][i] with self.ed.interception(self._value_setter(variables)): log_likelihood.append((self.model_fn().distribution.log_prob(self.observed))) data = {} if self.dims is not None: coord_name = self.dims.get("obs") else: coord_name = None dims = {"log_likelihood": coord_name} with self.tf.Session() as sess: data["log_likelihood"] = np.expand_dims( sess.run(log_likelihood, feed_dict=self.feed_dict), axis=0 ) return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=dims)
0.003687
def _get_dict_default(obj, key): """ obj MUST BE A DICT key IS EXPECTED TO BE LITERAL (NO ESCAPING) TRY BOTH ATTRIBUTE AND ITEM ACCESS, OR RETURN Null """ try: return obj[key] except Exception as f: pass try: if float(key) == round(float(key), 0): return obj[int(key)] except Exception as f: pass return NullType(obj, key)
0.002451
def _create_fit_summary(self): """ Create and store a pandas series that will display to users the various statistics/values that indicate how well the estimated model fit the given dataset. Returns ------- None. """ # Make sure we have all attributes needed to create the results summary needed_attributes = ["df_model", "nobs", "null_log_likelihood", "log_likelihood", "rho_squared", "rho_bar_squared", "estimation_message"] try: assert all([hasattr(self, attr) for attr in needed_attributes]) assert all([getattr(self, attr) is not None for attr in needed_attributes]) except AssertionError: msg = "Call this function only after setting/calculating all other" msg_2 = " estimation results attributes" raise NotImplementedError(msg + msg_2) self.fit_summary = pd.Series([self.df_model, self.nobs, self.null_log_likelihood, self.log_likelihood, self.rho_squared, self.rho_bar_squared, self.estimation_message], index=["Number of Parameters", "Number of Observations", "Null Log-Likelihood", "Fitted Log-Likelihood", "Rho-Squared", "Rho-Bar-Squared", "Estimation Message"]) return None
0.001007
def sum_mags(mags, weights=None):
    """
    Sum an array of magnitudes in flux space.

    Parameters:
    -----------
    mags    : array of magnitudes
    weights : array of weights for each magnitude (i.e. from a pdf)

    Returns:
    --------
    sum_mag : the summed magnitude of all the stars
    """
    flux = 10**(-np.asarray(mags) / 2.5)
    if weights is None:
        return -2.5 * np.log10(np.sum(flux))
    else:
        return -2.5 * np.log10(np.sum(weights * flux))
0.004115
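A usage sketch for sum_mags above (illustrative; assumes the function and numpy as np are in scope). Two stars of equal magnitude m combine to m - 2.5*log10(2) ≈ m - 0.753, since fluxes add linearly.

import numpy as np

combined = sum_mags([20.0, 20.0])
assert abs(combined - (20.0 - 2.5 * np.log10(2))) < 1e-9  # ~19.247

# Weights scale each star's flux before combining.
weighted = sum_mags([20.0, 20.0], weights=[1.0, 0.0])
assert abs(weighted - 20.0) < 1e-9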
def website_exists_as_secure(self, website):
    """Return the secure equivalent of the given website, if one exists.

    We will have 2 websites with the same name: one insecure (that will
    contain just the redirect and the identity-verification) and one secured.
    """
    if website['https']:
        logger.info("website %s is already secured, skip" % website['name'])
        return website
    # changes in these fields are ignored
    for other in self._websites:
        if other['id'] == website['id']:
            continue
        if other['name'] == website['name'] and other['https']:
            return other
    return None
0.005706
def publish(self, value):
    """
    Accepts: int, long
    Returns: int, long
    """
    value = super(Integer, self).publish(value)

    if isinstance(value, float):
        value = int(value)
    if not isinstance(value, (int, long)):
        raise ValueError("Not an integer: %r" % (value, ))
    return value
0.00565
def detect_mode(term_hint="xterm-256color"): """Poor-mans color mode detection.""" if "ANSICON" in os.environ: return 16 elif os.environ.get("ConEmuANSI", "OFF") == "ON": return 256 else: term = os.environ.get("TERM", term_hint) if term.endswith("-256color") or term in ("xterm", "screen"): return 256 elif term.endswith("-color") or term in ("rxvt",): return 16 else: return 256
0.002088
def parse(self): """Parse the data.""" if self._filename: with open(self._filename) as ifile: self._data = ifile.read() with QasmParser(self._filename) as qasm_p: qasm_p.parse_debug(False) return qasm_p.parse(self._data)
0.006734
def register_signals(self): """Register signals.""" from .models import Collection from .receivers import CollectionUpdater if self.app.config['COLLECTIONS_USE_PERCOLATOR']: from .percolator import collection_inserted_percolator, \ collection_removed_percolator, \ collection_updated_percolator # Register collection signals to update percolators listen(Collection, 'after_insert', collection_inserted_percolator) listen(Collection, 'after_update', collection_updated_percolator) listen(Collection, 'after_delete', collection_removed_percolator) # Register Record signals to update record['_collections'] self.update_function = CollectionUpdater(app=self.app) signals.before_record_insert.connect(self.update_function, weak=False) signals.before_record_update.connect(self.update_function, weak=False)
0.00181
def _list_tables(self, max_results=None, marker=None, timeout=None):
    '''
    Returns a list of tables under the specified account. Makes a single list
    request to the service. Used internally by the list_tables method.

    :param int max_results:
        The maximum number of tables to return. A single list request may
        return up to 1000 tables and potentially a continuation token which
        should be followed to get additional results.
    :param marker:
        A dictionary which identifies the portion of the query to be
        returned with the next query operation. The operation returns a
        next_marker element within the response body if the list returned
        was not complete. This value may then be used as a query parameter
        in a subsequent call to request the next portion of the list of
        queues. The marker value is opaque to the client.
    :type marker: obj
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A list of tables, potentially with a next_marker property.
    :rtype: list of :class:`~azure.storage.models.table.Table`:
    '''
    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    request.path = '/Tables'
    request.headers = [('Accept', TablePayloadFormat.JSON_NO_METADATA)]
    request.query = [
        ('$top', _int_to_str(max_results)),
        ('NextTableName', _to_str(marker)),
        ('timeout', _int_to_str(timeout)),
    ]

    response = self._perform_request(request)
    return _convert_json_response_to_tables(response)
0.004065
def rename_annotations(self, sentence): """Function that renames and restructures clause information.""" annotations = [] for token in sentence: data = {CLAUSE_IDX: token[CLAUSE_IDX]} if CLAUSE_ANNOT in token: if 'KINDEL_PIIR' in token[CLAUSE_ANNOT]: data[CLAUSE_ANNOTATION] = CLAUSE_BOUNDARY elif 'KIILU_ALGUS' in token[CLAUSE_ANNOT]: data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_START elif 'KIILU_LOPP' in token[CLAUSE_ANNOT]: data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_END annotations.append(data) return annotations
0.002894
def _paths_must_exists(path):
    """
    Raises error if path doesn't exist.

    :param path: str path to check
    :return: str same path passed in
    """
    path = to_unicode(path)
    if not os.path.exists(path):
        raise argparse.ArgumentTypeError(
            "{} is not a valid file/folder.".format(path))
    return path
0.006211
def _satisfyVersionByInstallingVersion(name, version_required, working_directory, version, type='module', inherit_shrinkwrap=None): ''' installs and returns a Component/Target for the specified version requirement into 'working_directory' using the provided remote version object. This function is not normally called via `satisfyVersionByInstalling', which looks up a suitable remote version object. ''' assert(version) logger.info('download %s', version) version.unpackInto(working_directory) r = _clsForType(type)(working_directory, inherit_shrinkwrap = inherit_shrinkwrap) if not r: raise Exception( 'Dependency "%s":"%s" is not a valid %s.' % (name, version_required, type) ) if name != r.getName(): raise Exception('%s %s (specification %s) has incorrect name %s' % ( type, name, version_required, r.getName() )) # error code deliberately ignored here for now, it isn't clear what the # behaviour should be (abort? remove the unpacked state then abort? # continue?) r.runScript('postInstall') return r
0.006146
def pause(self):
    """Pause the sampler. Sampling can be resumed by calling `icontinue`.
    """
    self.status = 'paused'
    # The _loop method will react to 'paused' status and stop looping.
    if hasattr(self, '_sampling_thread') and self._sampling_thread.isAlive():
        print_('Waiting for current iteration to finish...')
        while self._sampling_thread.isAlive():
            sleep(.1)
0.004415
def create_keyspace_network_topology(name, dc_replication_map, durable_writes=True, connections=None): """ Creates a keyspace with NetworkTopologyStrategy for replica placement If the keyspace already exists, it will not be modified. **This function should be used with caution, especially in production environments. Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** *There are plans to guard schema-modifying functions with an environment-driven conditional.* :param str name: name of keyspace to create :param dict dc_replication_map: map of dc_names: replication_factor :param bool durable_writes: Write log is bypassed if set to False :param list connections: List of connection names """ _create_keyspace(name, durable_writes, 'NetworkTopologyStrategy', dc_replication_map, connections=connections)
0.006565
def imei(number):
    '''
    Printable International Mobile Station Equipment Identity (IMEI) numbers.

    :param number: string or int

    >>> print(imei(12345678901234))
    12-345678-901234-7
    >>> print(imei(1234567890123456))
    12-345678-901234-56
    '''
    number = to_decimal(number)
    length = len(number)
    if length not in (14, 15, 16):
        raise ValueError(
            _('Invalid International Mobile Station Equipment Identity')
        )
    if len(number) == 14:
        # Add Luhn check digit
        number = luhn_append(number)
    groups = (number[:2], number[2:8], number[8:14], number[14:])
    return u'-'.join(list(filter(None, groups)))
0.001475
def normal_cdf(x, mu=0, sigma=1):
    r"""Cumulative Normal Distribution Function.

    :param x: scalar or array of real numbers.
    :type x: numpy.ndarray, float

    :param mu: Mean value. Default 0.
    :type mu: float, numpy.ndarray

    :param sigma: Standard deviation. Default 1.
    :type sigma: float

    :returns: An approximation of the cdf of the normal.
    :rtype: numpy.ndarray

    Note: CDF of the normal distribution is defined as
    \frac12 [1 + erf(\frac{x - \mu}{\sigma \sqrt{2}})], x \in \R

    Source: http://en.wikipedia.org/wiki/Normal_distribution
    """
    arg = (x - mu) / (sigma * numpy.sqrt(2))
    res = (1 + erf(arg)) / 2
    return res
0.008696
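A quick sanity check for normal_cdf above (illustrative; the entry does not show where its erf comes from, so scipy.special.erf is assumed here, along with the function being in scope):

import numpy
from scipy.special import erf  # assumed source of the entry's erf

assert abs(normal_cdf(0.0) - 0.5) < 1e-12
assert abs(normal_cdf(1.959964) - 0.975) < 1e-6  # ~97.5th percentile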
def _valid_atoms(model, expression): """Check whether a sympy expression references the correct variables. Parameters ---------- model : cobra.Model The model in which to check for variables. expression : sympy.Basic A sympy expression. Returns ------- boolean True if all referenced variables are contained in model, False otherwise. """ atoms = expression.atoms(optlang.interface.Variable) return all(a.problem is model.solver for a in atoms)
0.001916
def insertions_from_masked(seq):
    """
    get coordinates of insertions from insertion-masked sequence
    """
    insertions = []
    prev = True
    for i, base in enumerate(seq):
        if base.isupper() and prev is True:
            insertions.append([])
            prev = False
        elif base.islower():
            insertions[-1].append(i)
            prev = True
    return [[min(i), max(i)] for i in insertions if i != []]
0.002283
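An illustrative trace of insertions_from_masked above (assumes the function is in scope and that the sequence begins with an uppercase reference base, which the loop logic requires):

# Lowercase runs mark insertions; the result lists their [start, end] indices.
assert insertions_from_masked('ACgtGTaC') == [[2, 3], [6, 6]]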
def to_tile(self, zoom): """ Converts EPSG:900913 to tile coordinates in given zoom level """ p1 = self.min.to_tile(zoom) p2 = self.max.to_tile(zoom) return GridBB(zoom, p1.x, p2.y, p2.x, p1.y)
0.008889
def column_signs_(self): """ Return a numpy array with expected signs of features. Values are * +1 when all known terms which map to the column have positive sign; * -1 when all known terms which map to the column have negative sign; * ``nan`` when there are both positive and negative known terms for this column, or when there is no known term which maps to this column. """ if self._always_positive(): return np.ones(self.n_features) self.unhasher.recalculate_attributes() return self.unhasher.column_signs_
0.00321
def p_continue_statement_2(self, p): """continue_statement : CONTINUE identifier SEMI | CONTINUE identifier AUTOSEMI """ p[0] = self.asttypes.Continue(p[2]) p[0].setpos(p)
0.008584
def find_eigen(hint=None): r''' Try to find the Eigen library. If successful the include directory is returned. ''' # search with pkgconfig # --------------------- try: import pkgconfig if pkgconfig.installed('eigen3','>3.0.0'): return pkgconfig.parse('eigen3')['include_dirs'][0] except: pass # manual search # ------------- search_dirs = [] if hint is None else hint search_dirs += [ "/usr/local/include/eigen3", "/usr/local/homebrew/include/eigen3", "/opt/local/var/macports/software/eigen3", "/opt/local/include/eigen3", "/usr/include/eigen3", "/usr/include/local", "/usr/include", ] for d in search_dirs: path = os.path.join(d, "Eigen", "Dense") if os.path.exists(path): vf = os.path.join(d, "Eigen", "src", "Core", "util", "Macros.h") if not os.path.exists(vf): continue src = open(vf, "r").read() v1 = re.findall("#define EIGEN_WORLD_VERSION (.+)", src) v2 = re.findall("#define EIGEN_MAJOR_VERSION (.+)", src) v3 = re.findall("#define EIGEN_MINOR_VERSION (.+)", src) if not len(v1) or not len(v2) or not len(v3): continue v = "{0}.{1}.{2}".format(v1[0], v2[0], v3[0]) print("Found Eigen version {0} in: {1}".format(v, d)) return d return None
0.019011
def create_instance(self, image_id, pem_file, group_ids, instance_type,
                    volume_type='gp2', ebs_optimized=False,
                    instance_monitoring=False, iam_profile='',
                    tag_list=None, auction_bid=0.0):
    '''
    a method for starting an instance on AWS EC2

    :param image_id: string with aws id of image for instance
    :param pem_file: string with path to pem file to access image
    :param group_ids: list with aws id of security group(s) to attach to instance
    :param instance_type: string with type of instance resource to use
    :param volume_type: string with type of on-disk storage
    :param ebs_optimized: [optional] boolean to activate ebs optimization
    :param instance_monitoring: [optional] boolean to active instance monitoring
    :param iam_profile: [optional] string with name of iam instance profile role
    :param tag_list: [optional] list of single key-pair tags for instance
    :param auction_bid: [optional] float with dollar amount to bid for instance hour
    :return: string with id of instance
    '''
    title = '%s.create_instance' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'image_id': image_id,
        'pem_file': pem_file,
        'group_ids': group_ids,
        'instance_type': instance_type,
        'volume_type': volume_type,
        'iam_profile': iam_profile,
        'tag_list': tag_list,
        'auction_bid': auction_bid
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

    # print warning about auction
    if auction_bid:
        self.iam.printer('[WARNING]: auction bidding is not yet available.')

    # turn off verbosity
    self.iam.printer_on = False

    # verify existence of image
    try:
        self.read_image(image_id)
    except:
        raise ValueError('Image %s does not exist in EC2 account or permission scope.' % image_id)

    # verify existence of security group
    group_list = self.list_security_groups()
    for id in group_ids:
        if id not in group_list:
            raise ValueError('Security group %s does not exist in EC2 account.' % id)

    # verify existence of iam profile
    if iam_profile:
        if not iam_profile in self.iam.list_roles():
            raise ValueError('Iam instance profile %s does not exist in IAM account.' % iam_profile)

    # validate path to pem file
    from os import path
    if not path.exists(pem_file):
        raise ValueError('%s is not a valid path on localhost.' % pem_file)

    # verify existence of pem name
    pem_absolute = path.abspath(pem_file)
    pem_root, pem_ext = path.splitext(pem_absolute)
    pem_path, pem_name = path.split(pem_root)
    if not pem_name in self.list_keypairs():
        raise ValueError('Pem file name %s does not exist in EC2 account.' % pem_name)

    # turn on verbosity
    self.iam.printer_on = True

    # create client token and timestamp for instance
    from labpack.records.id import labID
    record_id = labID()
    client_token = 'CT-%s' % record_id.id36
    from labpack.records.time import labDT
    timestamp = labDT.new().zulu()

    # construct tag list
    if not tag_list:
        tag_list = []
    for tag in tag_list:
        if tag['key'] == 'BuildDate':
            tag['value'] = timestamp

    # create keyword argument definitions
    kw_args = {
        'DryRun': False,
        'ImageId': image_id,
        'MinCount': 1,
        'MaxCount': 1,
        'KeyName': pem_name,
        'SecurityGroupIds': group_ids,
        'InstanceType': instance_type,
        'ClientToken': client_token,
        'Monitoring': {'Enabled': instance_monitoring},
        'EbsOptimized': ebs_optimized,
        'BlockDeviceMappings': []
    }
    kw_args['BlockDeviceMappings'].append(
        {"DeviceName": "/dev/xvda", "Ebs": {"VolumeType": volume_type}}
    )
    if iam_profile:
        kw_args['IamInstanceProfile'] = {'Name': iam_profile}

    # start instance on aws
    self.iam.printer('Initiating instance of image %s.' % image_id)
    try:
        response = self.connection.run_instances(**kw_args)
    except Exception as err:
        if 'non-VPC' in str(err):
            self.iam.printer('Default VPC Error Detected!\nAttempting to add Subnet declaration.')
            group_details = self.read_security_group(group_ids[0])
            env_type = ''
            for tag in group_details['tags']:
                if tag['Key'] == 'Env':
                    env_type = tag['Value']
            if env_type:
                subnet_list = self.list_subnets(tag_values=[env_type])
            else:
                subnet_list = self.list_subnets()
            error_msg = '%s requires a Subnet match the Security Group %s' % (title, group_ids[0])
            if not subnet_list:
                raise AWSConnectionError(error_msg)
            subnet_id = ''
            for subnet in subnet_list:
                subnet_details = self.read_subnet(subnet)
                if subnet_details['vpc_id'] == group_details['vpc_id']:
                    subnet_id = subnet
            if not subnet_id:
                raise AWSConnectionError(error_msg)
            kw_args['SubnetId'] = subnet_id
            try:
                response = self.connection.run_instances(**kw_args)
            except:
                raise AWSConnectionError('%s(%s)' % (title, kw_args))
        else:
            raise AWSConnectionError('%s(%s)' % (title, kw_args))

    # parse instance id from response
    instance_id = ''
    instance_list = response['Instances']
    for i in range(0, len(instance_list)):
        if instance_list[i]['ClientToken'] == client_token:
            instance_id = instance_list[i]['InstanceId']
    if instance_id:
        self.iam.printer('Instance %s has been initiated.' % instance_id)
    else:
        raise Exception('Failure creating instance from image %s.' % image_id)

    # tag instance with instance tags
    self.tag_instance(instance_id, tag_list)

    return instance_id
0.005733
def process_response(self): """ Parses an HTTP response after an HTTP request is sent """ split_response = self.response.split(self.CRLF) response_line = split_response[0] response_headers = {} response_data = None data_line = None for line_num in range(1, len(split_response[1:])): # CRLF represents the start of data if split_response[line_num] == '': data_line = line_num + 1 break else: # Headers are all split by ':' header = split_response[line_num].split(':', 1) if len(header) != 2: raise errors.TestError( 'Did not receive a response with valid headers', { 'header_rcvd': str(header), 'function': 'http.HttpResponse.process_response' }) response_headers[header[0].lower()] = header[1].lstrip() if 'set-cookie' in response_headers.keys(): try: cookie = Cookie.SimpleCookie() cookie.load(response_headers['set-cookie']) except Cookie.CookieError as err: raise errors.TestError( 'Error processing the cookie content into a SimpleCookie', { 'msg': str(err), 'set_cookie': str(response_headers['set-cookie']), 'function': 'http.HttpResponse.process_response' }) # if the check_for_cookie is invalid then we don't save it if self.check_for_cookie(cookie) is False: raise errors.TestError( 'An invalid cookie was specified', { 'set_cookie': str(response_headers['set-cookie']), 'function': 'http.HttpResponse.process_response' }) else: self.cookiejar.append((cookie, self.dest_addr)) if data_line is not None and data_line < len(split_response): response_data = self.CRLF.join(split_response[data_line:]) # if the output headers say there is encoding if 'content-encoding' in response_headers.keys(): response_data = self.parse_content_encoding( response_headers, response_data) if len(response_line.split(' ', 2)) != 3: raise errors.TestError( 'The HTTP response line returned the wrong args', { 'response_line': str(response_line), 'function': 'http.HttpResponse.process_response' }) try: self.status = int(response_line.split(' ', 2)[1]) except ValueError: raise errors.TestError( 'The status num of the response line isn\'t convertable', { 'msg': 'This may be an HTTP 1.0 \'Simple Req\\Res\', it \ doesn\'t have HTTP headers and FTW will not parse these', 'response_line': str(response_line), 'function': 'http.HttpResponse.process_response' }) self.status_msg = response_line.split(' ', 2)[2] self.version = response_line.split(' ', 2)[0] self.response_line = response_line self.headers = response_headers self.data = response_data
0.000562
def set(cls, prop, value): """ Set the value of the given configuration property. :param prop: (string) name of the property :param value: (object) value to set """ if cls._properties is None: cls._readStdConfigFiles() cls._properties[prop] = str(value)
0.006993
def describe(self, tablename, refresh=False, metrics=False, require=False): """ Get the :class:`.TableMeta` for a table """ table = self.cached_descriptions.get(tablename) if refresh or table is None or (metrics and not table.consumed_capacity): desc = self.connection.describe_table(tablename) if desc is None: if require: raise RuntimeError("Table %r not found" % tablename) else: return None table = TableMeta.from_description(desc) self.cached_descriptions[tablename] = table if metrics: read, write = self.get_capacity(tablename) table.consumed_capacity["__table__"] = {"read": read, "write": write} for index_name in table.global_indexes: read, write = self.get_capacity(tablename, index_name) table.consumed_capacity[index_name] = {"read": read, "write": write} return table
0.004836
def delete_collection_percolator(target): """Delete percolator associated with the new collection. :param target: Collection where the percolator was attached. """ for name in current_search.mappings.keys(): if target.name and target.dbquery: current_search.client.delete( index=name, doc_type='.percolator', id='collection-{}'.format(target.name), ignore=[404] )
0.002101
def datetime_to_iso(date, only_date=True):
    """ Convert datetime format to ISO 8601 time format

    This function converts a date in datetime instance, e.g.
    ``datetime.datetime(2017,9,14,0,0)`` to ISO format,
    e.g. ``2017-09-14``

    :param date: datetime instance to convert
    :type date: datetime
    :param only_date: whether to return date only or also time information.
        Default is ``True``
    :type only_date: bool
    :return: date in ISO 8601 format
    :rtype: str
    """
    if only_date:
        return date.isoformat().split('T')[0]
    return date.isoformat()
0.005146
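An illustrative call for datetime_to_iso above (assumes the function is in scope):

import datetime

d = datetime.datetime(2017, 9, 14, 13, 30)
assert datetime_to_iso(d) == '2017-09-14'
assert datetime_to_iso(d, only_date=False) == '2017-09-14T13:30:00'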
def set_color(fg=None, bg=None): """Set the current colors. If no arguments are given, sets default colors. """ if fg or bg: _color_manager.set_color(fg, bg) else: _color_manager.set_defaults()
0.004329
def random_string(length):
    '''
    Generate random string with parameter length.

    Example:
    >>> from eggit.egg_string import random_string
    >>> random_string(8)
    'q4f2eaT4'
    >>>
    '''
    str_list = [random.choice(string.digits + string.ascii_letters)
                for i in range(length)]
    return ''.join(str_list)
0.005814
def write_report(self, force=False): ''' Writes the report to a file. ''' path = self.title + '.html' value = self._template.format( title=self.title, body=self.body, sidebar=self.sidebar) write_file(path, value, force=force) plt.ion()
0.006601
def class_traits(cls, **metadata): """Get a list of all the traits of this class. This method is just like the :meth:`traits` method, but is unbound. The TraitTypes returned don't know anything about the values that the various HasTrait's instances are holding. This follows the same algorithm as traits does and does not allow for any simple way of specifying merely that a metadata name exists, but has any value. This is because get_metadata returns None if a metadata key doesn't exist. """ traits = dict([memb for memb in getmembers(cls) if \ isinstance(memb[1], TraitType)]) if len(metadata) == 0: return traits for meta_name, meta_eval in metadata.items(): if type(meta_eval) is not FunctionType: metadata[meta_name] = _SimpleTest(meta_eval) result = {} for name, trait in traits.items(): for meta_name, meta_eval in metadata.items(): if not meta_eval(trait.get_metadata(meta_name)): break else: result[name] = trait return result
0.003328
def overall_MCC_calc(classes, table, TOP, P): """ Calculate Overall_MCC. :param classes: classes :type classes : list :param table: input matrix :type table : dict :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :return: Overall_MCC as float """ try: cov_x_y = 0 cov_x_x = 0 cov_y_y = 0 matrix_sum = sum(list(TOP.values())) for i in classes: cov_x_x += TOP[i] * (matrix_sum - TOP[i]) cov_y_y += P[i] * (matrix_sum - P[i]) cov_x_y += (table[i][i] * matrix_sum - P[i] * TOP[i]) return cov_x_y / (math.sqrt(cov_y_y * cov_x_x)) except Exception: return "None"
0.00133
async def search(self, q: str, *, types: Optional[Iterable[str]] = ['track', 'playlist', 'artist', 'album'], limit: Optional[int] = 20, offset: Optional[int] = 0, market: Optional[str] = None) -> Dict[str, List[Union[Track, Playlist, Artist, Album]]]: """Access the spotify search functionality. Parameters ---------- q : str the search query types : Optional[Iterable[str]] A sequence of search types (can be any of `track`, `playlist`, `artist` or `album`) to refine the search request. A `ValueError` may be raised if a search type is found that is not valid. limit : Optional[int] The limit of search results to return when searching. Maximum limit is 50, any larger may raise a :class:`HTTPException` offset : Optional[int] The offset from where the api should start from in the search results. market : Optional[str] An ISO 3166-1 alpha-2 country code. Provide this parameter if you want to apply Track Relinking. Returns ------- results : Dict[str, List[Union[Track, Playlist, Artist, Album]]] The results of the search. """ if not hasattr(types, '__iter__'): raise TypeError('types must be an iterable.') elif not isinstance(types, list): types = list(item for item in types) types_ = set(types) if not types_.issubset(_SEARCH_TYPES): raise ValueError(_SEARCH_TYPE_ERR % types_.difference(_SEARCH_TYPES).pop()) kwargs = { 'q': q.replace(' ', '+'), 'queary_type': ','.join(tp.strip() for tp in types), 'market': market, 'limit': limit, 'offset': offset } data = await self.http.search(**kwargs) return {key: [_TYPES[obj['type']](self, obj) for obj in value['items']] for key, value in data.items()}
0.00458
def parse_host_port(host_port):
    """
    Takes a string argument specifying host or host:port.

    Returns a (hostname, port) or (ip_address, port) tuple. If no port is
    given, the second (port) element of the returned tuple will be None.

    host:port argument, for example, is accepted in the forms of:
      - hostname
      - hostname:1234
      - hostname.domain.tld
      - hostname.domain.tld:5678
      - [1234::5]:5678
      - 1234::5
      - 10.11.12.13:4567
      - 10.11.12.13
    """
    host, port = None, None  # default

    _s_ = host_port[:]
    if _s_[0] == "[":
        if "]" in host_port:
            host, _s_ = _s_.lstrip("[").rsplit("]", 1)
            host = ipaddress.IPv6Address(host).compressed
            if _s_[0] == ":":
                port = int(_s_.lstrip(":"))
            else:
                if len(_s_) > 1:
                    raise ValueError(
                        'found ambiguous "{}" port in "{}"'.format(_s_, host_port))
    else:
        if _s_.count(":") == 1:
            host, _hostport_separator_, port = _s_.partition(":")
            try:
                port = int(port)
            except ValueError as _e_:
                log.error('host_port "%s" port value "%s" is not an integer.',
                          host_port, port)
                raise _e_
        else:
            host = _s_
    try:
        if not isinstance(host, ipaddress._BaseAddress):
            host_ip = ipaddress.ip_address(host).compressed
            host = host_ip
    except ValueError:
        log.debug('"%s" Not an IP address? Assuming it is a hostname.', host)
        if host != sanitize_host(host):
            log.error('bad hostname: "%s"', host)
            raise ValueError('bad hostname: "{}"'.format(host))

    return host, port
0.0023
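Illustrative calls for parse_host_port above (assumes the function's module-level helpers are present, e.g. its ipaddress import, log, and the sanitize_host helper it calls for hostnames):

assert parse_host_port('hostname') == ('hostname', None)
assert parse_host_port('10.11.12.13:4567') == ('10.11.12.13', 4567)
assert parse_host_port('[1234::5]:5678') == ('1234::5', 5678)  # brackets stripped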
def _validate_virtualbox(self): ''' a method to validate that virtualbox is running on Win 7/8 machines :return: boolean indicating whether virtualbox is running ''' # validate operating system if self.localhost.os.sysname != 'Windows': return False win_release = float(self.localhost.os.release) if win_release >= 10.0: return False # validate docker-machine installation from os import devnull from subprocess import call, check_output, STDOUT sys_command = 'docker-machine --help' try: check_output(sys_command, shell=True, stderr=STDOUT).decode('utf-8') except Exception as err: raise Exception('Docker requires docker-machine to run on Win7/8. GoTo: https://www.docker.com') # validate virtualbox is running sys_command = 'docker-machine status %s' % self.vbox try: vbox_status = check_output(sys_command, shell=True, stderr=open(devnull, 'wb')).decode('utf-8').replace('\n', '') except Exception as err: if not self.vbox: raise Exception('Docker requires VirtualBox to run on Win7/8. GoTo: https://www.virtualbox.org') elif self.vbox == "default": raise Exception('Virtualbox "default" not found. Container will not start without a valid virtualbox.') else: raise Exception('Virtualbox "%s" not found. Try using "default" instead.' % self.vbox) if 'Stopped' in vbox_status: raise Exception('Virtualbox "%s" is stopped. Try first running: docker-machine start %s' % (self.vbox, self.vbox)) return True
0.005119
def respond_unauthorized(self, request_authentication=False): """ Respond to the client that the request is unauthorized. :param bool request_authentication: Whether to request basic authentication information by sending a WWW-Authenticate header. """ headers = {} if request_authentication: headers['WWW-Authenticate'] = 'Basic realm="' + self.__config['server_version'] + '"' self.send_response_full(b'Unauthorized', status=401, headers=headers) return
0.027426
def get_by_id(self, institution_id, _options=None): ''' Fetch a single institution by id. :param str institution_id: ''' options = _options or {} return self.client.post_public_key('/institutions/get_by_id', { 'institution_id': institution_id, 'options': options, })
0.005666
def search(self, start_ts, end_ts): """Searches through all documents and finds all documents that were modified or deleted within the range. Since we have very few documents in the doc dict when this is called, linear search is fine. This method is only used by rollbacks to query all the documents in the target engine within a certain timestamp window. The input will be two longs (converted from Bson timestamp) which specify the time range. The start_ts refers to the timestamp of the last oplog entry after a rollback. The end_ts is the timestamp of the last document committed to the backend. """ for _id in self.doc_dict: entry = self.doc_dict[_id] if entry.ts <= end_ts or entry.ts >= start_ts: yield entry.meta_dict
0.002342
def interpolate_slice(slice_rows, slice_cols, interpolator): """Interpolate the given slice of the larger array.""" fine_rows = np.arange(slice_rows.start, slice_rows.stop, slice_rows.step) fine_cols = np.arange(slice_cols.start, slice_cols.stop, slice_cols.step) return interpolator(fine_cols, fine_rows)
0.003115
def put_annotation(self, key, value):
    """
    Annotate segment or subsegment with a key-value pair.
    Annotations will be indexed for later search query.

    :param str key: annotation key
    :param object value: annotation value. Any type other than
        string/number/bool will be dropped
    """
    self._check_ended()
    if not isinstance(key, string_types):
        log.warning("ignoring non string type annotation key with type %s.",
                    type(key))
        return

    if not isinstance(value, annotation_value_types):
        log.warning("ignoring unsupported annotation value type %s.",
                    type(value))
        return

    if any(character not in _valid_annotation_key_characters
           for character in key):
        log.warning("ignoring annotation with unsupported characters "
                    "in key: '%s'.", key)
        return

    self.annotations[key] = value
0.006452
def get_sub_electrodes(self, adjacent_only=True): """ If this electrode contains multiple voltage steps, then it is possible to use only a subset of the voltage steps to define other electrodes. For example, an LiTiO2 electrode might contain three subelectrodes: [LiTiO2 --> TiO2, LiTiO2 --> Li0.5TiO2, Li0.5TiO2 --> TiO2] This method can be used to return all the subelectrodes with some options Args: adjacent_only: Only return electrodes from compounds that are adjacent on the convex hull, i.e. no electrodes returned will have multiple voltage steps if this is set true Returns: A list of ConversionElectrode objects """ if adjacent_only: return [self.__class__(self._vpairs[i:i + 1], self._working_ion_entry, self._composition) for i in range(len(self._vpairs))] sub_electrodes = [] for i in range(len(self._vpairs)): for j in range(i, len(self._vpairs)): sub_electrodes.append(self.__class__(self._vpairs[i:j + 1], self._working_ion_entry, self._composition)) return sub_electrodes
0.001471
def _getRunningApps(cls): """Get a list of the running applications.""" def runLoopAndExit(): AppHelper.stopEventLoop() AppHelper.callLater(1, runLoopAndExit) AppHelper.runConsoleEventLoop() # Get a list of running applications ws = AppKit.NSWorkspace.sharedWorkspace() apps = ws.runningApplications() return apps
0.005115
def get_visible_commands(self) -> List[str]: """Returns a list of commands that have not been hidden or disabled.""" commands = self.get_all_commands() # Remove the hidden commands for name in self.hidden_commands: if name in commands: commands.remove(name) # Remove the disabled commands for name in self.disabled_commands: if name in commands: commands.remove(name) return commands
0.004016
def Operate(self, values): """Takes a list of values and if at least one matches, returns True.""" for val in values: try: if self.Operation(val, self.right_operand): return True except (TypeError, ValueError): pass return False
0.014337
def check(self, func=None, name=None):
    """
    A decorator to register a new Dockerflow check to be run
    when the /__heartbeat__ endpoint is called, e.g.::

        from dockerflow.flask import checks

        @dockerflow.check
        def storage_reachable():
            try:
                acme.storage.ping()
            except SlowConnectionException as exc:
                return [checks.Warning(exc.msg, id='acme.health.0002')]
            except StorageException as exc:
                return [checks.Error(exc.msg, id='acme.health.0001')]

    or using a custom name::

        @dockerflow.check(name='acme-storage-check')
        def storage_reachable():
            # ...
    """
    if func is None:
        return functools.partial(self.check, name=name)

    if name is None:
        name = func.__name__

    self.logger.info('Registered Dockerflow check %s', name)

    @functools.wraps(func)
    def decorated_function(*args, **kwargs):
        self.logger.info('Called Dockerflow check %s', name)
        return func(*args, **kwargs)

    self.checks[name] = decorated_function
    return decorated_function
0.001603
def record(self, i=0): """Returns a specific dbf record based on the supplied index.""" f = self.__getFileObj(self.dbf) if not self.numRecords: self.__dbfHeader() i = self.__restrictIndex(i) recSize = self.__recordFmt()[1] f.seek(0) f.seek(self.__dbfHeaderLength() + (i * recSize)) return self.__record()
0.005141
def from_json(cls, json_moc): """ Creates a MOC from a dictionary of HEALPix cell arrays indexed by their depth. Parameters ---------- json_moc : dict(str : [int] A dictionary of HEALPix cell arrays indexed by their depth. Returns ------- moc : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC` the MOC. """ intervals = np.array([]) for order, pix_l in json_moc.items(): if len(pix_l) == 0: continue pix = np.array(pix_l) p1 = pix p2 = pix + 1 shift = 2 * (AbstractMOC.HPY_MAX_NORDER - int(order)) itv = np.vstack((p1 << shift, p2 << shift)).T if intervals.size == 0: intervals = itv else: intervals = np.vstack((intervals, itv)) return cls(IntervalSet(intervals))
0.003247
def set_mindays(name, mindays): ''' Set the minimum number of days between password changes. See man passwd. CLI Example: .. code-block:: bash salt '*' shadow.set_mindays username 7 ''' pre_info = info(name) if mindays == pre_info['min']: return True cmd = 'passwd -n {0} {1}'.format(mindays, name) __salt__['cmd.run'](cmd, python_shell=False) post_info = info(name) if post_info['min'] != pre_info['min']: return post_info['min'] == mindays return False
0.00189
def login(username=None, password=None):
    """
    Log in to PNC using the supplied username and password. The keycloak
    token will be saved for all subsequent pnc-cli operations until login
    is called again

    :return:
    """
    global user
    user = UserConfig()
    if username:
        user.username = username
    else:
        user.username = user.input_username()
    if password:
        user.password = password
    else:
        user.password = user.input_password()
    if not (user.username and user.password):
        logging.error("Username and password must be provided for login")
        return
    user.retrieve_keycloak_token()
    user.apiclient = user.create_api_client()
    save()
0.006964
def hashify_targets(targets: list, build_context) -> list: """Return sorted hashes of `targets`.""" return sorted(build_context.targets[target_name].hash(build_context) for target_name in listify(targets))
0.004329
def fetch_project(self, project_id): """Fetch an existing project and it's relevant metadata by ID. .. note:: If the project does not exist, this will raise a :class:`NotFound <google.cloud.exceptions.NotFound>` error. :type project_id: str :param project_id: The ID for this project. :rtype: :class:`~google.cloud.resource_manager.project.Project` :returns: A :class:`~google.cloud.resource_manager.project.Project` with metadata fetched from the API. """ project = self.new_project(project_id) project.reload() return project
0.003063
def find_best_step(err_vals): """ Returns the index of the lowest of the passed values. Catches nans etc. """ if np.all(np.isnan(err_vals)): raise ValueError('All err_vals are nans!') return np.nanargmin(err_vals)
0.004149
def dumps(obj, **kwargs): """ Serialize `obj`, which may contain :class:`JsonRef` objects, to a JSON formatted string. `JsonRef` objects will be dumped as the original reference object they were created from. :param obj: Object to serialize :param kwargs: Keyword arguments are the same as to :func:`json.dumps` """ kwargs["cls"] = _ref_encoder_factory(kwargs.get("cls", json.JSONEncoder)) return json.dumps(obj, **kwargs)
0.002174
def _did_timeout(self):
    """ Called when a request has timed out """
    bambou_logger.debug('Bambou %s on %s has timed out (timeout=%ss).'
                        % (self._request.method, self._request.url,
                           self.timeout))
    self._has_timeouted = True

    if self.async:
        self._callback(self)
    else:
        return self
0.011834
def numpymat2df(mat):
    """
    Sometimes (though not very often) it is useful to convert a numpy matrix
    which has no column names to a Pandas dataframe for use of the Pandas
    functions. This method converts a 2D numpy matrix to Pandas dataframe
    with default column headers.

    Parameters
    ----------
    mat : The numpy matrix

    Returns
    -------
    A pandas dataframe with the same data as the input matrix but with
    columns named x0, x1, ... x[n-1] for the number of columns.
    """
    return pd.DataFrame(
        dict(('x%d' % i, mat[:, i]) for i in range(mat.shape[1])))
0.001647
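An illustrative call for numpymat2df above (assumes the function and its pd/pandas import are in scope):

import numpy as np

mat = np.arange(6).reshape(2, 3)
df = numpymat2df(mat)
assert list(df.columns) == ['x0', 'x1', 'x2']
assert df.shape == (2, 3)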
async def get(self, request, resource=None, **kwargs): """Get resource or collection of resources. --- parameters: - name: resource in: path type: string """ if resource is not None and resource != '': return self.to_simple(request, resource, **kwargs) return self.to_simple(request, self.collection, many=True, **kwargs)
0.004717
def draw_court(ax=None, color='gray', lw=1, outer_lines=False): """Returns an axes with a basketball court drawn onto to it. This function draws a court based on the x and y-axis values that the NBA stats API provides for the shot chart data. For example the center of the hoop is located at the (0,0) coordinate. Twenty-two feet from the left of the center of the hoop in is represented by the (-220,0) coordinates. So one foot equals +/-10 units on the x and y-axis. Parameters ---------- ax : Axes, optional The Axes object to plot the court onto. color : matplotlib color, optional The color of the court lines. lw : float, optional The linewidth the of the court lines. outer_lines : boolean, optional If `True` it draws the out of bound lines in same style as the rest of the court. Returns ------- ax : Axes The Axes object with the court on it. """ if ax is None: ax = plt.gca() # Create the various parts of an NBA basketball court # Create the basketball hoop hoop = Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False) # Create backboard backboard = Rectangle((-30, -12.5), 60, 0, linewidth=lw, color=color) # The paint # Create the outer box 0f the paint, width=16ft, height=19ft outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color, fill=False) # Create the inner box of the paint, widt=12ft, height=19ft inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color, fill=False) # Create free throw top arc top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, theta2=180, linewidth=lw, color=color, fill=False) # Create free throw bottom arc bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0, linewidth=lw, color=color, linestyle='dashed') # Restricted Zone, it is an arc with 4ft radius from center of the hoop restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw, color=color) # Three point line # Create the right side 3pt lines, it's 14ft long before it arcs corner_three_a = Rectangle((-220, -47.5), 0, 140, linewidth=lw, color=color) # Create the right side 3pt lines, it's 14ft long before it arcs corner_three_b = Rectangle((220, -47.5), 0, 140, linewidth=lw, color=color) # 3pt arc - center of arc will be the hoop, arc is 23'9" away from hoop three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw, color=color) # Center Court center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0, linewidth=lw, color=color) center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0, linewidth=lw, color=color) # List of the court elements to be plotted onto the axes court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw, bottom_free_throw, restricted, corner_three_a, corner_three_b, three_arc, center_outer_arc, center_inner_arc] if outer_lines: # Draw the half court line, baseline and side out bound lines outer_lines = Rectangle((-250, -47.5), 500, 470, linewidth=lw, color=color, fill=False) court_elements.append(outer_lines) # Add the court elements onto the axes for element in court_elements: ax.add_patch(element) return ax
0.00027
def init(name, *args, **kwargs): """Instantiate a timeframe from the catalog. """ if name in _TIMEFRAME_CATALOG: if rapport.config.get_int("rapport", "verbosity") >= 2: print("Initialize timeframe {0}: {1} {2}".format(name, args, kwargs)) try: return _TIMEFRAME_CATALOG[name](*args, **kwargs) except ValueError as e: print("Failed to initialize timeframe {0}: {1}!".format(name, e), file=sys.stderr) else: print("Failed to initialize timeframe {0}: Not in catalog!".format(name), file=sys.stderr) sys.exit(1)
0.006667
def line_to(self, x, y): """Adds a line to the path from the current point to position ``(x, y)`` in user-space coordinates. After this call the current point will be ``(x, y)``. If there is no current point before the call to :meth:`line_to` this method will behave as ``context.move_to(x, y)``. :param x: X coordinate of the end of the new line. :param y: Y coordinate of the end of the new line. :type float: x :type float: y """ cairo.cairo_line_to(self._pointer, x, y) self._check_status()
0.003373
def setup_actions(self): """ Connects slots to signals """ self.actionOpen.triggered.connect(self.on_open) self.actionNew.triggered.connect(self.on_new) self.actionSave.triggered.connect(self.on_save) self.actionSave_as.triggered.connect(self.on_save_as) self.actionQuit.triggered.connect(QtWidgets.QApplication.instance().quit) self.tabWidget.current_changed.connect( self.on_current_tab_changed) self.actionAbout.triggered.connect(self.on_about)
0.005736
def _get_locations(self, calc): """Locate locations within the profile.""" return (self._location_in(calc.profile), self._location_out(calc.profile))
0.01105
def controls(self, timeline_slider_args={}, toggle_args={}):
    """Creates interactive controls for the animation

    Creates both a play/pause button, and a time slider at once

    Parameters
    ----------
    timeline_slider_args : Dict, optional
        A dictionary of arguments to be passed to timeline_slider()
    toggle_args : Dict, optional
        A dictionary of arguments to be passed to toggle()
    """
    self.timeline_slider(**timeline_slider_args)
    self.toggle(**toggle_args)
0.003676
def setup(self): """ Do any setup work needed to run i3status modules """ for conf_name in self.py3_config["i3s_modules"]: module = I3statusModule(conf_name, self) self.i3modules[conf_name] = module if module.is_time_module: self.time_modules.append(module)
0.005865
def export_module_spec(spec, path, checkpoint_path, name_transform_fn): """Helper function to ModuleSpec.export().""" with tf.Graph().as_default(): m = Module(spec) assign_map = { name_transform_fn(name): value for name, value in m.variable_map.items() } tf_v1.train.init_from_checkpoint(checkpoint_path, assign_map) init_op = tf_v1.initializers.global_variables() with tf_v1.Session() as session: session.run(init_op) m.export(path, session)
0.012245
def create_service(self, *args, **kwargs): """Create a service to current scope. See :class:`pykechain.Client.create_service` for available parameters. .. versionadded:: 1.13 """ return self._client.create_service(*args, scope=self.id, **kwargs)
0.006969
def update_endpoint(self, updated_ed):
    """
    Update a previously advertised endpoint description.

    :param updated_ed: an instance of EndpointDescription to update.
        Must not be None.
    :return: True if advertised, False if not (e.g. it's already been
        advertised)
    """
    endpoint_id = updated_ed.get_id()
    with self._published_endpoints_lock:
        if self.get_advertised_endpoint(endpoint_id) is None:
            return False

        advertise_result = self._update(updated_ed)
        if advertise_result:
            self._remove_advertised(endpoint_id)
            self._add_advertised(updated_ed, advertise_result)
            return True
        return False
0.002587
def perform_command(self): """ Perform command and return the appropriate exit code. :rtype: int """ if len(self.actual_arguments) < 1: return self.print_help() audio_file_path = self.actual_arguments[0] try: audiofile = AudioFile(audio_file_path, rconf=self.rconf, logger=self.logger) audiofile.read_properties() if self.has_option([u"-f", u"--full"]): audiofile.read_samples_from_file() self.print_generic(audiofile.__unicode__()) return self.NO_ERROR_EXIT_CODE except OSError: self.print_error(u"Cannot read file '%s'" % (audio_file_path)) self.print_error(u"Make sure the input file path is written/escaped correctly") except AudioFileProbeError: self.print_error(u"Unable to call the ffprobe executable '%s'" % (self.rconf[RuntimeConfiguration.FFPROBE_PATH])) self.print_error(u"Make sure the path to ffprobe is correct") except AudioFileUnsupportedFormatError: self.print_error(u"Cannot read properties of file '%s'" % (audio_file_path)) self.print_error(u"Make sure the input file has a format supported by ffprobe") return self.ERROR_EXIT_CODE
0.005376
def inv(z: int) -> int:
    """$= z^{-1} mod q$, for z != 0"""
    # Adapted from curve25519_athlon.c in djb's Curve25519.
    z2 = z * z % q                               # 2
    z9 = pow2(z2, 2) * z % q                     # 9
    z11 = z9 * z2 % q                            # 11
    z2_5_0 = (z11 * z11) % q * z9 % q            # 31 == 2^5 - 2^0
    z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q       # 2^10 - 2^0
    z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q    # ...
    z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q
    z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q
    z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q
    z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q
    z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q  # 2^250 - 2^0
    return pow2(z2_250_0, 5) * z11 % q
0.001502
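A self-check for inv above (q and pow2 are module-level in the original; the definitions here are assumptions based on the djb Curve25519 reference the comment cites, redefined so the snippet stands alone):

q = 2**255 - 19  # assumed field prime (Curve25519)

def pow2(x: int, p: int) -> int:
    """Square x p times modulo q."""
    while p > 0:
        x = x * x % q
        p -= 1
    return x

assert 9 * inv(9) % q == 1  # the inverse round-trips under multiplication mod q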
def IP_verified(directory, extensions_to_ignore=None, directories_to_ignore=None, files_to_ignore=None, verbose=False): """Find and audit potential data files that might violate IP This is the public function to be used to ascertain that all data in the specified directory tree has been audited according to the GA data IP tracking process. if IP_verified is False: # Stop and take remedial action ... else: # Proceed boldly with confidence verbose controls standard output. If verbose is False, only diagnostics about failed audits will appear. All files that check OK will pass silently. Optional arguments extensions_to_ignore, directories_to_ignore, and files_to_ignore are lists of things to skip. Examples are: extensions_to_ignore = ['.py','.c','.h', '.f'] # Ignore source code files_to_ignore = ['README.txt'] directories_to_ignore = ['.svn', 'misc'] None is also OK for these parameters. """ # Identify data files oldpath = None all_files = 0 ok_files = 0 all_files_accounted_for = True for dirpath, filename in identify_datafiles(directory, extensions_to_ignore, directories_to_ignore, files_to_ignore): if oldpath != dirpath: # Decide if dir header needs to be printed oldpath = dirpath first_time_this_dir = True all_files += 1 basename, ext = splitext(filename) license_filename = join(dirpath, basename + '.lic') # Look for a XML license file with the .lic status = 'OK' try: fid = open(license_filename) except IOError: status = 'NO LICENSE FILE' all_files_accounted_for = False else: fid.close() try: license_file_is_valid(license_filename, filename, dirpath, verbose=False) except audit_exceptions, e: all_files_accounted_for = False status = 'LICENSE FILE NOT VALID\n' status += 'REASON: %s\n' %e try: doc = xml2object(license_filename) except: status += 'XML file %s could not be read:'\ %license_filename fid = open(license_filename) status += fid.read() fid.close() else: pass #if verbose is True: # status += str(doc) if status == 'OK': ok_files += 1 else: # Only print status if there is a problem (no news is good news) if first_time_this_dir is True: print msg = ('Files without licensing info in dir: %s' % dirpath) print '.' * len(msg) print msg print '.' * len(msg) first_time_this_dir = False print filename + ' (Checksum = %s): '\ %str(compute_checksum(join(dirpath, filename))),\ status if verbose is True: print print '---------------------' print 'Audit result for dir: %s:' %directory print '---------------------' print 'Number of files audited: %d' %(all_files) print 'Number of files verified: %d' %(ok_files) print # Return result return all_files_accounted_for
0.004431
def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None, return_foreground_mask=False): """ Render the segmentation map as an RGB image. Parameters ---------- size : None or float or iterable of int or iterable of float, optional Size of the rendered RGB image as ``(height, width)``. See :func:`imgaug.imgaug.imresize_single_image` for details. If set to None, no resizing is performed and the size of the segmentation map array is used. background_threshold : float, optional See :func:`imgaug.SegmentationMapOnImage.get_arr_int`. background_class_id : None or int, optional See :func:`imgaug.SegmentationMapOnImage.get_arr_int`. colors : None or list of tuple of int, optional Colors to use. One for each class to draw. If None, then default colors will be used. return_foreground_mask : bool, optional Whether to return a mask of the same size as the drawn segmentation map, containing True at any spatial location that is not the background class and False everywhere else. Returns ------- segmap_drawn : (H,W,3) ndarray Rendered segmentation map (dtype is uint8). foreground_mask : (H,W) ndarray Mask indicating the locations of foreground classes (dtype is bool). This value is only returned if `return_foreground_mask` is True. """ arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id) nb_classes = 1 + np.max(arr) segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8) if colors is None: colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS ia.do_assert(nb_classes <= len(colors), "Can't draw all %d classes as it would exceed the maximum number of %d available colors." % ( nb_classes, len(colors),)) ids_in_map = np.unique(arr) for c, color in zip(sm.xrange(nb_classes), colors): if c in ids_in_map: class_mask = (arr == c) segmap_drawn[class_mask] = color if return_foreground_mask: background_class_id = 0 if background_class_id is None else background_class_id foreground_mask = (arr != background_class_id) else: foreground_mask = None if size is not None: segmap_drawn = ia.imresize_single_image(segmap_drawn, size, interpolation="nearest") if foreground_mask is not None: foreground_mask = ia.imresize_single_image( foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0 if foreground_mask is not None: return segmap_drawn, foreground_mask return segmap_drawn
0.005045
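The per-class coloring loop inside `draw` above reduces to a masked numpy assignment. A standalone sketch with a made-up three-color palette (not imgaug's DEFAULT_SEGMENT_COLORS):

import numpy as np

def colorize(arr, palette):
    """Map an (H, W) integer class map to an (H, W, 3) uint8 image."""
    out = np.zeros(arr.shape + (3,), dtype=np.uint8)
    for class_id, color in enumerate(palette):
        out[arr == class_id] = color  # broadcast the RGB triple over the mask
    return out

toy = np.array([[0, 1], [1, 2]])
print(colorize(toy, [(0, 0, 0), (255, 0, 0), (0, 255, 0)]))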
def entry_point(args=None, configuration=None): """ Standard entry point for the docker interface CLI. Parameters ---------- args : list or None list of command line arguments or `None` to use `sys.argv` configuration : dict parsed configuration or `None` to load and build a configuration given the command line arguments Raises ------ SystemExit if the configuration is malformed or the docker subprocesses returns a non-zero status code """ # Parse basic information parser = argparse.ArgumentParser('di') base = BasePlugin() base.add_arguments(parser) args, remainder = parser.parse_known_args(args) command = args.command configuration = base.apply(configuration, None, args) logger = logging.getLogger('di') # Load all plugins and en/disable as desired plugin_cls = Plugin.load_plugins() plugins = configuration.get('plugins') if isinstance(plugins, list): plugins = [plugin_cls[name.lower()] for name in plugins] else: # Disable and enable specific plugins if isinstance(plugins, dict): try: for name in plugins.get('enable', []): plugin_cls[name.lower()].ENABLED = True for name in plugins.get('disable', []): plugin_cls[name.lower()].ENABLED = False except KeyError as ex: # pragma: no cover logger.fatal("could not resolve plugin %s. Available plugins: %s", ex, ", ".join(plugin_cls)) raise SystemExit(2) elif plugins is not None: # pragma: no cover logger.fatal("'plugins' must be a `list`, `dict`, or `None` but got `%s`", type(plugins)) raise SystemExit(2) # Restrict plugins to enabled ones plugins = list(sorted([cls() for cls in plugin_cls.values() if cls.ENABLED], key=lambda x: x.ORDER)) # Construct the schema schema = base.SCHEMA for cls in plugin_cls.values(): schema = util.merge(schema, cls.SCHEMA) # Ensure that the plugins are relevant to the command plugins = [plugin for plugin in plugins if plugin.COMMANDS == 'all' or command in plugin.COMMANDS] parser = argparse.ArgumentParser('di %s' % command) for plugin in plugins: plugin.add_arguments(parser) args = parser.parse_args(remainder) # Apply defaults util.set_default_from_schema(configuration, schema) # Apply all the plugins in order status_code = 0 logger.debug("configuration:\n%s", json.dumps(configuration, indent=4)) for plugin in plugins: logger.debug("applying plugin '%s'", plugin) try: configuration = plugin.apply(configuration, schema, args) assert configuration is not None, "plugin '%s' returned `None`" % plugin except Exception as ex: # pragma: no cover logger.exception("failed to apply plugin '%s': %s", plugin, ex) message = "please rerun the command using `di --log-level debug` and file a new " \ "issue containing the output of the command here: https://github.com/" \ "spotify/docker_interface/issues/new" logger.fatal("\033[%dm%s\033[0m", 31, message) status_code = 3 break logger.debug("configuration:\n%s", json.dumps(configuration, indent=4)) for plugin in reversed(plugins): logger.debug("tearing down plugin '%s'", plugin) plugin.cleanup() status_code = configuration.get('status-code', status_code) if status_code: raise SystemExit(status_code)
0.002401
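The two-stage argument handling in `entry_point` (a base parser takes the globals, the per-command parser consumes the remainder) is a stock argparse pattern. A self-contained sketch with hypothetical flags:

import argparse

base = argparse.ArgumentParser('di', add_help=False)  # add_help=False lets -h fall through to the command parser
base.add_argument('command')
base.add_argument('--log-level', default='info')
args, remainder = base.parse_known_args(['run', '--log-level', 'debug', '--gpu'])

sub = argparse.ArgumentParser('di %s' % args.command)
sub.add_argument('--gpu', action='store_true')
sub_args = sub.parse_args(remainder)
print(args.command, args.log_level, sub_args.gpu)  # -> run debug True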
def _on_channel_open(self, channel): """ Callback used when a channel is opened. This registers all the channel callbacks. Args: channel (pika.channel.Channel): The channel that successfully opened. """ channel.add_on_close_callback(self._on_channel_close) channel.add_on_cancel_callback(self._on_cancel) channel.basic_qos(callback=self._on_qosok, **config.conf["qos"])
0.006696
def _show_prompt(self, prompt=None, html=False, newline=True): """ Writes a new prompt at the end of the buffer. Parameters ---------- prompt : str, optional The prompt to show. If not specified, the previous prompt is used. html : bool, optional (default False) Only relevant when a prompt is specified. If set, the prompt will be inserted as formatted HTML. Otherwise, the prompt will be treated as plain text, though ANSI color codes will be handled. newline : bool, optional (default True) If set, a new line will be written before showing the prompt if there is not already a newline at the end of the buffer. """ # Save the current end position to support _append*(before_prompt=True). cursor = self._get_end_cursor() self._append_before_prompt_pos = cursor.position() # Insert a preliminary newline, if necessary. if newline and cursor.position() > 0: cursor.movePosition(QtGui.QTextCursor.Left, QtGui.QTextCursor.KeepAnchor) if cursor.selection().toPlainText() != '\n': self._append_plain_text('\n') # Write the prompt. self._append_plain_text(self._prompt_sep) if prompt is None: if self._prompt_html is None: self._append_plain_text(self._prompt) else: self._append_html(self._prompt_html) else: if html: self._prompt = self._append_html_fetching_plain_text(prompt) self._prompt_html = prompt else: self._append_plain_text(prompt) self._prompt = prompt self._prompt_html = None self._prompt_pos = self._get_end_cursor().position() self._prompt_started()
0.002087
def __version(client): ''' Grab DRAC version ''' versions = {9: 'CMC', 8: 'iDRAC6', 10: 'iDRAC6', 11: 'iDRAC6', 16: 'iDRAC7', 17: 'iDRAC7'} if isinstance(client, paramiko.SSHClient): (stdin, stdout, stderr) = client.exec_command('racadm getconfig -g idRacInfo') for i in stdout.readlines(): if i[2:].startswith('idRacType'): return versions.get(int(i[2:].split('=')[1]), None) return None
0.003711
async def edit(self, *, reason=None, **options): """|coro| Edits the channel. You must have the :attr:`~Permissions.manage_channels` permission to use this. Parameters ---------- name: :class:`str` The new category's name. position: :class:`int` The new category's position. nsfw: :class:`bool` To mark the category as NSFW or not. reason: Optional[:class:`str`] The reason for editing this category. Shows up on the audit log. Raises ------ InvalidArgument If position is less than 0 or greater than the number of categories. Forbidden You do not have permissions to edit the category. HTTPException Editing the category failed. """ try: position = options.pop('position') except KeyError: pass else: await self._move(position, reason=reason) self.position = position if options: data = await self._state.http.edit_channel(self.id, reason=reason, **options) self._update(self.guild, data)
0.003303
def delete_many(cls, documents): """Delete multiple documents""" # Ensure all documents have been converted to frames frames = cls._ensure_frames(documents) all_count = len(documents) assert len([f for f in frames if '_id' in f._document]) == all_count, \ "Can't delete documents without `_id`s" # Send delete signal signal('delete').send(cls, frames=frames) # Prepare the documents to be deleted ids = [f._id for f in frames] # Delete the documents cls.get_collection().delete_many({'_id': {'$in': ids}}) # Send deleted signal signal('deleted').send(cls, frames=frames)
0.00431
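The `signal('delete')` / `signal('deleted')` calls in `delete_many` above match the blinker library's named-signal API. Under that assumption, wiring up a receiver for the deleted event might look like this sketch:

from blinker import signal

def log_deleted(sender, frames):
    # blinker receivers get the sender plus the keyword arguments passed to send().
    print('deleted %d documents of %s' % (len(frames), sender.__name__))

signal('deleted').connect(log_deleted)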
def fit_multinest(self, n_live_points=1000, basename=None, verbose=True, refit=False, overwrite=False, test=False, **kwargs): """ Fits model using MultiNest, via pymultinest. :param n_live_points: Number of live points to use for MultiNest fit. :param basename: Where the MulitNest-generated files will live. By default this will be in a folder named `chains` in the current working directory. Calling this will define a `_mnest_basename` attribute for this object. :param verbose: Whether you want MultiNest to talk to you. :param refit, overwrite: Set either of these to true if you want to delete the MultiNest files associated with the given basename and start over. :param **kwargs: Additional keyword arguments will be passed to :func:`pymultinest.run`. """ if basename is not None: #Should this even be allowed? self.mnest_basename = basename basename = self.mnest_basename if verbose: logging.info('MultiNest basename: {}'.format(basename)) folder = os.path.abspath(os.path.dirname(basename)) if not os.path.exists(folder): os.makedirs(folder) #If previous fit exists, see if it's using the same # observed properties prop_nomatch = False propfile = '{}properties.json'.format(basename) """ if os.path.exists(propfile): with open(propfile) as f: props = json.load(f) if set(props.keys()) != set(self.properties.keys()): prop_nomatch = True else: for k,v in props.items(): if np.size(v)==2: if not self.properties[k][0] == v[0] and \ self.properties[k][1] == v[1]: props_nomatch = True else: if not self.properties[k] == v: props_nomatch = True if prop_nomatch and not overwrite: raise ValueError('Properties not same as saved chains ' + '(basename {}*). '.format(basename) + 'Use overwrite=True to fit.') """ if refit or overwrite: files = glob.glob('{}*'.format(basename)) [os.remove(f) for f in files] short_basename = self._mnest_basename mnest_kwargs = dict(n_live_points=n_live_points, outputfiles_basename=short_basename, verbose=verbose) for k,v in kwargs.items(): mnest_kwargs[k] = v if test: print('pymultinest.run() with the following kwargs: {}'.format(mnest_kwargs)) else: wd = os.getcwd() os.chdir(os.path.join(folder, '..')) pymultinest.run(self.mnest_loglike, self.mnest_prior, self.n_params, **mnest_kwargs) os.chdir(wd) #with open(propfile, 'w') as f: # json.dump(self.properties, f, indent=2) self._make_samples()
0.004496
def print_value(value: Any, type_: GraphQLInputType) -> str: """Convenience function for printing a Python value""" return print_ast(ast_from_value(value, type_))
0.005882
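`print_value` above matches the helper of the same name in graphql-core's utilities. Assuming that library's top-level exports (an assumption about its layout), a call could look like:

from graphql import GraphQLBoolean, GraphQLInt  # assumption: graphql-core exports these scalars

print(print_value(42, GraphQLInt))        # -> 42
print(print_value(True, GraphQLBoolean))  # -> true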
def _parse_guild_members(self, parsed_content): """ Parses the guild's member and invited list. Parameters ---------- parsed_content: :class:`bs4.Tag` The parsed content of the guild's page """ member_rows = parsed_content.find_all("tr", {'bgcolor': ["#D4C0A1", "#F1E0C6"]}) previous_rank = {} for row in member_rows: columns = row.find_all('td') values = tuple(c.text.replace("\u00a0", " ") for c in columns) if len(columns) == COLS_GUILD_MEMBER: self._parse_current_member(previous_rank, values) if len(columns) == COLS_INVITED_MEMBER: self._parse_invited_member(values)
0.004071
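The row walk in `_parse_guild_members` is plain BeautifulSoup; the list-valued bgcolor filter matches either row color. A reduced, runnable sketch over an inline HTML snippet:

from bs4 import BeautifulSoup

html = '<table><tr bgcolor="#D4C0A1"><td>Rank</td><td>Name&nbsp;One</td></tr></table>'
soup = BeautifulSoup(html, 'html.parser')
for row in soup.find_all('tr', {'bgcolor': ['#D4C0A1', '#F1E0C6']}):
    values = tuple(c.text.replace('\u00a0', ' ') for c in row.find_all('td'))
    print(values)  # -> ('Rank', 'Name One')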
def readline(self, size=None):
    """Read a single line from rfile buffer and return it.

    Args:
        size (int): maximum number of bytes to read

    Returns:
        bytes: One line from rfile.

    """
    if size is not None:
        data = self.rfile.readline(size)
        self.bytes_read += len(data)
        self._check_length()
        return data

    # User didn't specify a size ...
    # We read the line in chunks to make sure it's not a 100MB line!
    res = []
    while True:
        data = self.rfile.readline(256)
        self.bytes_read += len(data)
        self._check_length()
        res.append(data)
        # See https://github.com/cherrypy/cherrypy/issues/421
        if len(data) < 256 or data[-1:] == LF:
            return EMPTY.join(res)
0.00232
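The chunked loop in `readline` above bounds how much of an oversized line is pulled in per call. The same pattern, reduced to a standalone sketch over any binary file object (names here are illustrative, not from the original class):

LF = b'\n'

def bounded_readline(fp, chunk_size=256):
    """Read one line from fp in fixed-size chunks so a single huge
    line is accumulated gradually rather than in one readline() call."""
    parts = []
    while True:
        chunk = fp.readline(chunk_size)  # at most chunk_size bytes; stops early at a newline or EOF
        parts.append(chunk)
        if len(chunk) < chunk_size or chunk[-1:] == LF:
            return b''.join(parts)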
def lookup_subclass(cls, d): """Look up a class based on a serialized dictionary containing a typeid Args: d (dict): Dictionary with key "typeid" Returns: Serializable subclass """ try: typeid = d["typeid"] except KeyError: raise FieldError("typeid not present in keys %s" % list(d)) subclass = cls._subcls_lookup.get(typeid, None) if not subclass: raise FieldError("'%s' not a valid typeid" % typeid) else: return subclass
0.003509
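A minimal sketch of the registry that `lookup_subclass` presumes: `_subcls_lookup` maps typeid strings to classes, which can be maintained automatically via `__init_subclass__`. Class names here are hypothetical, and real typeids may be namespaced rather than bare class names:

class Serializable:
    _subcls_lookup: dict = {}

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # Register each subclass so a serialized {'typeid': ...} dict
        # can be resolved back to the class that produced it.
        Serializable._subcls_lookup[cls.__name__] = cls

class Table(Serializable):
    pass

assert Serializable._subcls_lookup['Table'] is Table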
def return_hdr(self):
    """Return the header for further use.

    Returns
    -------
    subj_id : str
        subject identification code
    start_time : datetime
        start time of the dataset
    s_freq : float
        sampling frequency
    chan_name : list of str
        list of all the channels
    n_samples : int
        number of samples in the dataset
    orig : dict
        additional information taken directly from the header

    Notes
    -----
    It only reads hdf5 matlab files and the VARiable needs to be called
    'data'

    h5py is necessary for this function
    """
    # fieldtrip does not have this information
    orig = dict()
    subj_id = str()
    start_time = datetime.fromordinal(1)  # fake

    try:
        ft_data = loadmat(self.filename, struct_as_record=True,
                          squeeze_me=True)
        if VAR not in ft_data:
            raise KeyError("Save the FieldTrip variable as '{}'".format(VAR))
        ft_data = ft_data[VAR]

        s_freq = ft_data['fsample'].astype('float64').item()
        n_samples = ft_data['trial'].item().shape[1]
        chan_name = list(ft_data['label'].item())

    except NotImplementedError:
        with File(self.filename) as f:
            if VAR not in f.keys():
                raise KeyError("Save the FieldTrip variable as '{}'".format(VAR))
            s_freq = int(f[VAR]['fsample'].value.squeeze())
            chan_name = read_hdf5_chan_name(f, f[VAR]['label'])
            n_samples = int(around(f[f[VAR]['trial'][0].item()].shape[0]))

    return subj_id, start_time, s_freq, chan_name, n_samples, orig
0.001074
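The try/except in `return_hdr` above hinges on `scipy.io.loadmat` raising NotImplementedError for MATLAB v7.3 files, which are HDF5 underneath. That dispatch, isolated into an illustrative helper:

import h5py
from scipy.io import loadmat

def load_matlab(filename):
    """Load a .mat file, falling back to h5py for v7.3 (HDF5) files."""
    try:
        return loadmat(filename, struct_as_record=True, squeeze_me=True)
    except NotImplementedError:
        # MATLAB >= 7.3 saves HDF5; scipy refuses these, h5py reads them.
        return h5py.File(filename, 'r')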
def DataRefreshRequired(self, path=None, last=None): """True if we need to update this path from the client. Args: path: The path relative to the root to check freshness of. last: An aff4:last attribute to check freshness of. At least one of path or last must be supplied. Returns: True if the path hasn't been updated in the last self.max_age_before_refresh seconds, else False. Raises: type_info.TypeValueError: If no arguments are supplied. """ # If we didn't get given a last attribute, use the path to get one from the # object. if last is None: if path is None: # If we didn't get a path either, we can't do anything. raise type_info.TypeValueError("Either 'path' or 'last' must" " be supplied as an argument.") fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token) # We really care about the last time the stat was updated, so we use # this instead of the LAST attribute, which is the last time anything # was updated about the object. stat_obj = fd.Get(fd.Schema.STAT) if stat_obj: last = stat_obj.age else: last = rdfvalue.RDFDatetime(0) # If the object doesn't even have a LAST attribute by this point, # we say it hasn't been accessed within the cache expiry time. if last is None: return True last = last.AsDatetime() # Remember to use UTC time, since that's what the datastore uses. return datetime.datetime.utcnow() - last > self.max_age_before_refresh
0.006254
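The closing comparison in `DataRefreshRequired` is a plain age check against a UTC timestamp. Stripped of the AFF4 machinery, the core test is just (illustrative helper, not part of the original API):

import datetime

def is_stale(last_utc, max_age_seconds):
    """True when last_utc is older than max_age_seconds relative to now (UTC)."""
    return datetime.datetime.utcnow() - last_utc > datetime.timedelta(seconds=max_age_seconds)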
def chu_liu_edmonds(length: int, score_matrix: numpy.ndarray, current_nodes: List[bool], final_edges: Dict[int, int], old_input: numpy.ndarray, old_output: numpy.ndarray, representatives: List[Set[int]]): """ Applies the chu-liu-edmonds algorithm recursively to a graph with edge weights defined by score_matrix. Note that this function operates in place, so variables will be modified. Parameters ---------- length : ``int``, required. The number of nodes. score_matrix : ``numpy.ndarray``, required. The score matrix representing the scores for pairs of nodes. current_nodes : ``List[bool]``, required. The nodes which are representatives in the graph. A representative at it's most basic represents a node, but as the algorithm progresses, individual nodes will represent collapsed cycles in the graph. final_edges: ``Dict[int, int]``, required. An empty dictionary which will be populated with the nodes which are connected in the maximum spanning tree. old_input: ``numpy.ndarray``, required. old_output: ``numpy.ndarray``, required. representatives : ``List[Set[int]]``, required. A list containing the nodes that a particular node is representing at this iteration in the graph. Returns ------- Nothing - all variables are modified in place. """ # Set the initial graph to be the greedy best one. parents = [-1] for node1 in range(1, length): parents.append(0) if current_nodes[node1]: max_score = score_matrix[0, node1] for node2 in range(1, length): if node2 == node1 or not current_nodes[node2]: continue new_score = score_matrix[node2, node1] if new_score > max_score: max_score = new_score parents[node1] = node2 # Check if this solution has a cycle. has_cycle, cycle = _find_cycle(parents, length, current_nodes) # If there are no cycles, find all edges and return. if not has_cycle: final_edges[0] = -1 for node in range(1, length): if not current_nodes[node]: continue parent = old_input[parents[node], node] child = old_output[parents[node], node] final_edges[child] = parent return # Otherwise, we have a cycle so we need to remove an edge. # From here until the recursive call is the contraction stage of the algorithm. cycle_weight = 0.0 # Find the weight of the cycle. index = 0 for node in cycle: index += 1 cycle_weight += score_matrix[parents[node], node] # For each node in the graph, find the maximum weight incoming # and outgoing edge into the cycle. cycle_representative = cycle[0] for node in range(length): if not current_nodes[node] or node in cycle: continue in_edge_weight = float("-inf") in_edge = -1 out_edge_weight = float("-inf") out_edge = -1 for node_in_cycle in cycle: if score_matrix[node_in_cycle, node] > in_edge_weight: in_edge_weight = score_matrix[node_in_cycle, node] in_edge = node_in_cycle # Add the new edge score to the cycle weight # and subtract the edge we're considering removing. score = (cycle_weight + score_matrix[node, node_in_cycle] - score_matrix[parents[node_in_cycle], node_in_cycle]) if score > out_edge_weight: out_edge_weight = score out_edge = node_in_cycle score_matrix[cycle_representative, node] = in_edge_weight old_input[cycle_representative, node] = old_input[in_edge, node] old_output[cycle_representative, node] = old_output[in_edge, node] score_matrix[node, cycle_representative] = out_edge_weight old_output[node, cycle_representative] = old_output[node, out_edge] old_input[node, cycle_representative] = old_input[node, out_edge] # For the next recursive iteration, we want to consider the cycle as a # single node. 
    # Here we collapse the cycle into the first node in the
    # cycle (first node is arbitrary), and set all the other nodes not to be
    # considered in the next iteration. We also keep track of which
    # representatives we are considering this iteration because we need
    # them below to check if we're done.
    considered_representatives: List[Set[int]] = []
    for i, node_in_cycle in enumerate(cycle):
        considered_representatives.append(set())
        if i > 0:
            # We need to consider at least one
            # node in the cycle, arbitrarily choose
            # the first.
            current_nodes[node_in_cycle] = False

        for node in representatives[node_in_cycle]:
            considered_representatives[i].add(node)
            if i > 0:
                representatives[cycle_representative].add(node)

    chu_liu_edmonds(length, score_matrix, current_nodes, final_edges, old_input, old_output, representatives)

    # Expansion stage.
    # Check each node in the cycle: if one of its representatives
    # is a key in final_edges, it is the one we need.
    found = False
    key_node = -1
    for i, node in enumerate(cycle):
        for cycle_rep in considered_representatives[i]:
            if cycle_rep in final_edges:
                key_node = node
                found = True
                break
        if found:
            break

    previous = parents[key_node]
    while previous != key_node:
        child = old_output[parents[previous], previous]
        parent = old_input[parents[previous], previous]
        final_edges[child] = parent
        previous = parents[previous]
0.000498