def data(self):
    """ Returns self.json loaded as a python object. """
    try:
        data = self._data
    except AttributeError:
        data = self._data = json.loads(self.json)
    return data
Returns self.json loaded as a python object.
def dbmin10years(self, value=None):
    """Corresponds to IDD Field `dbmin10years`

    10-year return period values for minimum extreme dry-bulb temperature

    Args:
        value (float): value for IDD Field `dbmin10years`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float '
                             'for field `dbmin10years`'.format(value))
    self._dbmin10years = value
Corresponds to IDD Field `dbmin10years`

10-year return period values for minimum extreme dry-bulb temperature

Args:
    value (float): value for IDD Field `dbmin10years`
        Unit: C
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
def build_java_worker_command(
        java_worker_options,
        redis_address,
        plasma_store_name,
        raylet_name,
        redis_password,
        temp_dir,
):
    """This method assembles the command used to start a Java worker.

    Args:
        java_worker_options (str): The command options for Java worker.
        redis_address (str): Redis address of GCS.
        plasma_store_name (str): The name of the plasma store socket
            to connect to.
        raylet_name (str): The name of the raylet socket to create.
        redis_password (str): The password to connect to redis.
        temp_dir (str): The path of the temporary directory Ray will use.

    Returns:
        The command string for starting Java worker.
    """
    assert java_worker_options is not None

    command = "java "
    if redis_address is not None:
        command += "-Dray.redis.address={} ".format(redis_address)
    if plasma_store_name is not None:
        command += (
            "-Dray.object-store.socket-name={} ".format(plasma_store_name))
    if raylet_name is not None:
        command += "-Dray.raylet.socket-name={} ".format(raylet_name)
    if redis_password is not None:
        command += "-Dray.redis.password={} ".format(redis_password)
    command += "-Dray.home={} ".format(RAY_HOME)
    # TODO(suquark): We should use temp_dir as the input of a java worker.
    command += "-Dray.log-dir={} ".format(os.path.join(temp_dir, "sockets"))

    if java_worker_options:
        # Put `java_worker_options` last, so it can overwrite the
        # above options.
        command += java_worker_options + " "
    command += "org.ray.runtime.runner.worker.DefaultWorker"
    return command
This method assembles the command used to start a Java worker.

Args:
    java_worker_options (str): The command options for Java worker.
    redis_address (str): Redis address of GCS.
    plasma_store_name (str): The name of the plasma store socket to connect to.
    raylet_name (str): The name of the raylet socket to create.
    redis_password (str): The password to connect to redis.
    temp_dir (str): The path of the temporary directory Ray will use.

Returns:
    The command string for starting Java worker.
def clean_course(self):
    """ Verify course ID and retrieve course details. """
    course_id = self.cleaned_data[self.Fields.COURSE].strip()
    if not course_id:
        return None
    try:
        client = EnrollmentApiClient()
        return client.get_course_details(course_id)
    except (HttpClientError, HttpServerError):
        raise ValidationError(
            ValidationMessages.INVALID_COURSE_ID.format(course_id=course_id))
Verify course ID and retrieve course details.
def decrease_frequency(self, frequency=None):
    """
    Decreases the frequency.

    :param frequency: the frequency to decrease by, 1 if None
    :type frequency: int
    """
    if frequency is None:
        javabridge.call(self.jobject, "decreaseFrequency", "()V")
    else:
        javabridge.call(self.jobject, "decreaseFrequency", "(I)V", frequency)
Decreases the frequency.

:param frequency: the frequency to decrease by, 1 if None
:type frequency: int
def _process_response_xml(self, response_xml):
    '''
    Process the response XML and, if there are no errors, return a
    dictionary with the code and date.

    :return: dictionary
    '''
    result = {}
    xml = ElementTree.fromstring(response_xml)
    if xml.tag == 'errors':
        logger.error(
            u'Erro no pedido de pagamento ao PagSeguro.' +
            ' O xml de resposta foi: %s' % response_xml)
        errors_message = u'Ocorreu algum problema com os dados do pagamento: '
        for error in xml.findall('error'):
            error_code = error.find('code').text
            error_message = error.find('message').text
            errors_message += u'\n (code=%s) %s' % (error_code, error_message)
        raise PagSeguroPaymentException(errors_message)

    if xml.tag == 'checkout':
        result['code'] = xml.find('code').text
        try:
            xml_date = xml.find('date').text
            result['date'] = dateutil.parser.parse(xml_date)
        except Exception:
            logger.exception(u'O campo date não foi encontrado ou é invalido')
            result['date'] = None
    else:
        raise PagSeguroPaymentException(
            u'Erro ao processar resposta do pagamento: tag "checkout" '
            u'nao encontrada no xml de resposta')
    return result
Process the response XML and, if there are no errors, return a
dictionary with the code and date.

:return: dictionary
def purge(self, queue, nowait=True, ticket=None, cb=None):
    '''
    Purge all messages in a queue.
    '''
    nowait = nowait and self.allow_nowait() and not cb

    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(queue).\
        write_bit(nowait)
    self.send_frame(MethodFrame(self.channel_id, 50, 30, args))

    if not nowait:
        self._purge_cb.append(cb)
        return self.channel.add_synchronous_cb(self._recv_purge_ok)
Purge all messages in a queue.
def parallel_map(task, values, task_args=tuple(), task_kwargs={},  # pylint: disable=W0102
                 num_processes=CPU_COUNT):
    """
    Parallel execution of a mapping of `values` to the function `task`. This
    is functionally equivalent to::

        result = [task(value, *task_args, **task_kwargs) for value in values]

    On Windows this function defaults to a serial implementation to avoid the
    overhead from spawning processes in Windows.

    Args:
        task (func): Function that is to be called for each value in ``values``.
        values (array_like): List or array of values for which the ``task``
            function is to be evaluated.
        task_args (list): Optional additional arguments to the ``task`` function.
        task_kwargs (dict): Optional additional keyword arguments to the
            ``task`` function.
        num_processes (int): Number of processes to spawn.

    Returns:
        result: The result list contains the value of
            ``task(value, *task_args, **task_kwargs)`` for
            each value in ``values``.

    Raises:
        QiskitError: If user interrupts via keyboard.

    Events:
        terra.parallel.start: The collection of parallel tasks are about to start.
        terra.parallel.done: One of the parallel tasks has finished.
        terra.parallel.finish: All the parallel tasks have finished.
    """
    if len(values) == 1:
        return [task(values[0], *task_args, **task_kwargs)]

    Publisher().publish("terra.parallel.start", len(values))
    nfinished = [0]

    def _callback(_):
        nfinished[0] += 1
        Publisher().publish("terra.parallel.done", nfinished[0])

    # Run in parallel if not Win and not in parallel already
    if platform.system() != 'Windows' and num_processes > 1 \
            and os.getenv('QISKIT_IN_PARALLEL') == 'FALSE':
        os.environ['QISKIT_IN_PARALLEL'] = 'TRUE'
        try:
            pool = Pool(processes=num_processes)

            async_res = [pool.apply_async(task, (value,) + task_args,
                                          task_kwargs, _callback)
                         for value in values]

            while not all([item.ready() for item in async_res]):
                for item in async_res:
                    item.wait(timeout=0.1)

            pool.terminate()
            pool.join()

        except KeyboardInterrupt:
            pool.terminate()
            pool.join()
            Publisher().publish("terra.parallel.finish")
            raise QiskitError('Keyboard interrupt in parallel_map.')

        Publisher().publish("terra.parallel.finish")
        os.environ['QISKIT_IN_PARALLEL'] = 'FALSE'
        return [ar.get() for ar in async_res]

    # Cannot do parallel on Windows, if another parallel_map is running in
    # parallel, or len(values) == 1.
    results = []
    for _, value in enumerate(values):
        result = task(value, *task_args, **task_kwargs)
        results.append(result)
        _callback(0)
    Publisher().publish("terra.parallel.finish")
    return results
Parallel execution of a mapping of `values` to the function `task`. This
is functionally equivalent to::

    result = [task(value, *task_args, **task_kwargs) for value in values]

On Windows this function defaults to a serial implementation to avoid the
overhead from spawning processes in Windows.

Args:
    task (func): Function that is to be called for each value in ``values``.
    values (array_like): List or array of values for which the ``task``
        function is to be evaluated.
    task_args (list): Optional additional arguments to the ``task`` function.
    task_kwargs (dict): Optional additional keyword arguments to the
        ``task`` function.
    num_processes (int): Number of processes to spawn.

Returns:
    result: The result list contains the value of
        ``task(value, *task_args, **task_kwargs)`` for each value in ``values``.

Raises:
    QiskitError: If user interrupts via keyboard.

Events:
    terra.parallel.start: The collection of parallel tasks are about to start.
    terra.parallel.done: One of the parallel tasks has finished.
    terra.parallel.finish: All the parallel tasks have finished.
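A minimal usage sketch of the call shape documented above; the task
function and its keyword argument are hypothetical names, not part of
the source, and whether the map actually runs in parallel depends on
the platform and the QISKIT_IN_PARALLEL environment variable:

def square(x, offset=0):
    return x * x + offset

results = parallel_map(square, [1, 2, 3], task_kwargs={'offset': 1})
# results == [2, 5, 10]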
def clone(self):
    """
    Command Section: clone
    Clone a VM from a template
    """
    self.config['hostname'] = self.config['hostname'].lower()
    self.config['mem'] = int(self.config['mem'] * 1024)  # convert GB to MB

    print("Cloning %s to new host %s with %sMB RAM..." % (
        self.config['template'],
        self.config['hostname'],
        self.config['mem']
    ))

    # initialize a list to hold our network settings
    ip_settings = list()

    # Get network settings for each IP
    for key, ip_string in enumerate(self.config['ips']):

        # convert ip from string to the 'IPAddress' type
        ip = IPAddress(ip_string)

        # determine network this IP is in
        for network in self.config['networks']:
            if ip in IPNetwork(network):
                self.config['networks'][network]['ip'] = ip
                ipnet = IPNetwork(network)
                self.config['networks'][network]['subnet_mask'] = str(
                    ipnet.netmask
                )
                ip_settings.append(self.config['networks'][network])

        # throw an error if we couldn't find a network for this ip
        if not any(d['ip'] == ip for d in ip_settings):
            print("I don't know what network %s is in. You can supply "
                  "settings for this network in config.yml." % ip_string)
            sys.exit(1)

    # network to place new VM in
    self.get_obj([vim.Network], ip_settings[0]['network'])
    datacenter = self.get_obj([vim.Datacenter],
                              ip_settings[0]['datacenter'])

    # get the folder where VMs are kept for this datacenter
    if self.config['destination_folder']:
        destfolder = self.content.searchIndex.FindByInventoryPath(
            self.config['destination_folder']
        )
    else:
        destfolder = datacenter.vmFolder

    cluster = self.get_obj([vim.ClusterComputeResource],
                           ip_settings[0]['cluster'])

    resource_pool_str = self.config['resource_pool']
    # resource_pool setting in config file takes priority over the
    # default 'Resources' pool
    if resource_pool_str == 'Resources' \
            and ('resource_pool' in ip_settings[key]):
        resource_pool_str = ip_settings[key]['resource_pool']

    resource_pool = self.get_resource_pool(cluster, resource_pool_str)

    host_system = self.config['host']
    if host_system != "":
        host_system = self.get_obj([vim.HostSystem],
                                   self.config['host'])

    if self.debug:
        self.print_debug("Destination cluster", cluster)
        self.print_debug("Resource pool", resource_pool)

    if resource_pool is None:
        # use default resource pool of target cluster
        resource_pool = cluster.resourcePool

    datastore = None
    if self.config['datastore']:
        datastore = self.get_obj(
            [vim.Datastore], self.config['datastore'])
    elif 'datastore' in ip_settings[0]:
        datastore = self.get_obj(
            [vim.Datastore], ip_settings[0]['datastore'])
    if datastore is None:
        print("Error: Unable to find Datastore '%s'"
              % ip_settings[0]['datastore'])
        sys.exit(1)

    if self.config['template_folder']:
        template_vm = self.get_vm_failfast(
            self.config['template'],
            False,
            'Template VM',
            path=self.config['template_folder']
        )
    else:
        template_vm = self.get_vm_failfast(
            self.config['template'],
            False,
            'Template VM'
        )

    # Relocation spec
    relospec = vim.vm.RelocateSpec()
    relospec.datastore = datastore

    if host_system:
        relospec.host = host_system

    if resource_pool:
        relospec.pool = resource_pool

    # Networking self.config for VM and guest OS
    devices = []
    adaptermaps = []

    # add existing NIC devices from template to our list of NICs
    # to be created
    try:
        for device in template_vm.config.hardware.device:
            if hasattr(device, 'addressType'):
                # this is a VirtualEthernetCard, so we'll delete it
                nic = vim.vm.device.VirtualDeviceSpec()
                nic.operation = \
                    vim.vm.device.VirtualDeviceSpec.Operation.remove
                nic.device = device
                devices.append(nic)
    except Exception:
        # not the most graceful handling, but unable to reproduce
        # user's issues in #57 at this time.
        pass

    # create a Network device for each static IP
    for key, ip in enumerate(ip_settings):
        # VM device
        nic = vim.vm.device.VirtualDeviceSpec()
        # or edit if a device exists
        nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        nic.device = vim.vm.device.VirtualVmxnet3()
        nic.device.wakeOnLanEnabled = True
        nic.device.addressType = 'assigned'
        # 4000 seems to be the value to use for a vmxnet3 device
        nic.device.key = 4000
        nic.device.deviceInfo = vim.Description()
        nic.device.deviceInfo.label = 'Network Adapter %s' % (key + 1)
        if 'dvportgroup' in ip_settings[key]:
            dvpg = ip_settings[key]['dvportgroup']
            nic.device.deviceInfo.summary = dvpg
            pg_obj = self.get_obj([vim.dvs.DistributedVirtualPortgroup], dvpg)  # noqa
            dvs_port_connection = vim.dvs.PortConnection()
            dvs_port_connection.portgroupKey = pg_obj.key
            dvs_port_connection.switchUuid = (
                pg_obj.config.distributedVirtualSwitch.uuid
            )

            # did it to get pep8
            e_nic = vim.vm.device.VirtualEthernetCard
            nic.device.backing = (
                e_nic.DistributedVirtualPortBackingInfo()
            )
            nic.device.backing.port = dvs_port_connection
        else:
            nic.device.deviceInfo.summary = ip_settings[key]['network']
            nic.device.backing = (
                vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
            )
            nic.device.backing.network = (
                self.get_obj([vim.Network], ip_settings[key]['network'])
            )
            nic.device.backing.deviceName = ip_settings[key]['network']
            nic.device.backing.useAutoDetect = False

        nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        nic.device.connectable.startConnected = True
        nic.device.connectable.allowGuestControl = True
        devices.append(nic)

        if 'customspecname' in ip_settings[key]:
            custom_spec_name = ip_settings[key]['customspecname']
            customspec = (
                self.get_customization_settings(custom_spec_name)
            )
            guest_map = customspec.nicSettingMap[0]
        else:
            customspec = vim.vm.customization.Specification()
            # guest NIC settings, i.e. 'adapter map'
            guest_map = vim.vm.customization.AdapterMapping()
            guest_map.adapter = vim.vm.customization.IPSettings()
            guest_map.adapter.ip = vim.vm.customization.FixedIp()
            guest_map.adapter.ip.ipAddress = str(ip_settings[key]['ip'])

        if 'subnet_mask' in ip_settings[key]:
            guest_map.adapter.subnetMask = (
                str(ip_settings[key]['subnet_mask'])
            )

        if 'gateway' in ip_settings[key]:
            guest_map.adapter.gateway = ip_settings[key]['gateway']

        if self.config['domain']:
            guest_map.adapter.dnsDomain = self.config['domain']

        adaptermaps.append(guest_map)

    # DNS settings
    if 'dns_servers' in self.config:
        globalip = vim.vm.customization.GlobalIPSettings()
        globalip.dnsServerList = self.config['dns_servers']
        globalip.dnsSuffixList = self.config['domain']
        customspec.globalIPSettings = globalip

    # Hostname settings
    ident = vim.vm.customization.LinuxPrep()
    ident.domain = self.config['domain']
    ident.hostName = vim.vm.customization.FixedName()
    ident.hostName.name = self.config['hostname']

    customspec.nicSettingMap = adaptermaps
    customspec.identity = ident

    # VM config spec
    vmconf = vim.vm.ConfigSpec()
    vmconf.numCPUs = self.config['cpus']
    vmconf.memoryMB = self.config['mem']
    vmconf.cpuHotAddEnabled = True
    vmconf.memoryHotAddEnabled = True
    vmconf.deviceChange = devices

    # Clone spec
    clonespec = vim.vm.CloneSpec()
    clonespec.location = relospec
    clonespec.config = vmconf
    clonespec.customization = customspec
    clonespec.powerOn = True
    clonespec.template = False

    self.addDisks(template_vm, clonespec)

    if self.debug:
        self.print_debug("CloneSpec", clonespec)

    # fire the clone task
    tasks = [template_vm.Clone(folder=destfolder,
                               name=self.config['hostname'],
                               spec=clonespec)]
    result = self.WaitForTasks(tasks)

    if self.config['post_clone_cmd']:
        try:
            # helper env variables
            os.environ['EZMOMI_CLONE_HOSTNAME'] = self.config['hostname']
            print("Running --post-clone-cmd %s"
                  % self.config['post_clone_cmd'])
            os.system(self.config['post_clone_cmd'])
        except Exception as e:
            print("Error running post-clone command. Exception: %s" % e)
            pass

    # send notification email
    if self.config['mail']:
        self.send_email()
Command Section: clone

Clone a VM from a template
def boxes_intersect(box1, box2):
    """Determines if two rectangles, each input as a tuple
    (xmin, xmax, ymin, ymax), intersect."""
    xmin1, xmax1, ymin1, ymax1 = box1
    xmin2, xmax2, ymin2, ymax2 = box2
    if interval_intersection_width(xmin1, xmax1, xmin2, xmax2) and \
            interval_intersection_width(ymin1, ymax1, ymin2, ymax2):
        return True
    else:
        return False
Determines if two rectangles, each input as a tuple (xmin, xmax, ymin, ymax), intersect.
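The `interval_intersection_width` helper is not shown in this row; a
self-contained check, assuming this plausible implementation of it:

def interval_intersection_width(amin, amax, bmin, bmax):
    # Width of the overlap of [amin, amax] and [bmin, bmax]; 0.0 if disjoint.
    return max(0.0, min(amax, bmax) - max(amin, bmin))

print(boxes_intersect((0, 2, 0, 2), (1, 3, 1, 3)))  # True: width-1 overlap on both axes
print(boxes_intersect((0, 1, 0, 1), (2, 3, 2, 3)))  # False: zero-width overlap on both axes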
def difference(self, other, sort=None):
    """
    Compute set difference of two MultiIndex objects

    Parameters
    ----------
    other : MultiIndex
    sort : False or None, default None
        Sort the resulting MultiIndex if possible

        .. versionadded:: 0.24.0

        .. versionchanged:: 0.24.1

           Changed the default value from ``True`` to ``None``
           (without change in behaviour).

    Returns
    -------
    diff : MultiIndex
    """
    self._validate_sort_keyword(sort)
    self._assert_can_do_setop(other)
    other, result_names = self._convert_can_do_setop(other)

    if len(other) == 0:
        return self

    if self.equals(other):
        return MultiIndex(levels=self.levels,
                          codes=[[]] * self.nlevels,
                          names=result_names, verify_integrity=False)

    this = self._get_unique_index()

    indexer = this.get_indexer(other)
    indexer = indexer.take((indexer != -1).nonzero()[0])

    label_diff = np.setdiff1d(np.arange(this.size), indexer,
                              assume_unique=True)
    difference = this.values.take(label_diff)
    if sort is None:
        difference = sorted(difference)

    if len(difference) == 0:
        return MultiIndex(levels=[[]] * self.nlevels,
                          codes=[[]] * self.nlevels,
                          names=result_names, verify_integrity=False)
    else:
        return MultiIndex.from_tuples(difference, sortorder=0,
                                      names=result_names)
Compute set difference of two MultiIndex objects

Parameters
----------
other : MultiIndex
sort : False or None, default None
    Sort the resulting MultiIndex if possible

    .. versionadded:: 0.24.0

    .. versionchanged:: 0.24.1

       Changed the default value from ``True`` to ``None``
       (without change in behaviour).

Returns
-------
diff : MultiIndex
def enable_external_loaders(obj):
    """Enable external service loaders like `VAULT_` and `REDIS_`;
    looks for env variables like `REDIS_ENABLED_FOR_DYNACONF`.
    """
    for name, loader in ct.EXTERNAL_LOADERS.items():
        enabled = getattr(
            obj, "{}_ENABLED_FOR_DYNACONF".format(name.upper()), False
        )
        if (
            enabled
            and enabled not in false_values
            and loader not in obj.LOADERS_FOR_DYNACONF
        ):  # noqa
            obj.logger.debug("loaders: Enabling %s", loader)
            obj.LOADERS_FOR_DYNACONF.insert(0, loader)
Enable external service loaders like `VAULT_` and `REDIS_`;
looks for env variables like `REDIS_ENABLED_FOR_DYNACONF`.
def fit(self):
    """Fit MCMC AgeDepthModel"""
    self._mcmcfit = self.mcmcsetup.run()
    self._mcmcfit.burnin(self.burnin)
    dmin = min(self._mcmcfit.depth_segments)
    dmax = max(self._mcmcfit.depth_segments)
    self._thick = (dmax - dmin) / len(self.mcmcfit.depth_segments)
    self._depth = np.arange(dmin, dmax + 0.001)
    self._age_ensemble = np.array([self.agedepth(d=dx)
                                   for dx in self.depth])
Fit MCMC AgeDepthModel
def _netstat_route_sunos():
    '''
    Return netstat routing information for SunOS
    '''
    ret = []
    cmd = 'netstat -f inet -rn | tail +5'
    out = __salt__['cmd.run'](cmd, python_shell=True)
    for line in out.splitlines():
        comps = line.split()
        ret.append({
            'addr_family': 'inet',
            'destination': comps[0],
            'gateway': comps[1],
            'netmask': '',
            'flags': comps[2],
            'interface': comps[5] if len(comps) >= 6 else ''})
    cmd = 'netstat -f inet6 -rn | tail +5'
    out = __salt__['cmd.run'](cmd, python_shell=True)
    for line in out.splitlines():
        comps = line.split()
        ret.append({
            'addr_family': 'inet6',
            'destination': comps[0],
            'gateway': comps[1],
            'netmask': '',
            'flags': comps[2],
            'interface': comps[5] if len(comps) >= 6 else ''})
    return ret
Return netstat routing information for SunOS
def read_snapshots(path, comments="#", directed=False, delimiter=None,
                   nodetype=None, timestamptype=None, encoding='utf-8',
                   keys=False):
    """Read a DyNetx graph from snapshot graph list format.

    Parameters
    ----------
    path : file-like object
        The (binary-mode) file to read from.
    delimiter : character
        Column delimiter
    """
    ids = None
    lines = (line.decode(encoding) for line in path)
    if keys:
        ids = read_ids(path.name, delimiter=delimiter,
                       timestamptype=timestamptype)

    return parse_snapshots(lines, comments=comments, directed=directed,
                           delimiter=delimiter, nodetype=nodetype,
                           timestamptype=timestamptype, keys=ids)
Read a DyNetx graph from snapshot graph list format.

Parameters
----------
path : file-like object
    The (binary-mode) file to read from.
delimiter : character
    Column delimiter
def request(self, method, url, **kwargs):
    """
    Overrides ``requests.Session.request`` to renew the cookie and then
    retry the original request (if required).
    """
    resp = super(CookieSession, self).request(method, url, **kwargs)

    if not self._auto_renew:
        return resp

    is_expired = any((
        resp.status_code == 403 and
        response_to_json_dict(resp).get('error') == 'credentials_expired',
        resp.status_code == 401
    ))
    if is_expired:
        self.login()
        resp = super(CookieSession, self).request(method, url, **kwargs)

    return resp
Overrides ``requests.Session.request`` to renew the cookie and then retry the original request (if required).
def _iter_restrict(self, zeros, ones):
    """Iterate through indices of all table entries that vary."""
    inputs = list(self.inputs)
    unmapped = dict()
    for i, v in enumerate(self.inputs):
        if v in zeros:
            inputs[i] = 0
        elif v in ones:
            inputs[i] = 1
        else:
            unmapped[v] = i
    vs = sorted(unmapped.keys())
    for num in range(1 << len(vs)):
        for v, val in boolfunc.num2point(num, vs).items():
            inputs[unmapped[v]] = val
        yield sum((val << i) for i, val in enumerate(inputs))
Iterate through indices of all table entries that vary.
def _setup_states(state_definitions, prev=()):
    """Create a StateList object from a 'states' Workflow attribute."""
    states = list(prev)
    for state_def in state_definitions:
        if len(state_def) != 2:
            raise TypeError(
                "The 'state' attribute of a workflow should be "
                "a two-tuple of strings; got %r instead." % (state_def,)
            )
        name, title = state_def
        state = State(name, title)
        if any(st.name == name for st in states):
            # Replacing an existing state
            states = [state if st.name == name else st for st in states]
        else:
            states.append(state)
    return StateList(states)
Create a StateList object from a 'states' Workflow attribute.
def resolve(self, geoid, id_only=False):
    '''
    Resolve a GeoZone given a GeoID.

    The start date is resolved from the given GeoID: it finds the zone
    valid at the GeoID validity, resolves the `latest` alias, or uses
    `latest` when no validity is given.

    If `id_only` is True, the result will be the resolved GeoID
    instead of the resolved zone.
    '''
    level, code, validity = geoids.parse(geoid)
    qs = self(level=level, code=code)
    if id_only:
        qs = qs.only('id')
    if validity == 'latest':
        result = qs.latest()
    else:
        result = qs.valid_at(validity).first()
    return result.id if id_only and result else result
Resolve a GeoZone given a GeoID.

The start date is resolved from the given GeoID: it finds the zone
valid at the GeoID validity, resolves the `latest` alias, or uses
`latest` when no validity is given.

If `id_only` is True, the result will be the resolved GeoID
instead of the resolved zone.
def handle_no_document(self, item_session: ItemSession) -> Actions:
    '''Callback for successful responses containing no useful document.

    Returns:
        A value from :class:`.hook.Actions`.
    '''
    self._waiter.reset()
    action = self.handle_response(item_session)
    if action == Actions.NORMAL:
        item_session.set_status(Status.skipped)
    return action
Callback for successful responses containing no useful document.

Returns:
    A value from :class:`.hook.Actions`.
def _constrain_pan(self):
    """Constrain bounding box."""
    if self.xmin is not None and self.xmax is not None:
        p0 = self.xmin + 1. / self._zoom[0]
        p1 = self.xmax - 1. / self._zoom[0]
        p0, p1 = min(p0, p1), max(p0, p1)
        self._pan[0] = np.clip(self._pan[0], p0, p1)

    if self.ymin is not None and self.ymax is not None:
        p0 = self.ymin + 1. / self._zoom[1]
        p1 = self.ymax - 1. / self._zoom[1]
        p0, p1 = min(p0, p1), max(p0, p1)
        self._pan[1] = np.clip(self._pan[1], p0, p1)
Constrain bounding box.
def predict(self, t):
    """Predict the smoothed function value at time t

    Parameters
    ----------
    t : array_like
        Times at which to predict the result

    Returns
    -------
    y : ndarray
        Smoothed values at time t
    """
    t = np.asarray(t)
    return self._predict(np.ravel(t)).reshape(t.shape)
Predict the smoothed function value at time t

Parameters
----------
t : array_like
    Times at which to predict the result

Returns
-------
y : ndarray
    Smoothed values at time t
def send(self, value):
    """
    Send text to stdin. Can only be used on non-blocking commands.

    Args:
        value (str): the text to write on stdin

    Raises:
        TypeError: If command is blocking

    Returns:
        ShellCommand: return this ShellCommand instance for chaining
    """
    if not self.block and self._stdin is not None:
        self.writer.write("{}\n".format(value))
        return self
    else:
        raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
Send text to stdin. Can only be used on non-blocking commands.

Args:
    value (str): the text to write on stdin

Raises:
    TypeError: If command is blocking

Returns:
    ShellCommand: return this ShellCommand instance for chaining
def kth_to_last_dict(head, k):
    """
    This is a brute force method where we keep a dict the size of the list.
    Then we check it for the value we need. If the key is not in the dict,
    our `and` statement will short-circuit and return False.
    """
    if not (head and k > -1):
        return False
    d = dict()
    count = 0
    while head:
        d[count] = head
        head = head.next
        count += 1
    return len(d) - k in d and d[len(d) - k]
This is a brute force method where we keep a dict the size of the list.
Then we check it for the value we need. If the key is not in the dict,
our `and` statement will short-circuit and return False.
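A minimal usage sketch with a hypothetical singly linked Node class
(the source only assumes nodes expose a `next` attribute):

class Node:
    def __init__(self, val):
        self.val = val
        self.next = None

head = Node(1)
head.next = Node(2)
head.next.next = Node(3)

node = kth_to_last_dict(head, 1)   # k=1 -> the last node
print(node.val if node else None)  # 3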
def listunspent(self, address: str) -> list:
    '''Returns unspent transactions for given address.'''
    try:
        return cast(dict,
                    self.ext_fetch('listunspent/' + address))['unspent_outputs']
    except KeyError:
        raise InsufficientFunds('Insufficient funds.')
Returns unspent transactions for given address.
def get_next_event(event, now):
    """
    Returns the next occurrence of a given event, relative to 'now'.

    The 'event' arg should be an iterable containing one element, namely the
    event we'd like to find the occurrence of. The reason for this is b/c the
    get_count() function of CountHandler, which this func makes use of,
    expects an iterable.

    CHANGED: The 'now' arg must be an instance of datetime.datetime() to
    allow time comparison (used to accept datetime.date() as well)
    """
    year = now.year
    month = now.month
    day = now.day

    e_day = event[0].l_start_date.day
    e_end_day = event[0].l_end_date.day
    good_today = True if event[0].l_start_date.time() >= now.time() else False

    if event[0].starts_same_year_month_as(year, month) and \
            e_day <= now.day <= e_end_day:
        occurrences = CountHandler(year, month, event).get_count()
        future_dates = (x for x in occurrences if x >= now.day)
        day = min(future_dates, key=lambda x: abs(x - now.day))
    else:
        e_year = event[0].l_start_date.year
        e_month = event[0].l_start_date.month
        # convert to datetime.date() to be sure we can make a comparison
        if date(e_year, e_month, e_day) > date(now.year, now.month, now.day):
            # if the event hasn't started yet, then its next occurrence will
            # be on its start date, so return that.
            year = e_year
            month = e_month
            day = e_day
        else:
            occurrences = CountHandler(year, month, event).get_count()
            future_dates = [x for x in occurrences if x >= now.day]
            e_end_month = event[0].l_end_date.month

            if future_dates and future_dates[0] == day and not good_today:
                future_dates.pop(0)

            while not future_dates:
                month, year = inc_month(month, year)
                if event[0].repeats('YEARLY') and \
                        (month != e_month or month != e_end_month):
                    continue
                occurrences = CountHandler(year, month, event).get_count()
                # we don't check for now.day here, b/c we're in a month past
                # whatever now is. As an example, if we checked for now.day
                # we'd get stuck in an infinite loop if this were a
                # monthly repeating event and our 'now' was on a day after the
                # event's l_end_date.day
                future_dates = [x for x in occurrences]
            day = min(future_dates)
    if event[0].repeats('WEEKDAY'):
        return check_weekday(year, month, day)
    return year, month, day
Returns the next occurrence of a given event, relative to 'now'.

The 'event' arg should be an iterable containing one element, namely the
event we'd like to find the occurrence of. The reason for this is b/c the
get_count() function of CountHandler, which this func makes use of,
expects an iterable.

CHANGED: The 'now' arg must be an instance of datetime.datetime() to allow
time comparison (used to accept datetime.date() as well)
def _set_active_policy(self, v, load=False):
    """
    Setter method for active_policy, mapped from YANG variable
    /rbridge_id/secpolicy/active_policy (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_active_policy is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_active_policy() directly.

    YANG Description: Set the Active policy
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=active_policy.active_policy,
            is_container='container',
            presence=False,
            yang_name="active-policy",
            rest_name="active-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {
                u'info': u'Active policy set',
                u'cli-incomplete-no': None,
                u'cli-suppress-no': None}},
            namespace='urn:brocade.com:mgmt:brocade-fc-auth',
            defining_module='brocade-fc-auth',
            yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """active_policy must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=active_policy.active_policy, is_container='container', presence=False, yang_name="active-policy", rest_name="active-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Active policy set', u'cli-incomplete-no': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True)""",
        })

    self.__active_policy = t
    if hasattr(self, '_set'):
        self._set()
Setter method for active_policy, mapped from YANG variable
/rbridge_id/secpolicy/active_policy (container)

If this variable is read-only (config: false) in the source YANG file,
then _set_active_policy is considered as a private method. Backends
looking to populate this variable should do so via calling
thisObj._set_active_policy() directly.

YANG Description: Set the Active policy
def ellipse(self, x, y, w, h, style=''):
    "Draw an ellipse"
    if style == 'F':
        op = 'f'
    elif style == 'FD' or style == 'DF':
        op = 'B'
    else:
        op = 'S'

    cx = x + w / 2.0
    cy = y + h / 2.0
    rx = w / 2.0
    ry = h / 2.0

    lx = 4.0 / 3.0 * (math.sqrt(2) - 1) * rx
    ly = 4.0 / 3.0 * (math.sqrt(2) - 1) * ry

    self._out(sprintf('%.2f %.2f m %.2f %.2f %.2f %.2f %.2f %.2f c',
                      (cx + rx) * self.k, (self.h - cy) * self.k,
                      (cx + rx) * self.k, (self.h - (cy - ly)) * self.k,
                      (cx + lx) * self.k, (self.h - (cy - ry)) * self.k,
                      cx * self.k, (self.h - (cy - ry)) * self.k))
    self._out(sprintf('%.2f %.2f %.2f %.2f %.2f %.2f c',
                      (cx - lx) * self.k, (self.h - (cy - ry)) * self.k,
                      (cx - rx) * self.k, (self.h - (cy - ly)) * self.k,
                      (cx - rx) * self.k, (self.h - cy) * self.k))
    self._out(sprintf('%.2f %.2f %.2f %.2f %.2f %.2f c',
                      (cx - rx) * self.k, (self.h - (cy + ly)) * self.k,
                      (cx - lx) * self.k, (self.h - (cy + ry)) * self.k,
                      cx * self.k, (self.h - (cy + ry)) * self.k))
    self._out(sprintf('%.2f %.2f %.2f %.2f %.2f %.2f c %s',
                      (cx + lx) * self.k, (self.h - (cy + ry)) * self.k,
                      (cx + rx) * self.k, (self.h - (cy + ly)) * self.k,
                      (cx + rx) * self.k, (self.h - cy) * self.k,
                      op))
Draw an ellipse
def _construct_from_json(self, rec):
    """ Construct this Dagobah instance from a JSON document. """
    self.delete()

    for required_key in ['dagobah_id', 'created_jobs']:
        setattr(self, required_key, rec[required_key])

    for job_json in rec.get('jobs', []):
        self._add_job_from_spec(job_json)

    self.commit(cascade=True)
Construct this Dagobah instance from a JSON document.
def _sync_string_to(bin_or_str, string):
    """
    Python 3 compliance: ensure two strings are the same type
    (unicode or binary)
    """
    if isinstance(string, type(bin_or_str)):
        return string
    elif isinstance(string, binary_type):
        return string.decode(DEFAULT_ENCODING)
    else:
        return string.encode(DEFAULT_ENCODING)
Python 3 compliance: ensure two strings are the same type (unicode or binary)
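A quick behavior sketch on Python 3, assuming `binary_type = bytes` and
`DEFAULT_ENCODING = 'utf-8'` (both are imports in the source, reproduced
here as assumptions):

binary_type = bytes          # assumed; typically imported from a compat module
DEFAULT_ENCODING = 'utf-8'   # assumed

print(_sync_string_to(b'ref', 'abc'))   # b'abc' -- encoded to match bytes
print(_sync_string_to('ref', b'abc'))   # 'abc'  -- decoded to match text
print(_sync_string_to('ref', 'abc'))    # 'abc'  -- already the same type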
def get_grades(self):
    """Gets the grades in this system ranked from highest to lowest.

    return: (osid.grading.GradeList) - the list of grades
    raise: IllegalState - ``is_based_on_grades()`` is ``false``
    raise: OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.repository.Asset.get_asset_contents_template
    return GradeList(
        self._my_map['grades'],
        runtime=self._runtime,
        proxy=self._proxy)
Gets the grades in this system ranked from highest to lowest.

return: (osid.grading.GradeList) - the list of grades
raise: IllegalState - ``is_based_on_grades()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
def _do_search(self):
    """
    Perform the mlt call, then convert that raw format into a
    SearchResults instance and return it.
    """
    if self._results_cache is None:
        response = self.raw()
        results = self.to_python(response.get('hits', {}).get('hits', []))
        self._results_cache = DictSearchResults(
            self.type, response, results, None)
    return self._results_cache
Perform the mlt call, then convert that raw format into a SearchResults instance and return it.
def add_cron(self, name, minute, hour, mday, month, wday, who,
             command, env=None):
    """
    Add an entry to the system crontab.
    """
    raise NotImplementedError
Add an entry to the system crontab.
def get_tc_api(self, host, headers=None, cert=None, logger=None):
    '''
    Gets HttpApi wrapped into a neat little package that raises
    TestStepFail if expected status code is not returned by the server.

    Default setting for expected status code is 200. Set expected to None
    when calling methods to ignore the expected status code parameter or
    set raiseException = False to disable raising the exception.
    '''
    if logger is None and self.logger:
        logger = self.logger
    return Api(host, headers, cert, logger)
Gets HttpApi wrapped into a neat little package that raises TestStepFail if expected status code is not returned by the server. Default setting for expected status code is 200. Set expected to None when calling methods to ignore the expected status code parameter or set raiseException = False to disable raising the exception.
def torecarray(*args, **kwargs):
    """
    Convenient shorthand for ``toarray(*args, **kwargs).view(np.recarray)``.
    """
    import numpy as np
    return toarray(*args, **kwargs).view(np.recarray)
Convenient shorthand for ``toarray(*args, **kwargs).view(np.recarray)``.
def shader_substring(body, stack_frame=1):
    """
    Call this method from a function that defines a literal shader string
    as the "body" argument.

    Dresses up a shader string in two ways:
        1) Insert #line number declaration
        2) un-indents

    The line number information can help debug glsl compile errors.
    The unindenting allows you to type the shader code at a pleasing
    indent level in your python method, while still creating an
    unindented GLSL string at the end.
    """
    line_count = len(body.splitlines(True))
    line_number = inspect.stack()[stack_frame][2] + 1 - line_count
    return """\
#line %d
%s
""" % (line_number, textwrap.dedent(body))
Call this method from a function that defines a literal shader string
as the "body" argument.

Dresses up a shader string in two ways:
    1) Insert #line number declaration
    2) un-indents

The line number information can help debug glsl compile errors.
The unindenting allows you to type the shader code at a pleasing
indent level in your python method, while still creating an
unindented GLSL string at the end.
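A usage sketch under the stated convention; the wrapper function name and
the GLSL body are hypothetical:

def fragment_shader():
    return shader_substring("""
        void main() {
            gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
        }
    """)

# The returned string begins with a "#line <N>" directive pointing at the
# literal's position in this file, followed by the dedented GLSL body.
print(fragment_shader())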
def schedule_task(self, task_id):
    """Schedule a task.

    :param task_id: identifier of the task to schedule

    :raises NotFoundError: raised when the requested task is not
        found in the registry
    """
    task = self.registry.get(task_id)
    job_args = self._build_job_arguments(task)

    archiving_cfg = task.archiving_cfg
    fetch_from_archive = False if not archiving_cfg else archiving_cfg.fetch_from_archive

    # Schedule the job as soon as possible
    queue = Q_ARCHIVE_JOBS if fetch_from_archive else Q_CREATION_JOBS
    job_id = self._scheduler.schedule_job_task(queue, task.task_id,
                                               job_args, delay=0)

    logger.info("Job #%s (task: %s) scheduled", job_id, task.task_id)

    return job_id
Schedule a task.

:param task_id: identifier of the task to schedule

:raises NotFoundError: raised when the requested task is not
    found in the registry
def parse_row(self, row, row_index, cell_mode=CellMode.cooked):
    """Parse a row according to the given cell_mode."""
    return [self.parse_cell(cell, (col_index, row_index), cell_mode)
            for col_index, cell in enumerate(row)]
Parse a row according to the given cell_mode.
def timeseries(X, **kwargs):
    """Plot X. See timeseries_subplot."""
    pl.figure(figsize=(2 * rcParams['figure.figsize'][0],
                       rcParams['figure.figsize'][1]),
              subplotpars=sppars(left=0.12, right=0.98, bottom=0.13))
    timeseries_subplot(X, **kwargs)
Plot X. See timeseries_subplot.
def parse_filename_meta(filename):
    """
    taken from suvi code by vhsu
    Parse the metadata from a product filename, either L1b or l2:
       - file start
       - file end
       - platform
       - product

    :param filename: string filename of product
    :return: (filename, start datetime, end datetime, platform, product)
    """
    common_pattern = "_%s_%s" % (
        "(?P<product>[a-zA-Z]{3}[a-zA-Z]?-[a-zA-Z0-9]{2}[a-zA-Z0-9]?-[a-zA-Z0-9]{4}[a-zA-Z0-9]?)",  # product l1b, or l2
        "(?P<platform>[gG][1-9]{2})"  # platform, like g16
    )
    patterns = {  # all patterns must have the common component
        "l2_pattern": re.compile(
            "%s_s(?P<start>[0-9]{8}T[0-9]{6})Z_e(?P<end>[0-9]{8}T[0-9]{6})Z" % common_pattern),
        "l1b_pattern": re.compile(
            '%s_s(?P<start>[0-9]{14})_e(?P<end>[0-9]{14})' % common_pattern),
        "dayfile_pattern": re.compile("%s_d(?P<start>[0-9]{8})" % common_pattern),
        "monthfile_pattern": re.compile("%s_m(?P<start>[0-9]{6})" % common_pattern),
        "yearfile_pattern": re.compile("%s_y(?P<start>[0-9]{4})" % common_pattern),
    }
    match, dt_start, dt_end = None, None, None
    for pat_type, pat in patterns.items():
        match = pat.search(filename)
        if match is not None:
            if pat_type == "l2_pattern":
                # parse l2
                dt_start = datetime.strptime(match.group("start"), '%Y%m%dT%H%M%S')
                dt_end = datetime.strptime(match.group("end"), '%Y%m%dT%H%M%S')
            elif pat_type == "l1b_pattern":
                # parse l1b
                dt_start = datetime.strptime(match.group("start"), '%Y%j%H%M%S%f')
                dt_end = datetime.strptime(match.group("end"), '%Y%j%H%M%S%f')
            elif pat_type == "dayfile_pattern":
                dt_start = datetime.strptime(match.group("start"), "%Y%m%d")
                dt_end = dt_start + timedelta(hours=24)
            elif pat_type == "monthfile_pattern":
                dt_start = datetime.strptime(match.group("start"), "%Y%m")
                # will raise exception in December, fix when needed
                dt_end = datetime(dt_start.year, dt_start.month + 1, 1)
            elif pat_type == "yearfile_pattern":
                dt_start = datetime.strptime(match.group("start"), "%Y")
                dt_end = datetime(dt_start.year + 1, 1, 1)
            break

    if match is None:
        if "NCEI" in filename and ".fits" in filename:
            dt_start = datetime.strptime(
                "T".join(filename.split("_")[4:6]), "%Y%m%dT%H%M%S")
            dt_end = dt_start
            angstroms = int(filename.split("_")[2])
            atom = "Fe" if angstroms != 304 else "He"
            product = "SUVI-L1b-{}{}".format(atom, angstroms)
            return filename, dt_start, dt_end, "g16", product
        else:
            # we didn't find any matching patterns...
            raise ValueError("Timestamps not detected in filename: %s" % filename)
    else:
        return filename, dt_start, dt_end, match.group("platform"), match.group("product")
taken from suvi code by vhsu

Parse the metadata from a product filename, either L1b or l2:
    - file start
    - file end
    - platform
    - product

:param filename: string filename of product
:return: (filename, start datetime, end datetime, platform, product)
def _pi_id(self):
    """Try to detect id of a Raspberry Pi."""
    # Check for Pi boards:
    pi_rev_code = self._pi_rev_code()
    if pi_rev_code:
        for model, codes in _PI_REV_CODES.items():
            if pi_rev_code in codes:
                return model
    return None
Try to detect id of a Raspberry Pi.
def format_seq(self, outstream=None, linewidth=70):
    """
    Print a sequence in a readable format.

    :param outstream: if `None`, formatted sequence is returned as a
                      string; otherwise, it is treated as a file-like
                      object and the formatted sequence is printed to
                      the outstream
    :param linewidth: width for wrapping sequences over multiple lines;
                      set to 0 for no wrapping
    """
    if linewidth == 0 or len(self.seq) <= linewidth:
        if outstream is None:
            return self.seq
        else:
            print(self.seq, file=outstream)
            return

    i = 0
    seq = ''
    while i < len(self.seq):
        if outstream is None:
            seq += self.seq[i:i+linewidth] + '\n'
        else:
            print(self.seq[i:i+linewidth], file=outstream)
        i += linewidth
    if outstream is None:
        return seq
Print a sequence in a readable format.

:param outstream: if `None`, formatted sequence is returned as a string;
                  otherwise, it is treated as a file-like object and the
                  formatted sequence is printed to the outstream
:param linewidth: width for wrapping sequences over multiple lines;
                  set to 0 for no wrapping
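For a spaceless sequence string, the line-chunking above matches what the
standard library's textwrap produces, which can serve as a cross-check
(a sketch; format_seq additionally terminates each line with a newline):

import textwrap

seq = 'ACGT' * 30                       # hypothetical 120-character sequence
wrapped = textwrap.fill(seq, width=70)  # two lines: 70 chars + 50 chars
print(wrapped)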
def html_visit_inheritance_diagram(self, node):
    # type: (nodes.NodeVisitor, inheritance_diagram) -> None
    """
    Output the graph for HTML. This will insert a PNG with clickable
    image map.
    """
    graph = node['graph']

    graph_hash = get_graph_hash(node)
    name = 'inheritance%s' % graph_hash

    # Create a mapping from fully-qualified class names to URLs.
    graphviz_output_format = self.builder.env.config.graphviz_output_format.upper()
    current_filename = self.builder.current_docname + self.builder.out_suffix
    urls = {}
    for child in node:
        if child.get('refuri') is not None:
            if graphviz_output_format == 'SVG':
                urls[child['reftitle']] = os.path.join("..", child.get('refuri'))
            else:
                urls[child['reftitle']] = child.get('refuri')
        elif child.get('refid') is not None:
            if graphviz_output_format == 'SVG':
                urls[child['reftitle']] = os.path.join(
                    '..', current_filename + '#' + child.get('refid'))
            else:
                urls[child['reftitle']] = '#' + child.get('refid')

    dotcode = graph.generate_dot(name, urls, env=self.builder.env)
    render_dot_html(
        self, node, dotcode, {}, 'inheritance', 'inheritance',
        alt='Inheritance diagram of ' + node['content'],
        link_to_svg='<i class="fa fa-external-link" aria-hidden="true"></i>'
                    ' SVG')
    raise nodes.SkipNode
Output the graph for HTML. This will insert a PNG with clickable image map.
def remove_udp_port(self, port):
    """
    Removes an associated UDP port number from this project.

    :param port: UDP port number
    """
    if port in self._used_udp_ports:
        self._used_udp_ports.remove(port)
Removes an associated UDP port number from this project.

:param port: UDP port number
def timeout_selecting(self):
    """Timeout of selecting on SELECTING state.

    Not specified in [:rfc:`7844`].

    See comments in :func:`dhcpcapfsm.DHCPCAPFSM.timeout_request`.
    """
    logger.debug('C2.1: T In %s, timeout receiving response to select.',
                 self.current_state)
    if len(self.offers) >= MAX_OFFERS_COLLECTED:
        logger.debug('C2.2: T Maximum number of offers reached, '
                     'raise REQUESTING.')
        raise self.REQUESTING()

    if self.discover_attempts >= MAX_ATTEMPTS_DISCOVER:
        logger.debug('C2.3: T Maximum number of discover retries is %s'
                     ' and already sent %s.',
                     MAX_ATTEMPTS_DISCOVER, self.discover_attempts)
        if len(self.offers) <= 0:
            logger.debug('C2.4: T. But no OFFERS where received, '
                         'raise ERROR.')
            raise self.ERROR()
        logger.debug('C2.4: F. But there is some OFFERS, '
                     'raise REQUESTING.')
        raise self.REQUESTING()

    logger.debug('C2.2: F. Still not received all OFFERS, but not '
                 'max # attemps reached, raise SELECTING.')
    raise self.SELECTING()
Timeout of selecting on SELECTING state.

Not specified in [:rfc:`7844`].

See comments in :func:`dhcpcapfsm.DHCPCAPFSM.timeout_request`.
def delete(self, docids):
    """Delete documents from the current session."""
    self.check_session()
    result = self.session.delete(docids)
    if self.autosession:
        self.commit()
    return result
Delete documents from the current session.
def _build_dictionary(self, results):
    """
    Build model dictionary keyed by the relation's foreign key.

    :param results: The results
    :type results: Collection

    :rtype: dict
    """
    foreign = self._foreign_key

    dictionary = {}

    for result in results:
        key = getattr(result.pivot, foreign)

        if key not in dictionary:
            dictionary[key] = []

        dictionary[key].append(result)

    return dictionary
Build model dictionary keyed by the relation's foreign key.

:param results: The results
:type results: Collection

:rtype: dict
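The same grouping is often written with collections.defaultdict; a
standalone sketch (the `foreign` attribute name is hypothetical and the
pivot objects are assumed to expose it):

from collections import defaultdict

def build_dictionary(results, foreign):
    # Group results by the foreign-key value found on each result's pivot.
    dictionary = defaultdict(list)
    for result in results:
        dictionary[getattr(result.pivot, foreign)].append(result)
    return dict(dictionary)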
def normalize_surfs(in_file, transform_file, newpath=None):
    """
    Re-center GIFTI coordinates to align to native T1 space.

    For midthickness surfaces, add MidThickness metadata.

    Coordinate update based on:
    https://github.com/Washington-University/workbench/blob/1b79e56/src/Algorithms/AlgorithmSurfaceApplyAffine.cxx#L73-L91
    and
    https://github.com/Washington-University/Pipelines/blob/ae69b9a/PostFreeSurfer/scripts/FreeSurfer2CaretConvertAndRegisterNonlinear.sh#L147
    """
    img = nb.load(in_file)
    transform = load_transform(transform_file)
    pointset = img.get_arrays_from_intent('NIFTI_INTENT_POINTSET')[0]
    coords = pointset.data.T
    c_ras_keys = ('VolGeomC_R', 'VolGeomC_A', 'VolGeomC_S')
    ras = np.array([[float(pointset.metadata[key])]
                    for key in c_ras_keys])
    ones = np.ones((1, coords.shape[1]), dtype=coords.dtype)
    # Apply C_RAS translation to coordinates, then transform
    pointset.data = transform.dot(
        np.vstack((coords + ras, ones)))[:3].T.astype(coords.dtype)

    secondary = nb.gifti.GiftiNVPairs('AnatomicalStructureSecondary',
                                      'MidThickness')
    geom_type = nb.gifti.GiftiNVPairs('GeometricType', 'Anatomical')
    has_ass = has_geo = False
    for nvpair in pointset.meta.data:
        # Remove C_RAS translation from metadata to avoid double-dipping
        # in FreeSurfer
        if nvpair.name in c_ras_keys:
            nvpair.value = '0.000000'
        # Check for missing metadata
        elif nvpair.name == secondary.name:
            has_ass = True
        elif nvpair.name == geom_type.name:
            has_geo = True
    fname = os.path.basename(in_file)
    # Update metadata for MidThickness/graymid surfaces
    if 'midthickness' in fname.lower() or 'graymid' in fname.lower():
        if not has_ass:
            pointset.meta.data.insert(1, secondary)
        if not has_geo:
            pointset.meta.data.insert(2, geom_type)

    # Default to the current directory when no output path is given
    if newpath is None:
        newpath = os.getcwd()
    out_file = os.path.join(newpath, fname)
    img.to_filename(out_file)
    return out_file
Re-center GIFTI coordinates to align to native T1 space.

For midthickness surfaces, add MidThickness metadata.

Coordinate update based on:
https://github.com/Washington-University/workbench/blob/1b79e56/src/Algorithms/AlgorithmSurfaceApplyAffine.cxx#L73-L91
and
https://github.com/Washington-University/Pipelines/blob/ae69b9a/PostFreeSurfer/scripts/FreeSurfer2CaretConvertAndRegisterNonlinear.sh#L147
def calculate_average_scores_on_graph(
        graph: BELGraph,
        key: Optional[str] = None,
        tag: Optional[str] = None,
        default_score: Optional[float] = None,
        runs: Optional[int] = None,
        use_tqdm: bool = False,
):
    """Calculate the scores over all biological processes in the sub-graph.

    As an implementation, it simply computes the sub-graphs then calls
    :func:`calculate_average_scores_on_subgraphs` as described in that
    function's documentation.

    :param graph: A BEL graph with heats already on the nodes
    :param key: The key in the node data dictionary representing the
     experimental data. Defaults to :data:`pybel_tools.constants.WEIGHT`.
    :param tag: The key for the nodes' data dictionaries where the scores
     will be put. Defaults to 'score'
    :param default_score: The initial score for all nodes. This number can
     go up or down.
    :param runs: The number of times to run the heat diffusion workflow.
     Defaults to 100.
    :param use_tqdm: Should there be a progress bar for runners?
    :return: A dictionary of {pybel node tuple: results tuple}
    :rtype: dict[tuple, tuple]

    Suggested usage with :mod:`pandas`:

    >>> import pandas as pd
    >>> from pybel_tools.analysis.heat import calculate_average_scores_on_graph
    >>> graph = ...  # load graph and data
    >>> scores = calculate_average_scores_on_graph(graph)
    >>> pd.DataFrame.from_items(scores.items(), orient='index', columns=RESULT_LABELS)
    """
    subgraphs = generate_bioprocess_mechanisms(graph, key=key)
    scores = calculate_average_scores_on_subgraphs(
        subgraphs,
        key=key,
        tag=tag,
        default_score=default_score,
        runs=runs,
        use_tqdm=use_tqdm,
    )
    return scores
Calculate the scores over all biological processes in the sub-graph.

As an implementation, it simply computes the sub-graphs then calls
:func:`calculate_average_scores_on_subgraphs` as described in that
function's documentation.

:param graph: A BEL graph with heats already on the nodes
:param key: The key in the node data dictionary representing the
 experimental data. Defaults to :data:`pybel_tools.constants.WEIGHT`.
:param tag: The key for the nodes' data dictionaries where the scores
 will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can
 go up or down.
:param runs: The number of times to run the heat diffusion workflow.
 Defaults to 100.
:param use_tqdm: Should there be a progress bar for runners?
:return: A dictionary of {pybel node tuple: results tuple}
:rtype: dict[tuple, tuple]

Suggested usage with :mod:`pandas`:

>>> import pandas as pd
>>> from pybel_tools.analysis.heat import calculate_average_scores_on_graph
>>> graph = ...  # load graph and data
>>> scores = calculate_average_scores_on_graph(graph)
>>> pd.DataFrame.from_items(scores.items(), orient='index', columns=RESULT_LABELS)
def add_suffix(fullname, suffix):
    """ Add suffix to a full file name"""
    name, ext = os.path.splitext(fullname)
    return name + '_' + suffix + ext
Add suffix to a full file name
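Behavior sketch, including the os.path.splitext caveat for double
extensions:

import os

print(add_suffix('/tmp/report.txt', 'v2'))   # /tmp/report_v2.txt
print(add_suffix('archive.tar.gz', 'bak'))   # archive.tar_bak.gz --
                                             # splitext only splits the final '.gz'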
def get_num_processes():
    """Return the number of processes to use in parallel."""
    cpu_count = multiprocessing.cpu_count()
    if config.NUMBER_OF_CORES == 0:
        raise ValueError(
            'Invalid NUMBER_OF_CORES; value may not be 0.')

    if config.NUMBER_OF_CORES > cpu_count:
        log.info('Requesting %s cores; only %s available',
                 config.NUMBER_OF_CORES, cpu_count)
        return cpu_count

    if config.NUMBER_OF_CORES < 0:
        num = cpu_count + config.NUMBER_OF_CORES + 1
        if num <= 0:
            raise ValueError(
                'Invalid NUMBER_OF_CORES; negative value is too negative: '
                'requesting {} cores, {} available.'.format(num, cpu_count))
        return num

    return config.NUMBER_OF_CORES
Return the number of processes to use in parallel.
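The negative-value convention resolves as below on a hypothetical
8-core machine:

# config.NUMBER_OF_CORES =  0  -> ValueError
# config.NUMBER_OF_CORES = 16  -> clamped to 8 (with a log message)
# config.NUMBER_OF_CORES = -1  -> 8 + (-1) + 1 = 8  (use all cores)
# config.NUMBER_OF_CORES = -2  -> 8 + (-2) + 1 = 7  (leave one core free)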
def destination(self, point, bearing, distance=None):
    """
    Calculate the destination point reached by travelling the given
    distance from `point` along the given initial bearing, using a
    great-circle (spherical Earth) model.
    """
    point = Point(point)
    lat1 = units.radians(degrees=point.latitude)
    lng1 = units.radians(degrees=point.longitude)
    bearing = units.radians(degrees=bearing)

    if distance is None:
        distance = self
    if isinstance(distance, Distance):
        distance = distance.kilometers

    d_div_r = float(distance) / self.RADIUS

    lat2 = asin(
        sin(lat1) * cos(d_div_r) +
        cos(lat1) * sin(d_div_r) * cos(bearing)
    )

    lng2 = lng1 + atan2(
        sin(bearing) * sin(d_div_r) * cos(lat1),
        cos(d_div_r) - sin(lat1) * sin(lat2)
    )

    return Point(units.degrees(radians=lat2), units.degrees(radians=lng2))
Calculate the destination point reached by travelling the given distance
from `point` along the given initial bearing, using a great-circle
(spherical Earth) model.
def eintr_retry(exc_type, f, *args, **kwargs):
    """Calls a function.

    If an error of the given exception type with interrupted system call
    (EINTR) occurs calls the function again.
    """
    while True:
        try:
            return f(*args, **kwargs)
        except exc_type as exc:
            if exc.errno != EINTR:
                raise
        else:
            break
Calls a function. If an error of the given exception type with interrupted system call (EINTR) occurs calls the function again.
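A usage sketch; `fd` is a hypothetical already-open file descriptor.
Note that on modern Python (PEP 475) the interpreter itself retries
EINTR for many system calls, so this wrapper mainly matters on older
versions:

import os
from errno import EINTR

data = eintr_retry(OSError, os.read, fd, 4096)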
def _get_bonds(self, mol):
    """
    Find all the bonds in a molecule

    Args:
        mol: the molecule. pymatgen Molecule object

    Returns:
        List of tuples. Each tuple corresponds to a bond represented by
        the ids of the two end atoms.
    """
    num_atoms = len(mol)
    # index starting from 0
    if self.ignore_ionic_bond:
        covalent_atoms = [i for i in range(num_atoms)
                          if mol.species[i].symbol
                          not in self.ionic_element_list]
    else:
        covalent_atoms = list(range(num_atoms))
    all_pairs = list(itertools.combinations(covalent_atoms, 2))
    pair_dists = [mol.get_distance(*p) for p in all_pairs]
    elements = mol.composition.as_dict().keys()
    unavailable_elements = list(set(elements) -
                                set(self.covalent_radius.keys()))
    if len(unavailable_elements) > 0:
        raise ValueError("The covalent radius for element {} is not "
                         "available".format(unavailable_elements))
    bond_13 = self.get_13_bonds(self.priority_bonds)
    max_length = [
        (self.covalent_radius[mol.sites[p[0]].specie.symbol] +
         self.covalent_radius[mol.sites[p[1]].specie.symbol]) *
        (1 + (self.priority_cap if p in self.priority_bonds
              else (self.bond_length_cap if p not in bond_13
                    else self.bond_13_cap))) *
        (0.1 if (self.ignore_halogen_self_bond and
                 p not in self.priority_bonds and
                 mol.sites[p[0]].specie.symbol in self.halogen_list and
                 mol.sites[p[1]].specie.symbol in self.halogen_list)
         else 1.0)
        for p in all_pairs]
    bonds = [bond for bond, dist, cap in
             zip(all_pairs, pair_dists, max_length)
             if dist <= cap]
    return bonds
Find all the bonds in a molecule.

Args:
    mol: the molecule. pymatgen Molecule object

Returns:
    List of tuples. Each tuple corresponds to a bond represented by
    the ids of the two end atoms.
def now(self, when=None):
    """Set the current value to the correct tuple based on the seconds
    since the epoch.  If 'when' is not provided, get the current time
    from the task manager.
    """
    if when is None:
        when = _TaskManager().get_time()
    tup = time.localtime(when)

    self.value = (tup[0] - 1900, tup[1], tup[2], tup[6] + 1)

    return self
Set the current value to the correct tuple based on the seconds since the epoch. If 'when' is not provided, get the current time from the task manager.
def profile(request, status=200):
    """
    Get the user's profile. If the user has no assigned profile, the HTTP 404
    is returned. Make a POST request to modify the user's profile.

    GET parameters:
        html
            turn on the HTML version of the API
        username:
            username of user (only for users with public profile)
        stats:
            attach additional user statistics

    POST parameters (JSON):
        send_emails:
            switcher turning on sending e-mails to user
        public:
            switcher making the user's profile publicly available
        user:
            password:
                user's password
            password_check:
                user's password again to check it
            first_name (optional):
                user's first name
            last_name (optional):
                user's last name
    """
    if request.method == 'GET':
        if request.GET.get("username", False):
            try:
                user_profile = User.objects.get(
                    username=request.GET.get("username"),
                    userprofile__public=True).userprofile
            except ObjectDoesNotExist:
                raise Http404("user not found or have not public profile")
        else:
            user_id = get_user_id(request)
            if get_config('proso_user', 'google.openid.migration',
                          default=True) and not is_user_id_overridden(request):
                migrated_user = migrate_google_openid_user(request.user)
                if migrated_user is not None:
                    auth.logout(request)
                    migrated_user.backend = 'social.backends.google.GoogleOAuth2'
                    auth.login(request, migrated_user)
            user_profile = get_object_or_404(UserProfile, user_id=user_id)
        return render_json(
            request, user_profile, status=status,
            template='user_profile.html', help_text=profile.__doc__)
    elif request.method == 'POST':
        with transaction.atomic():
            to_save = json_body(request.body.decode("utf-8"))
            user_id = get_user_id(request)
            user_profile = get_object_or_404(UserProfile, user_id=user_id)
            user = to_save.get('user', None)
            if 'send_emails' in to_save:
                user_profile.send_emails = bool(to_save['send_emails'])
            if 'public' in to_save:
                user_profile.public = bool(to_save['public'])
            if user:
                error = _save_user(request, user, new=False)
                if error:
                    return render_json(request, error,
                                       template='user_json.html', status=400)
            if 'properties' in to_save:
                user_profile.save_properties(to_save['properties'])
            user_profile.save()
            request.method = "GET"
            return profile(request, status=202)
    else:
        return HttpResponseBadRequest(
            "method {} is not allowed".format(request.method))
Get the user's profile. If the user has no assigned profile, the HTTP 404
is returned. Make a POST request to modify the user's profile.

GET parameters:
    html
        turn on the HTML version of the API
    username:
        username of user (only for users with public profile)
    stats:
        attach additional user statistics

POST parameters (JSON):
    send_emails:
        switcher turning on sending e-mails to user
    public:
        switcher making the user's profile publicly available
    user:
        password:
            user's password
        password_check:
            user's password again to check it
        first_name (optional):
            user's first name
        last_name (optional):
            user's last name
def visit(self, node):
        """ Try to replace the node if it matches a known pattern,
        otherwise keep going. """
        for pattern, replace in know_pattern:
            check = Check(node, dict())
            if check.visit(pattern):
                node = PlaceholderReplace(check.placeholders).visit(replace())
                self.update = True
        return super(PatternTransform, self).visit(node)
Try to replace the node if it matches a known pattern, otherwise keep going.
def tf_initialize(self, x_init, b): """ Initialization step preparing the arguments for the first iteration of the loop body: $x_0, 0, p_0, r_0, r_0^2$. Args: x_init: Initial solution guess $x_0$, zero vector if None. b: The right-hand side $b$ of the system of linear equations. Returns: Initial arguments for tf_step. """ if x_init is None: # Initial guess is zero vector if not given. x_init = [tf.zeros(shape=util.shape(t)) for t in b] initial_args = super(ConjugateGradient, self).tf_initialize(x_init) # r_0 := b - A * x_0 # c_0 := r_0 conjugate = residual = [t - fx for t, fx in zip(b, self.fn_x(x_init))] # r_0^2 := r^T * r squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in residual]) return initial_args + (conjugate, residual, squared_residual)
Initialization step preparing the arguments for the first iteration of the loop body: $x_0, 0, p_0, r_0, r_0^2$. Args: x_init: Initial solution guess $x_0$, zero vector if None. b: The right-hand side $b$ of the system of linear equations. Returns: Initial arguments for tf_step.
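The initialization mirrors the first step of the standard conjugate gradient recurrence: r_0 = b - A x_0, the first conjugate direction equals r_0, and the scalar r_0^T r_0. A minimal NumPy sketch of the same step for a small dense system (names here are illustrative, not TensorForce API):

import numpy as np

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])
b = np.array([1.0, 2.0])
x0 = np.zeros_like(b)                   # zero initial guess, as above

residual = b - A @ x0                   # r_0 := b - A * x_0
conjugate = residual.copy()             # c_0 := r_0
squared_residual = residual @ residual  # r_0^T * r_0
print(conjugate, squared_residual)      # [1. 2.] 5.0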
def gen_locale(locale, **kwargs):
    '''
    Generate a locale. Options:

    .. versionadded:: 2014.7.0

    :param locale: Any locale listed in /usr/share/i18n/locales or
        /usr/share/i18n/SUPPORTED for Debian and Gentoo based distributions,
        which require the charmap to be specified as part of the locale
        when generating it.

    verbose
        Show extra warnings about errors that are normally ignored.

    CLI Example:

    .. code-block:: bash

        salt '*' locale.gen_locale en_US.UTF-8
        salt '*' locale.gen_locale 'en_IE.UTF-8 UTF-8'    # Debian/Gentoo only
    '''
    on_debian = __grains__.get('os') == 'Debian'
    on_ubuntu = __grains__.get('os') == 'Ubuntu'
    on_gentoo = __grains__.get('os_family') == 'Gentoo'
    on_suse = __grains__.get('os_family') == 'Suse'
    on_solaris = __grains__.get('os_family') == 'Solaris'

    if on_solaris:  # all locales are pre-generated
        return locale in __salt__['locale.list_avail']()

    locale_info = salt.utils.locales.split_locale(locale)
    locale_search_str = '{0}_{1}'.format(locale_info['language'], locale_info['territory'])

    # if the charmap has not been supplied, normalize by appending it
    if not locale_info['charmap'] and not on_ubuntu:
        locale_info['charmap'] = locale_info['codeset']
        locale = salt.utils.locales.join_locale(locale_info)

    if on_debian or on_gentoo:  # file-based search
        search = '/usr/share/i18n/SUPPORTED'
        valid = __salt__['file.search'](search,
                                        '^{0}$'.format(locale),
                                        flags=re.MULTILINE)
    else:  # directory-based search
        if on_suse:
            search = '/usr/share/locale'
        else:
            search = '/usr/share/i18n/locales'

        try:
            valid = locale_search_str in os.listdir(search)
        except OSError as ex:
            log.error(ex)
            raise CommandExecutionError(
                "Locale \"{0}\" is not available.".format(locale))

    if not valid:
        log.error(
            'The provided locale "%s" is not found in %s', locale, search)
        return False

    if os.path.exists('/etc/locale.gen'):
        __salt__['file.replace'](
            '/etc/locale.gen',
            r'^\s*#\s*{0}\s*$'.format(locale),
            '{0}\n'.format(locale),
            append_if_not_found=True
        )
    elif on_ubuntu:
        __salt__['file.touch'](
            '/var/lib/locales/supported.d/{0}'.format(locale_info['language'])
        )
        __salt__['file.replace'](
            '/var/lib/locales/supported.d/{0}'.format(locale_info['language']),
            locale,
            locale,
            append_if_not_found=True
        )

    if salt.utils.path.which('locale-gen'):
        cmd = ['locale-gen']
        if on_gentoo:
            cmd.append('--generate')
        if on_ubuntu:
            cmd.append(salt.utils.locales.normalize_locale(locale))
        else:
            cmd.append(locale)
    elif salt.utils.path.which('localedef'):
        cmd = ['localedef', '--force', '-i', locale_search_str, '-f',
               locale_info['codeset'],
               '{0}.{1}'.format(locale_search_str, locale_info['codeset']),
               kwargs.get('verbose', False) and '--verbose' or '--quiet']
    else:
        raise CommandExecutionError(
            'Command "locale-gen" or "localedef" was not found on this system.')

    res = __salt__['cmd.run_all'](cmd)
    if res['retcode']:
        log.error(res['stderr'])

    if kwargs.get('verbose'):
        return res
    else:
        return res['retcode'] == 0
Generate a locale. Options: .. versionadded:: 2014.7.0 :param locale: Any locale listed in /usr/share/i18n/locales or /usr/share/i18n/SUPPORTED for Debian and Gentoo based distributions, which require the charmap to be specified as part of the locale when generating it. verbose Show extra warnings about errors that are normally ignored. CLI Example: .. code-block:: bash salt '*' locale.gen_locale en_US.UTF-8 salt '*' locale.gen_locale 'en_IE.UTF-8 UTF-8' # Debian/Gentoo only
def _set_collector(self, v, load=False): """ Setter method for collector, mapped from YANG variable /telemetry/collector (list) If this variable is read-only (config: false) in the source YANG file, then _set_collector is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_collector() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("collector_name",collector.collector, yang_name="collector", rest_name="collector", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TelemetryCollector', u'info': u'Telemetry collector Configuration'}}), is_container='list', yang_name="collector", rest_name="collector", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TelemetryCollector', u'info': u'Telemetry collector Configuration'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """collector must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("collector_name",collector.collector, yang_name="collector", rest_name="collector", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TelemetryCollector', u'info': u'Telemetry collector Configuration'}}), is_container='list', yang_name="collector", rest_name="collector", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TelemetryCollector', u'info': u'Telemetry collector Configuration'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""", }) self.__collector = t if hasattr(self, '_set'): self._set()
Setter method for collector, mapped from YANG variable /telemetry/collector (list) If this variable is read-only (config: false) in the source YANG file, then _set_collector is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_collector() directly.
def _get_dvs_infrastructure_traffic_resources(dvs_name,
                                              dvs_infra_traffic_ress):
    '''
    Returns a list of dict representations of the DVS infrastructure traffic
    resources

    dvs_name
        The name of the DVS

    dvs_infra_traffic_ress
        The DVS infrastructure traffic resources
    '''
    log.trace('Building the dicts of the DVS \'%s\' infrastructure '
              'traffic resources', dvs_name)
    res_dicts = []
    for res in dvs_infra_traffic_ress:
        res_dict = {'key': res.key,
                    'limit': res.allocationInfo.limit,
                    'reservation': res.allocationInfo.reservation}
        if res.allocationInfo.shares:
            res_dict.update({'num_shares': res.allocationInfo.shares.shares,
                             'share_level': res.allocationInfo.shares.level})
        res_dicts.append(res_dict)
    return res_dicts
Returns a list of dict representations of the DVS infrastructure traffic
    resources

    dvs_name
        The name of the DVS

    dvs_infra_traffic_ress
        The DVS infrastructure traffic resources
def get_user_groups(name, sid=False): ''' Get the groups to which a user belongs Args: name (str): The user name to query sid (bool): True will return a list of SIDs, False will return a list of group names Returns: list: A list of group names or sids ''' if name == 'SYSTEM': # 'win32net.NetUserGetLocalGroups' will fail if you pass in 'SYSTEM'. groups = [name] else: groups = win32net.NetUserGetLocalGroups(None, name) if not sid: return groups ret_groups = set() for group in groups: ret_groups.add(get_sid_from_name(group)) return ret_groups
Get the groups to which a user belongs Args: name (str): The user name to query sid (bool): True will return a list of SIDs, False will return a list of group names Returns: list: A list of group names or sids
def parse_text(document, container, element): "Parse text element." txt = None alternate = element.find(_name('{{{mc}}}AlternateContent')) if alternate is not None: parse_alternate(document, container, alternate) br = element.find(_name('{{{w}}}br')) if br is not None: if _name('{{{w}}}type') in br.attrib: _type = br.attrib[_name('{{{w}}}type')] brk = doc.Break(_type) else: brk = doc.Break() container.elements.append(brk) t = element.find(_name('{{{w}}}t')) if t is not None: txt = doc.Text(t.text) txt.parent = container container.elements.append(txt) rpr = element.find(_name('{{{w}}}rPr')) if rpr is not None: # Notice it is using txt as container parse_previous_properties(document, txt, rpr) for r in element.findall(_name('{{{w}}}r')): parse_text(document, container, r) foot = element.find(_name('{{{w}}}footnoteReference')) if foot is not None: parse_footnote(document, container, foot) end = element.find(_name('{{{w}}}endnoteReference')) if end is not None: parse_endnote(document, container, end) sym = element.find(_name('{{{w}}}sym')) if sym is not None: _font = sym.attrib[_name('{{{w}}}font')] _char = sym.attrib[_name('{{{w}}}char')] container.elements.append(doc.Symbol(font=_font, character=_char)) image = element.find(_name('{{{w}}}drawing')) if image is not None: parse_drawing(document, container, image) refe = element.find(_name('{{{w}}}commentReference')) if refe is not None: _m = doc.Comment(refe.attrib[_name('{{{w}}}id')], 'reference') container.elements.append(_m) return
Parse text element.
def response(self, msgid, error, result): """Handle a results message given to the proxy by the protocol object.""" if error: self.requests[msgid].errback(Exception(str(error))) else: self.requests[msgid].callback(result) del self.requests[msgid]
Handle a results message given to the proxy by the protocol object.
def load(self, args): """ Load a simulation from the given arguments. """ self._queue.append(tc.CMD_LOAD) self._string += struct.pack("!BiB", 0, 1 + 4 + 1 + 1 + 4 + sum(map(len, args)) + 4 * len(args), tc.CMD_LOAD) self._packStringList(args) self._sendExact()
Load a simulation from the given arguments.
def reverseCommit(self):
        """
        Remove the previously inserted text.
        """
        # Loop over all lines in the rectangle to remove the
        # previously yanked strings.
        col = self.cursorPos[1]
        for ii, text in enumerate(self.insertedText):
            line = ii + self.cursorPos[0]

            # Select as many characters as the string is long and remove
            # them.
            self.qteWidget.setSelection(line, col, line, col + len(text))
            self.baseClass.removeSelectedText()

        # Place the cursor at the original position.
        self.qteWidget.setCursorPosition(*self.cursorPos)
Remove the previously inserted text.
def call_binop(self, context, operator, left, right): """For intercepted binary operator calls (:meth:`intercepted_binops`) this function is executed instead of the builtin operator. This can be used to fine tune the behavior of certain operators. .. versionadded:: 2.6 """ return self.binop_table[operator](left, right)
For intercepted binary operator calls (:meth:`intercepted_binops`) this function is executed instead of the builtin operator. This can be used to fine tune the behavior of certain operators. .. versionadded:: 2.6
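In Jinja2's sandbox this hook pairs with intercepted_binops: only operators listed there are routed through call_binop, everything else stays builtin. A sketch of the documented pattern for disabling the power operator:

from jinja2.sandbox import SandboxedEnvironment, SecurityError

class PowerFreeEnvironment(SandboxedEnvironment):
    # only operators listed here reach call_binop
    intercepted_binops = frozenset(['**'])

    def call_binop(self, context, operator, left, right):
        if operator == '**':
            raise SecurityError('the power operator is unavailable')
        return SandboxedEnvironment.call_binop(self, context, operator,
                                               left, right)

env = PowerFreeEnvironment()
print(env.from_string('{{ 2 + 3 }}').render())   # renders '5'
# env.from_string('{{ 2 ** 8 }}').render()       # raises SecurityError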
def consume_message(self, header, message): """Consume a message""" logmessage = { "time": (time.time() % 1000) * 1000, "header": "", "message": message, } if header: logmessage["header"] = ( json.dumps(header, indent=2) + "\n" + "----------------" + "\n" ) if isinstance(message, dict): logmessage["message"] = ( json.dumps(message, indent=2) + "\n" + "----------------" + "\n" ) print("=== Consume ====\n{header}{message}".format(**logmessage)) self.log.info("Received message @{time}".format(**logmessage)) self.log.debug( "Received message @{time}\n{header}{message}".format(**logmessage) ) time.sleep(0.1)
Consume a message
def start_adc(self, channel, gain=1, data_rate=None): """Start continuous ADC conversions on the specified channel (0-3). Will return an initial conversion result, then call the get_last_result() function to read the most recent conversion result. Call stop_adc() to stop conversions. """ assert 0 <= channel <= 3, 'Channel must be a value within 0-3!' # Start continuous reads and set the mux value to the channel plus # the highest bit (bit 3) set. return self._read(channel + 0x04, gain, data_rate, ADS1x15_CONFIG_MODE_CONTINUOUS)
Start continuous ADC conversions on the specified channel (0-3). Will return an initial conversion result, then call the get_last_result() function to read the most recent conversion result. Call stop_adc() to stop conversions.
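Typical continuous-mode use of the Adafruit ADS1x15 driver this method belongs to looks roughly like the following (a sketch; channel, gain, and timing are illustrative):

import time
import Adafruit_ADS1x15

adc = Adafruit_ADS1x15.ADS1115()
adc.start_adc(0, gain=1)                 # continuous conversions on channel 0
try:
    for _ in range(5):
        value = adc.get_last_result()    # most recent conversion result
        print('Channel 0: {0}'.format(value))
        time.sleep(0.5)
finally:
    adc.stop_adc()                       # stop continuous conversions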
def parse(self, fp, headersonly=False): """Create a message structure from the data in a binary file. Reads all the data from the file and returns the root of the message structure. Optional headersonly is a flag specifying whether to stop parsing after reading the headers or not. The default is False, meaning it parses the entire contents of the file. """ fp = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape') with fp: return self.parser.parse(fp, headersonly)
Create a message structure from the data in a binary file. Reads all the data from the file and returns the root of the message structure. Optional headersonly is a flag specifying whether to stop parsing after reading the headers or not. The default is False, meaning it parses the entire contents of the file.
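This is the binary entry point of the stdlib email parser: the byte stream is wrapped in an ASCII TextIOWrapper with surrogateescape so arbitrary bytes survive the round trip. Usage through the public API (the file name is a placeholder):

from email.parser import BytesParser

with open('message.eml', 'rb') as fp:
    # headersonly=True stops parsing after the header block
    msg = BytesParser().parse(fp, headersonly=True)
print(msg['Subject'])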
def track_dependency(self, name:str, data:str, type:str=None, target:str=None, duration:int=None,
                         success:bool=None, result_code:str=None, properties:Dict[str, object]=None,
                         measurements:Dict[str, object]=None, dependency_id:str=None):
        """
        Sends a single dependency telemetry that was captured for the application.
        :param name: the name of the command initiated with this dependency call. Low cardinality value.
            Examples are stored procedure name and URL path template.
        :param data: the command initiated by this dependency call. Examples are SQL statement and HTTP URL with all
            query parameters.
        :param type: the dependency type name. Low cardinality value for logical grouping of dependencies and
            interpretation of other fields like commandName and resultCode. Examples are SQL, Azure table, and HTTP.
            (defaults to: None)
        :param target: the target site of a dependency call. Examples are server name, host address.
            (defaults to: None)
        :param duration: the number of milliseconds that this dependency call lasted. (defaults to: None)
        :param success: true if the dependency call ended in success, false otherwise. (defaults to: None)
        :param result_code: the result code of a dependency call. Examples are SQL error code and HTTP status code.
            (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item.
            (defaults to: None)
        :param dependency_id: the id for this dependency call. If None, a new uuid will be generated.
            (defaults to: None)
        """
        raise NotImplementedError('BotTelemetryClient.track_dependency(): is not implemented.')
Sends a single dependency telemetry that was captured for the application.
        :param name: the name of the command initiated with this dependency call. Low cardinality value.
            Examples are stored procedure name and URL path template.
        :param data: the command initiated by this dependency call. Examples are SQL statement and HTTP URL with all
            query parameters.
        :param type: the dependency type name. Low cardinality value for logical grouping of dependencies and
            interpretation of other fields like commandName and resultCode. Examples are SQL, Azure table, and HTTP.
            (defaults to: None)
        :param target: the target site of a dependency call. Examples are server name, host address.
            (defaults to: None)
        :param duration: the number of milliseconds that this dependency call lasted. (defaults to: None)
        :param success: true if the dependency call ended in success, false otherwise. (defaults to: None)
        :param result_code: the result code of a dependency call. Examples are SQL error code and HTTP status code.
            (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item.
            (defaults to: None)
        :param dependency_id: the id for this dependency call. If None, a new uuid will be generated.
            (defaults to: None)
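A call against a concrete client implementing this interface might look like the following (the client object and all values are hypothetical):

# 'client' stands for any concrete BotTelemetryClient implementation
client.track_dependency(
    name='GET /users/{id}',                    # low-cardinality command name
    data='https://api.example.com/users/42',   # full command issued
    type='HTTP',
    target='api.example.com',
    duration=87,                               # milliseconds
    success=True,
    result_code='200')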
def _check_infinite_flows(self, steps, flows=None): """ Recursively loop through the flow_config and check if there are any cycles. :param steps: Set of step definitions to loop through :param flows: Flows already visited. :return: None """ if flows is None: flows = [] for step in steps.values(): if "flow" in step: flow = step["flow"] if flow == "None": continue if flow in flows: raise FlowInfiniteLoopError( "Infinite flows detected with flow {}".format(flow) ) flows.append(flow) flow_config = self.project_config.get_flow(flow) self._check_infinite_flows(flow_config.steps, flows)
Recursively loop through the flow_config and check if there are any cycles. :param steps: Set of step definitions to loop through :param flows: Flows already visited. :return: None
def _OpenPathSpec(self, path_specification, ascii_codepage='cp1252'): """Opens the Windows Registry file specified by the path specification. Args: path_specification (dfvfs.PathSpec): path specification. ascii_codepage (Optional[str]): ASCII string codepage. Returns: WinRegistryFile: Windows Registry file or None. """ if not path_specification: return None file_entry = self._file_system.GetFileEntryByPathSpec(path_specification) if file_entry is None: return None file_object = file_entry.GetFileObject() if file_object is None: return None registry_file = dfwinreg_regf.REGFWinRegistryFile( ascii_codepage=ascii_codepage) try: registry_file.Open(file_object) except IOError as exception: logger.warning( 'Unable to open Windows Registry file with error: {0!s}'.format( exception)) file_object.close() return None return registry_file
Opens the Windows Registry file specified by the path specification. Args: path_specification (dfvfs.PathSpec): path specification. ascii_codepage (Optional[str]): ASCII string codepage. Returns: WinRegistryFile: Windows Registry file or None.
def create_widget(self): """ Create the underlying widget. """ d = self.declaration self.widget = CheckBox(self.get_context(), None, d.style or "@attr/checkboxStyle")
Create the underlying widget.
def get_licenses(self): """ :calls: `GET /licenses <https://developer.github.com/v3/licenses/#list-all-licenses>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.License.License` """ url_parameters = dict() return github.PaginatedList.PaginatedList( github.License.License, self.__requester, "/licenses", url_parameters )
:calls: `GET /licenses <https://developer.github.com/v3/licenses/#list-all-licenses>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.License.License`
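Through PyGithub's main entry point this returns a lazy paginated list; iterating it pages through /licenses. A small sketch (anonymous access works for this endpoint, subject to rate limits):

from github import Github

g = Github()
for lic in g.get_licenses():
    print(lic.key, '-', lic.name)   # e.g. 'mit - MIT License'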
def get_object(self, item): """ Returns a StorageObject matching the specified item. If no such object exists, a NotFound exception is raised. If 'item' is not a string, that item is returned unchanged. """ if isinstance(item, six.string_types): item = self.object_manager.get(item) return item
Returns a StorageObject matching the specified item. If no such object exists, a NotFound exception is raised. If 'item' is not a string, that item is returned unchanged.
def _readconfig():
    """Configures environment variables"""
    config = ConfigParser.SafeConfigParser()
    try:
        found = config.read(littlechef.CONFIGFILE)
    except ConfigParser.ParsingError as e:
        abort(str(e))
    if not len(found):
        try:
            found = config.read(['config.cfg', 'auth.cfg'])
        except ConfigParser.ParsingError as e:
            abort(str(e))
        if len(found):
            print('\nDeprecationWarning: deprecated config file name \'{0}\'.'
                  ' Use {1}'.format(found[0], littlechef.CONFIGFILE))
        else:
            abort('No {0} file found in the current '
                  'directory'.format(littlechef.CONFIGFILE))

    in_a_kitchen, missing = _check_appliances()
    missing_str = lambda m: ' and '.join(', '.join(m).rsplit(', ', 1))
    if not in_a_kitchen:
        abort("Couldn't find {0}. "
              "Are you executing 'fix' outside of a kitchen?\n"
              "To create a new kitchen in the current directory "
              " type 'fix new_kitchen'".format(missing_str(missing)))

    # We expect an ssh_config file here,
    # and/or a user, (password/keyfile) pair
    try:
        env.ssh_config_path = config.get('userinfo', 'ssh-config')
    except ConfigParser.NoSectionError:
        abort('You need to define a "userinfo" section'
              ' in the config file. Refer to the README for help '
              '(http://github.com/tobami/littlechef)')
    except ConfigParser.NoOptionError:
        env.ssh_config_path = None

    if env.ssh_config_path:
        env.ssh_config = _SSHConfig()
        env.ssh_config_path = os.path.expanduser(env.ssh_config_path)
        env.use_ssh_config = True
        try:
            env.ssh_config.parse(open(env.ssh_config_path))
        except IOError:
            abort("Couldn't open the ssh-config file "
                  "'{0}'".format(env.ssh_config_path))
        except Exception:
            abort("Couldn't parse the ssh-config file "
                  "'{0}'".format(env.ssh_config_path))
    else:
        env.ssh_config = None

    # check for a gateway
    try:
        env.gateway = config.get('connection', 'gateway')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.gateway = None

    # check for http_proxy which will be put into solo.rb
    try:
        env.http_proxy = config.get('connection', 'http_proxy')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.http_proxy = None

    try:
        env.https_proxy = config.get('connection', 'https_proxy')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.https_proxy = None

    try:
        env.remove_data_bags = config.get('userinfo', 'remove_data_bags')
    except ConfigParser.NoOptionError:
        env.remove_data_bags = False

    # Check for an encrypted_data_bag_secret file and set the env option
    try:
        env.encrypted_data_bag_secret = config.get('userinfo',
                                                   'encrypted_data_bag_secret')
    except ConfigParser.NoOptionError:
        env.encrypted_data_bag_secret = None

    if env.encrypted_data_bag_secret:
        env.encrypted_data_bag_secret = os.path.expanduser(
            env.encrypted_data_bag_secret)
        try:
            open(env.encrypted_data_bag_secret)
        except IOError as e:
            abort("Failed to open encrypted_data_bag_secret file at "
                  "'{0}'".format(env.encrypted_data_bag_secret))

    try:
        sudo_prefix = config.get('ssh', 'sudo_prefix', raw=True)
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        pass
    else:
        env.sudo_prefix = sudo_prefix

    try:
        env.user = config.get('userinfo', 'user')
    except ConfigParser.NoOptionError:
        if not env.ssh_config_path:
            msg = 'You need to define a user in the "userinfo" section'
            msg += ' of {0}. Refer to the README for help'
            msg += ' (http://github.com/tobami/littlechef)'
            abort(msg.format(littlechef.CONFIGFILE))
        user_specified = False
    else:
        user_specified = True

    try:
        env.password = config.get('userinfo', 'password') or None
    except ConfigParser.NoOptionError:
        pass

    try:
        # If keypair-file is empty, assign None or fabric will try to read key
        env.key_filename = config.get('userinfo', 'keypair-file') or None
    except ConfigParser.NoOptionError:
        pass

    if (user_specified and not env.password and not env.key_filename
            and not env.ssh_config):
        abort('You need to define a password, keypair file, or ssh-config '
              'file in {0}'.format(littlechef.CONFIGFILE))

    # Node's Chef Solo working directory for storing cookbooks, roles, etc.
    try:
        env.node_work_path = os.path.expanduser(config.get('kitchen',
                                                           'node_work_path'))
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        env.node_work_path = littlechef.node_work_path
    else:
        if not env.node_work_path:
            abort('The "node_work_path" option cannot be empty')

    # Follow symlinks
    try:
        env.follow_symlinks = config.getboolean('kitchen', 'follow_symlinks')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        env.follow_symlinks = False

    try:
        env.berksfile = config.get('kitchen', 'berksfile')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e:
        env.berksfile = None
    else:
        try:
            env.berksfile_cookbooks_directory = config.get('kitchen', 'berksfile_cookbooks_directory')
            littlechef.cookbook_paths.append(env.berksfile_cookbooks_directory)
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e:
            if env.berksfile:
                env.berksfile_cookbooks_directory = tempfile.mkdtemp('littlechef-berks')
                littlechef.cookbook_paths.append(env.berksfile_cookbooks_directory)
            else:
                env.berksfile_cookbooks_directory = None
        chef.ensure_berksfile_cookbooks_are_installed()

    # Upload Directory
    try:
        env.sync_packages_dest_dir = config.get('sync-packages', 'dest-dir')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.sync_packages_dest_dir = None

    # Local Directory
    try:
        env.sync_packages_local_dir = config.get('sync-packages', 'local-dir')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.sync_packages_local_dir = None

    try:
        env.autodeploy_chef = config.get('userinfo', 'autodeploy_chef') or None
    except ConfigParser.NoOptionError:
        env.autodeploy_chef = None
Configures environment variables
def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None
Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort'
def compute_effsize(x, y, paired=False, eftype='cohen'):
    """Calculate effect size between two sets of observations.

    Parameters
    ----------
    x : np.array or list
        First set of observations.
    y : np.array or list
        Second set of observations.
    paired : boolean
        If True, uses Cohen d-avg formula to correct for repeated
        measurements (Cumming 2012)
    eftype : string
        Desired output effect size.
        Available methods are ::

        'none' : no effect size
        'cohen' : Unbiased Cohen d
        'hedges' : Hedges g
        'glass': Glass delta
        'r' : correlation coefficient
        'eta-square' : Eta-square
        'odds-ratio' : Odds ratio
        'AUC' : Area Under the Curve
        'CLES' : Common language effect size

    Returns
    -------
    ef : float
        Effect size

    See Also
    --------
    convert_effsize : Conversion between effect sizes.
    compute_effsize_from_t : Convert a T-statistic to an effect size.

    Notes
    -----
    Missing values are automatically removed from the data. If ``x`` and
    ``y`` are paired, the entire row is removed.

    If ``x`` and ``y`` are independent, the Cohen's d is:

    .. math::

        d = \\frac{\\overline{X} - \\overline{Y}}
        {\\sqrt{\\frac{(n_{1} - 1)\\sigma_{1}^{2} + (n_{2} - 1)
        \\sigma_{2}^{2}}{n1 + n2 - 2}}}

    If ``x`` and ``y`` are paired, the Cohen :math:`d_{avg}` is computed:

    .. math::

        d_{avg} = \\frac{\\overline{X} - \\overline{Y}}
        {0.5 * (\\sigma_1 + \\sigma_2)}

    The Cohen’s d is a biased estimate of the population effect size,
    especially for small samples (n < 20). It is often preferable
    to use the corrected effect size, or Hedges’g, instead:

    .. math:: g = d * (1 - \\frac{3}{4(n_1 + n_2) - 9})

    If eftype = 'glass', the Glass :math:`\\delta` is reported, using the
    group with the lowest variance as the control group:

    .. math::

        \\delta = \\frac{\\overline{X} - \\overline{Y}}{\\sigma_{control}}

    References
    ----------
    .. [1] Lakens, D., 2013. Calculating and reporting effect sizes to
       facilitate cumulative science: a practical primer for t-tests and
       ANOVAs. Front. Psychol. 4, 863.
       https://doi.org/10.3389/fpsyg.2013.00863

    .. [2] Cumming, Geoff. Understanding the new statistics: Effect sizes,
       confidence intervals, and meta-analysis. Routledge, 2013.

    Examples
    --------
    1. Compute Cohen d from two independent sets of observations.

        >>> import numpy as np
        >>> from pingouin import compute_effsize
        >>> np.random.seed(123)
        >>> x = np.random.normal(2, size=100)
        >>> y = np.random.normal(2.3, size=95)
        >>> d = compute_effsize(x=x, y=y, eftype='cohen', paired=False)
        >>> print(d)
        -0.2835170152506578

    2. Compute Hedges g from two paired sets of observations.

        >>> import numpy as np
        >>> from pingouin import compute_effsize
        >>> x = [1.62, 2.21, 3.79, 1.66, 1.86, 1.87, 4.51, 4.49, 3.3, 2.69]
        >>> y = [0.91, 3., 2.28, 0.49, 1.42, 3.65, -0.43, 1.57, 3.27, 1.13]
        >>> g = compute_effsize(x=x, y=y, eftype='hedges', paired=True)
        >>> print(g)
        0.8370985097811404

    3. Compute Glass delta from two independent sets of observations. The
       group with the lowest variance will automatically be selected as the
       control.

        >>> import numpy as np
        >>> from pingouin import compute_effsize
        >>> np.random.seed(123)
        >>> x = np.random.normal(2, scale=1, size=50)
        >>> y = np.random.normal(2, scale=2, size=45)
        >>> d = compute_effsize(x=x, y=y, eftype='glass')
        >>> print(d)
        -0.1170721973604153
    """
    # Check arguments
    if not _check_eftype(eftype):
        err = "Could not interpret input '{}'".format(eftype)
        raise ValueError(err)

    x = np.asarray(x)
    y = np.asarray(y)

    if x.size != y.size and paired:
        warnings.warn("x and y have unequal sizes. Switching to "
                      "paired == False.")
        paired = False

    # Remove rows with missing values
    x, y = remove_na(x, y, paired=paired)
    nx, ny = x.size, y.size

    if ny == 1:
        # Case 1: One-sample Test
        d = (x.mean() - y) / x.std(ddof=1)
        return d

    if eftype.lower() == 'glass':
        # Find group with lowest variance
        sd_control = np.min([x.std(ddof=1), y.std(ddof=1)])
        d = (x.mean() - y.mean()) / sd_control
        return d
    elif eftype.lower() == 'r':
        # Return correlation coefficient (useful for CI bootstrapping)
        from scipy.stats import pearsonr
        r, _ = pearsonr(x, y)
        return r
    elif eftype.lower() == 'cles':
        # Compute exact CLES
        diff = x[:, None] - y
        return max((diff < 0).sum(), (diff > 0).sum()) / diff.size
    else:
        # Test equality of variance of data with a stringent threshold
        # equal_var, p = homoscedasticity(x, y, alpha=.001)
        # if not equal_var:
        #     print('Unequal variances (p<.001). You should report',
        #           'Glass delta instead.')

        # Compute unbiased Cohen's d effect size
        if not paired:
            # https://en.wikipedia.org/wiki/Effect_size
            dof = nx + ny - 2
            poolsd = np.sqrt(((nx - 1) * x.var(ddof=1) +
                              (ny - 1) * y.var(ddof=1)) / dof)
            d = (x.mean() - y.mean()) / poolsd
        else:
            # Report Cohen d-avg (Cumming 2012; Lakens 2013)
            d = (x.mean() - y.mean()) / (.5 * (x.std(ddof=1) +
                                               y.std(ddof=1)))
        return convert_effsize(d, 'cohen', eftype, nx=nx, ny=ny)
Calculate effect size between two sets of observations.

    Parameters
    ----------
    x : np.array or list
        First set of observations.
    y : np.array or list
        Second set of observations.
    paired : boolean
        If True, uses Cohen d-avg formula to correct for repeated
        measurements (Cumming 2012)
    eftype : string
        Desired output effect size.
        Available methods are ::

        'none' : no effect size
        'cohen' : Unbiased Cohen d
        'hedges' : Hedges g
        'glass': Glass delta
        'r' : correlation coefficient
        'eta-square' : Eta-square
        'odds-ratio' : Odds ratio
        'AUC' : Area Under the Curve
        'CLES' : Common language effect size

    Returns
    -------
    ef : float
        Effect size

    See Also
    --------
    convert_effsize : Conversion between effect sizes.
    compute_effsize_from_t : Convert a T-statistic to an effect size.

    Notes
    -----
    Missing values are automatically removed from the data. If ``x`` and
    ``y`` are paired, the entire row is removed.

    If ``x`` and ``y`` are independent, the Cohen's d is:

    .. math::

        d = \\frac{\\overline{X} - \\overline{Y}}
        {\\sqrt{\\frac{(n_{1} - 1)\\sigma_{1}^{2} + (n_{2} - 1)
        \\sigma_{2}^{2}}{n1 + n2 - 2}}}

    If ``x`` and ``y`` are paired, the Cohen :math:`d_{avg}` is computed:

    .. math::

        d_{avg} = \\frac{\\overline{X} - \\overline{Y}}
        {0.5 * (\\sigma_1 + \\sigma_2)}

    The Cohen’s d is a biased estimate of the population effect size,
    especially for small samples (n < 20). It is often preferable
    to use the corrected effect size, or Hedges’g, instead:

    .. math:: g = d * (1 - \\frac{3}{4(n_1 + n_2) - 9})

    If eftype = 'glass', the Glass :math:`\\delta` is reported, using the
    group with the lowest variance as the control group:

    .. math::

        \\delta = \\frac{\\overline{X} - \\overline{Y}}{\\sigma_{control}}

    References
    ----------
    .. [1] Lakens, D., 2013. Calculating and reporting effect sizes to
       facilitate cumulative science: a practical primer for t-tests and
       ANOVAs. Front. Psychol. 4, 863.
       https://doi.org/10.3389/fpsyg.2013.00863

    .. [2] Cumming, Geoff. Understanding the new statistics: Effect sizes,
       confidence intervals, and meta-analysis. Routledge, 2013.

    Examples
    --------
    1. Compute Cohen d from two independent sets of observations.

        >>> import numpy as np
        >>> from pingouin import compute_effsize
        >>> np.random.seed(123)
        >>> x = np.random.normal(2, size=100)
        >>> y = np.random.normal(2.3, size=95)
        >>> d = compute_effsize(x=x, y=y, eftype='cohen', paired=False)
        >>> print(d)
        -0.2835170152506578

    2. Compute Hedges g from two paired sets of observations.

        >>> import numpy as np
        >>> from pingouin import compute_effsize
        >>> x = [1.62, 2.21, 3.79, 1.66, 1.86, 1.87, 4.51, 4.49, 3.3, 2.69]
        >>> y = [0.91, 3., 2.28, 0.49, 1.42, 3.65, -0.43, 1.57, 3.27, 1.13]
        >>> g = compute_effsize(x=x, y=y, eftype='hedges', paired=True)
        >>> print(g)
        0.8370985097811404

    3. Compute Glass delta from two independent sets of observations. The
       group with the lowest variance will automatically be selected as the
       control.

        >>> import numpy as np
        >>> from pingouin import compute_effsize
        >>> np.random.seed(123)
        >>> x = np.random.normal(2, scale=1, size=50)
        >>> y = np.random.normal(2, scale=2, size=45)
        >>> d = compute_effsize(x=x, y=y, eftype='glass')
        >>> print(d)
        -0.1170721973604153
def members_entries(self, all_are_optional: bool=False) -> List[Tuple[str, str]]: """ Return an ordered list of elements for the _members section :param all_are_optional: True means we're in a choice situation so everything is optional :return: """ rval = [] if self._members: for member in self._members: rval += member.members_entries(all_are_optional) elif self._choices: for choice in self._choices: rval += self._context.reference(choice).members_entries(True) else: return [] return rval
Return an ordered list of elements for the _members section :param all_are_optional: True means we're in a choice situation so everything is optional :return:
def p_expression_And(self, p): 'expression : expression AND expression' p[0] = And(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
expression : expression AND expression
def func_call_as_str(name, *args, **kwds): """ Return arguments and keyword arguments as formatted string >>> func_call_as_str('f', 1, 2, a=1) 'f(1, 2, a=1)' """ return '{0}({1})'.format( name, ', '.join(itertools.chain( map('{0!r}'.format, args), map('{0[0]!s}={0[1]!r}'.format, sorted(kwds.items())))))
Return arguments and keyword arguments as formatted string >>> func_call_as_str('f', 1, 2, a=1) 'f(1, 2, a=1)'
def copy(self, src, dst, other_system=None): """ Copy object of the same storage. Args: src (str): Path or URL. dst (str): Path or URL. other_system (pycosio._core.io_system.SystemBase subclass): Unused. """ copy_source = self.get_client_kwargs(src) copy_destination = self.get_client_kwargs(dst) with _handle_oss_error(): bucket = self._get_bucket(copy_destination) bucket.copy_object( source_bucket_name=copy_source['bucket_name'], source_key=copy_source['key'], target_key=copy_destination['key'])
Copy object of the same storage. Args: src (str): Path or URL. dst (str): Path or URL. other_system (pycosio._core.io_system.SystemBase subclass): Unused.
def to_int(self, number, default=0):
        """Returns an integer
        """
        try:
            return int(number)
        except (TypeError, ValueError):
            return self.to_int(default, 0)
Returns an integer
def elect(self, candidate_aggregates, candidate_id):
        """
        Elect a candidate, updating internal state to track this.
        Calculate the paper count to be transferred on to other candidates,
        and if required schedule a distribution of papers.
        """
        # somewhat paranoid cross-check, but we've had this bug before..
        assert(candidate_id not in self.candidates_elected)
        elected_no = len(self.candidates_elected) + 1
        self.candidates_elected[candidate_id] = True
        transfer_value = 0
        excess_votes = paper_count = None
        if len(self.candidates_elected) != self.vacancies:
            excess_votes = max(candidate_aggregates.get_vote_count(candidate_id) - self.quota, 0)
            assert(excess_votes >= 0)
            paper_count = self.candidate_bundle_transactions.get_paper_count(candidate_id)
            if paper_count > 0:
                transfer_value = fractions.Fraction(excess_votes, paper_count)
            assert(transfer_value >= 0)
            self.election_distributions_pending.append((candidate_id, transfer_value, excess_votes))
        self.results.candidate_elected(
            CandidateElected(
                candidate_id=candidate_id,
                order=elected_no,
                excess_votes=excess_votes,
                paper_count=paper_count,
                transfer_value=transfer_value))
Elect a candidate, updating internal state to track this.
        Calculate the paper count to be transferred on to other candidates,
        and if required schedule a distribution of papers.
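The transfer value is the exact fraction excess_votes / paper_count, kept as a fractions.Fraction so no rounding drift accumulates across counts. A small worked example with made-up numbers:

from fractions import Fraction

quota = 100000
votes = 120000                      # elected candidate's vote count
paper_count = 150000                # ballot papers held by that candidate

excess_votes = max(votes - quota, 0)                  # 20000
transfer_value = Fraction(excess_votes, paper_count)
print(transfer_value)               # 2/15 of a vote travels with each paper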
def LoadExclusions(self, snps): """ Load locus exclusions. :param snps: Can either be a list of rsids or a file containing rsids. :return: None If snps is a file, the file must only contain RSIDs separated by whitespace (tabs, spaces and return characters). """ snp_names = [] if len(snps) == 1 and os.path.isfile(snps[0]): snp_names = open(snps).read().strip().split() else: snp_names = snps for snp in snp_names: if len(snp.strip()) > 0: self.ignored_rs.append(snp)
Load locus exclusions. :param snps: Can either be a list of rsids or a file containing rsids. :return: None If snps is a file, the file must only contain RSIDs separated by whitespace (tabs, spaces and return characters).
def _prepare_calls(result_file, out_dir, data):
    """Write summary file of results of HLA typing by allele.
    """
    sample = dd.get_sample_name(data)
    out_file = os.path.join(out_dir, "%s-optitype.csv" % (sample))
    if not utils.file_uptodate(out_file, result_file):
        hla_truth = bwakit.get_hla_truthset(data)
        with file_transaction(data, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                writer = csv.writer(out_handle)
                allele_info = _parse_result_file(result_file)
                if len(allele_info) == 1:
                    writer.writerow(["sample", "locus", "alleles", "expected", "validates"])
                else:
                    writer.writerow(["sample", "locus", "index", "alleles", "score"])
                for j, (alleles, score) in enumerate(allele_info):
                    for hla_locus, call_alleles in alleles:
                        truth_alleles = tz.get_in([sample, hla_locus], hla_truth, [])
                        if len(allele_info) == 1:
                            writer.writerow([sample, hla_locus,
                                             ";".join(call_alleles),
                                             ";".join(truth_alleles),
                                             bwakit.matches_truth(call_alleles, truth_alleles, data)])
                        else:
                            writer.writerow([sample, hla_locus, j,
                                             ";".join(call_alleles), score])
    return out_file
Write summary file of results of HLA typing by allele.
def from_dict(cls, operation, client, **caller_metadata): """Factory: construct an instance from a dictionary. :type operation: dict :param operation: Operation as a JSON object. :type client: :class:`~google.cloud.client.Client` :param client: The client used to poll for the status of the operation. :type caller_metadata: dict :param caller_metadata: caller-assigned metadata about the operation :rtype: :class:`Operation` :returns: new instance, with attributes based on the protobuf. """ operation_pb = json_format.ParseDict(operation, operations_pb2.Operation()) result = cls(operation_pb.name, client, **caller_metadata) result._update_state(operation_pb) result._from_grpc = False return result
Factory: construct an instance from a dictionary. :type operation: dict :param operation: Operation as a JSON object. :type client: :class:`~google.cloud.client.Client` :param client: The client used to poll for the status of the operation. :type caller_metadata: dict :param caller_metadata: caller-assigned metadata about the operation :rtype: :class:`Operation` :returns: new instance, with attributes based on the protobuf.
def _query_entities(self, table_name, filter=None, select=None, max_results=None, marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA, property_resolver=None, timeout=None, _context=None): ''' Returns a list of entities under the specified table. Makes a single list request to the service. Used internally by the query_entities method. :param str table_name: The name of the table to query. :param str filter: Returns only entities that satisfy the specified filter. Note that no more than 15 discrete comparisons are permitted within a $filter string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx for more information on constructing filters. :param str select: Returns only the desired properties of an entity from the set. :param int max_results: The maximum number of entities to return. :param obj marker: A dictionary which identifies the portion of the query to be returned with the next query operation. The operation returns a next_marker element within the response body if the list returned was not complete. This value may then be used as a query parameter in a subsequent call to request the next portion of the list of table. The marker value is opaque to the client. :param str accept: Specifies the accepted content type of the response payload. See :class:`~azure.storage.table.models.TablePayloadFormat` for possible values. :param property_resolver: A function which given the partition key, row key, property name, property value, and the property EdmType if returned by the service, returns the EdmType of the property. Generally used if accept is set to JSON_NO_METADATA. :type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type) :param int timeout: The server timeout, expressed in seconds. :return: A list of entities, potentially with a next_marker property. :rtype: list(:class:`~azure.storage.table.models.Entity`) ''' _validate_not_none('table_name', table_name) _validate_not_none('accept', accept) next_partition_key = None if marker is None else marker.get('nextpartitionkey') next_row_key = None if marker is None else marker.get('nextrowkey') request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations(secondary=True) request.path = '/' + _to_str(table_name) + '()' request.headers = {'Accept': _to_str(accept)} request.query = { '$filter': _to_str(filter), '$select': _to_str(select), '$top': _int_to_str(max_results), 'NextPartitionKey': _to_str(next_partition_key), 'NextRowKey': _to_str(next_row_key), 'timeout': _int_to_str(timeout), } return self._perform_request(request, _convert_json_response_to_entities, [property_resolver, self.require_encryption, self.key_encryption_key, self.key_resolver_function], operation_context=_context)
Returns a list of entities under the specified table. Makes a single list request to the service. Used internally by the query_entities method. :param str table_name: The name of the table to query. :param str filter: Returns only entities that satisfy the specified filter. Note that no more than 15 discrete comparisons are permitted within a $filter string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx for more information on constructing filters. :param str select: Returns only the desired properties of an entity from the set. :param int max_results: The maximum number of entities to return. :param obj marker: A dictionary which identifies the portion of the query to be returned with the next query operation. The operation returns a next_marker element within the response body if the list returned was not complete. This value may then be used as a query parameter in a subsequent call to request the next portion of the list of table. The marker value is opaque to the client. :param str accept: Specifies the accepted content type of the response payload. See :class:`~azure.storage.table.models.TablePayloadFormat` for possible values. :param property_resolver: A function which given the partition key, row key, property name, property value, and the property EdmType if returned by the service, returns the EdmType of the property. Generally used if accept is set to JSON_NO_METADATA. :type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type) :param int timeout: The server timeout, expressed in seconds. :return: A list of entities, potentially with a next_marker property. :rtype: list(:class:`~azure.storage.table.models.Entity`)
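This private helper backs the public query_entities generator on TableService. Typical use goes through the public method; a sketch against the legacy azure-storage table SDK (account details and table name are placeholders):

from azure.storage.table import TableService

ts = TableService(account_name='myaccount', account_key='...')
entities = ts.query_entities(
    'mytable',
    filter="PartitionKey eq 'batch1'",   # at most 15 discrete comparisons
    select='Name,Age',
    num_results=50)
for entity in entities:
    print(entity.Name, entity.Age)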
def pretty_str(self, indent=0): """Return a human-readable string representation of this object. Kwargs: indent (int): The amount of spaces to use as indentation. """ indent = ' ' * indent if self.value is not None: return '{}{} {}'.format(indent, self.name, pretty_str(self.value)) return indent + self.name
Return a human-readable string representation of this object. Kwargs: indent (int): The amount of spaces to use as indentation.
def cmd_host(verbose): """Collect information about the host where habu is running. Example: \b $ habu.host { "kernel": [ "Linux", "demo123", "5.0.6-200.fc29.x86_64", "#1 SMP Wed Apr 3 15:09:51 UTC 2019", "x86_64", "x86_64" ], "distribution": [ "Fedora", "29", "Twenty Nine" ], "libc": [ "glibc", "2.2.5" ], "arch": "x86_64", "python_version": "3.7.3", "os_name": "Linux", "cpu": "x86_64", "static_hostname": "demo123", "fqdn": "demo123.lab.sierra" } """ if verbose: logging.basicConfig(level=logging.INFO, format='%(message)s') print("Gather information about the host...", file=sys.stderr) result = gather_details() if result: print(json.dumps(result, indent=4)) else: print("[X] Unable to gather information") return True
Collect information about the host where habu is running. Example: \b $ habu.host { "kernel": [ "Linux", "demo123", "5.0.6-200.fc29.x86_64", "#1 SMP Wed Apr 3 15:09:51 UTC 2019", "x86_64", "x86_64" ], "distribution": [ "Fedora", "29", "Twenty Nine" ], "libc": [ "glibc", "2.2.5" ], "arch": "x86_64", "python_version": "3.7.3", "os_name": "Linux", "cpu": "x86_64", "static_hostname": "demo123", "fqdn": "demo123.lab.sierra" }
def current_rev_reg_id(base_dir: str, cd_id: str) -> str: """ Return the current revocation registry identifier for input credential definition identifier, in input directory. Raise AbsentTails if no corresponding tails file, signifying no such revocation registry defined. :param base_dir: base directory for tails files, thereafter split by cred def id :param cd_id: credential definition identifier of interest :return: identifier for current revocation registry on input credential definition identifier """ tags = [int(rev_reg_id2tag(basename(f))) for f in Tails.links(base_dir) if cd_id in basename(f)] if not tags: raise AbsentTails('No tails files present for cred def id {}'.format(cd_id)) return rev_reg_id(cd_id, str(max(tags)))
Return the current revocation registry identifier for input credential definition identifier, in input directory. Raise AbsentTails if no corresponding tails file, signifying no such revocation registry defined. :param base_dir: base directory for tails files, thereafter split by cred def id :param cd_id: credential definition identifier of interest :return: identifier for current revocation registry on input credential definition identifier
def merge_configs(config: Dict[str, Any],
                  default_config: Dict[str, Any]) -> Dict[str, Any]:
    """
    Merges a `default` config with DAG config. Used to set default values
    for a group of DAGs.

    :param config: config to merge in default values
    :type config: Dict[str, Any]
    :param default_config: config to merge default values from
    :type default_config: Dict[str, Any]
    :returns: dict with merged configs
    :rtype: Dict[str, Any]
    """
    for key in default_config:
        if key in config:
            if isinstance(config[key], dict) and isinstance(default_config[key], dict):
                merge_configs(config[key], default_config[key])
        else:
            config[key] = default_config[key]
    return config
Merges a `default` config with DAG config. Used to set default values
    for a group of DAGs.

    :param config: config to merge in default values
    :type config: Dict[str, Any]
    :param default_config: config to merge default values from
    :type default_config: Dict[str, Any]
    :returns: dict with merged configs
    :rtype: Dict[str, Any]
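Defaults only fill keys that are absent; nested dicts are merged recursively, and existing values always win. For example:

config = {'schedule_interval': '@daily',
          'default_args': {'owner': 'data-team'}}
defaults = {'schedule_interval': '@hourly',
            'default_args': {'owner': 'airflow', 'retries': 2}}

merged = merge_configs(config, defaults)
print(merged)
# {'schedule_interval': '@daily',
#  'default_args': {'owner': 'data-team', 'retries': 2}}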
def userpass(self, dir="ppcoin"):
        """Reads config file for username/password"""
        source = os.path.expanduser("~/.{0}/{0}.conf").format(dir)
        with open(source, "r") as conf:
            for line in conf:
                if line.startswith("rpcuser"):
                    username = line.split("=")[1].strip()
                if line.startswith("rpcpassword"):
                    password = line.split("=")[1].strip()
        return username, password
Reads config file for username/password
def get(self, mail):
        """ Get one user stored in LinShare."""
        users = (v for v in self.list() if v.get('mail') == mail)
        for i in users:
            self.log.debug(i)
            return i
        return None
Get one user stored in LinShare.
def t_NATIVEPHP(t): r'<\?php((?!<\?php)[\s\S])*\?>[ \t]*(?=\n)' lineNoInc(t) t.value = t.value[6:].lstrip() pos2 = t.value.rfind('?>') t.value = t.value[0:pos2].rstrip() # print t.value return t
r'<\?php((?!<\?php)[\s\S])*\?>[ \t]*(?=\n)
def cos_zen(utc_time, lon, lat): """Cosine of the sun-zenith angle for *lon*, *lat* at *utc_time*. utc_time: datetime.datetime instance of the UTC time lon and lat in degrees. """ lon = np.deg2rad(lon) lat = np.deg2rad(lat) r_a, dec = sun_ra_dec(utc_time) h__ = _local_hour_angle(utc_time, lon, r_a) return (np.sin(lat) * np.sin(dec) + np.cos(lat) * np.cos(dec) * np.cos(h__))
Cosine of the sun-zenith angle for *lon*, *lat* at *utc_time*. utc_time: datetime.datetime instance of the UTC time lon and lat in degrees.
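A quick call sketch, assuming this lives in pyorbital.astronomy, where a function with this name and signature exists (the coordinates are illustrative):

from datetime import datetime
from pyorbital.astronomy import cos_zen

utc_time = datetime(2019, 6, 21, 12, 0)   # midday UTC at the solstice
print(cos_zen(utc_time, 0.0, 51.48))      # Greenwich, near its yearly peak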
def __collect_interfaces_return(interfaces): """Collect new style (44.1+) return values to old-style kv-list""" acc = [] for (interfaceName, interfaceData) in interfaces.items(): signalValues = interfaceData.get("signals", {}) for (signalName, signalValue) in signalValues.items(): pinName = "{0}.{1}".format(interfaceName, signalName) acc.append({'id': pinName, 'value': signalValue}) return acc
Collect new style (44.1+) return values to old-style kv-list
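The reshaping is mechanical: nested interface -> signal maps become flat 'interface.signal' ids. The same transform as a standalone comprehension over sample data:

interfaces = {
    'motor': {'signals': {'rpm': 1500, 'torque': 2.5}},
    'panel': {'signals': {'led': True}},
}

acc = [{'id': '{0}.{1}'.format(name, sig), 'value': val}
       for name, data in interfaces.items()
       for sig, val in data.get('signals', {}).items()]
print(acc)   # [{'id': 'motor.rpm', 'value': 1500}, ...]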
def find_satisfied_condition(conditions, ps): """Returns the first element of 'property-sets' which is a subset of 'properties', or an empty list if no such element exists.""" assert is_iterable_typed(conditions, property_set.PropertySet) assert isinstance(ps, property_set.PropertySet) for condition in conditions: found_all = True for i in condition.all(): if i.value: found = i.value in ps.get(i.feature) else: # Handle value-less properties like '<architecture>' (compare with # '<architecture>x86'). # If $(i) is a value-less property it should match default # value of an optional property. See the first line in the # example below: # # property set properties result # <a> <b>foo <b>foo match # <a> <b>foo <a>foo <b>foo no match # <a>foo <b>foo <b>foo no match # <a>foo <b>foo <a>foo <b>foo match found = not ps.get(i.feature) found_all = found_all and found if found_all: return condition return None
Returns the first element of 'property-sets' which is a subset of 'properties', or an empty list if no such element exists.
def update_model_cache(table_name): """ Updates model cache by generating a new key for the model """ model_cache_info = ModelCacheInfo(table_name, uuid.uuid4().hex) model_cache_backend.share_model_cache_info(model_cache_info)
Updates model cache by generating a new key for the model