text: string (length 78 to 104k)
score: float64 (range 0 to 0.18)
def get_root_vaults(self): """Gets the root vaults in this vault hierarchy. return: (osid.authorization.VaultList) - the root vaults raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_root_bins if self._catalog_session is not None: return self._catalog_session.get_root_catalogs() return VaultLookupSession( self._proxy, self._runtime).get_vaults_by_ids(list(self.get_root_vault_ids()))
0.002882
def worker_errordown(self, node, error): """Emitted by the WorkerController when a node dies.""" self.config.hook.pytest_testnodedown(node=node, error=error) try: crashitem = self.sched.remove_node(node) except KeyError: pass else: if crashitem: self.handle_crashitem(crashitem, node) self._failed_nodes_count += 1 maximum_reached = ( self._max_worker_restart is not None and self._failed_nodes_count > self._max_worker_restart ) if maximum_reached: if self._max_worker_restart == 0: msg = "Worker restarting disabled" else: msg = "Maximum crashed workers reached: %d" % self._max_worker_restart self.report_line(msg) else: self.report_line("Replacing crashed worker %s" % node.gateway.id) self._clone_node(node) self._active_nodes.remove(node)
0.002997
def update_roster(self, REQUEST=None): """ If workspace is team managed, users can add/remove participants. Any user with the manage workspace permission can add/remove participants and admins. """ CheckAuthenticator(self.request) PostOnly(self.request) form = self.request.form entries = form.get('entries', []) self.update_users(entries) api.portal.show_message(message=_(u'Roster updated.'), request=self.request) return self.request.response.redirect( '%s/@@edit-roster' % self.context.absolute_url())
0.003101
def _transcoding(cls, data): """Encoding conversion. :param data: the data to convert :return: the converted data """ if not data: return data result = None if isinstance(data, str) and hasattr(data, 'decode'): result = data.decode('utf-8') else: result = data return result
0.005831
def label_from_bin(buf): """ Converts binary representation label to integer. :param buf: Binary representation of label. :return: MPLS Label and BoS bit. """ mpls_label = type_desc.Int3.to_user(six.binary_type(buf)) return mpls_label >> 4, mpls_label & 1
0.003509
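A minimal sketch of the split that label_from_bin performs once the 3-byte buffer has been decoded to an integer (the type_desc.Int3 call is assumed to return that raw value): the upper bits hold the MPLS label and the lowest bit is the bottom-of-stack flag.

raw = (1 << 4) | 1          # hypothetical decoded value: label 1 with the BoS bit set
label, bos = raw >> 4, raw & 1
assert (label, bos) == (1, 1)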
def _get_tasks_from_reminder_list( self, listName): """*get the tasks from a reminder app list as a string in taskpaper format* **Key Arguments:** - ``listName`` -- the name of the reminders list **Return:** - ``newTasks`` -- a string containing tasks in taskpaper format """ self.log.info('starting the ``_get_tasks_from_reminder_list`` method') from subprocess import Popen, PIPE, STDOUT applescript = """ tell application "Reminders" --set output to name of reminders set myList to "%(listName)s" if (count of (reminders in list myList whose completed is false)) > 0 then set todoListNames to name of reminders in list myList whose completed is false set todoListNotes to body of reminders in list myList whose completed is false set todoListDates to due date of reminders in list myList whose completed is false set output to "" repeat with itemNum from 1 to (count of todoListNames) set output to output & "- " & (item itemNum of todoListNames) if (item itemNum of todoListDates) > date "Tuesday, 25 December 1900 at 00:00:00" then set dueDate to my date_time_to_iso(item itemNum of todoListDates) set output to output & " @due(" & dueDate & ")" end if set output to output & "\n" if item itemNum of todoListNotes exists then repeat with para in every paragraph of (item itemNum of todoListNotes) set output to (output & " " & para as string) & "\n" end repeat end if end repeat else set output to "" end if return output end tell on date_time_to_iso(dt) set {year:y, month:m, day:d, hours:h, minutes:min, seconds:s} to dt set y to text 2 through -1 of ((y + 10000) as text) set m to text 2 through -1 of ((m + 100) as text) set d to text 2 through -1 of ((d + 100) as text) set h to text 2 through -1 of ((h + 100) as text) set min to text 2 through -1 of ((min + 100) as text) set s to text 2 through -1 of ((s + 100) as text) return y & "-" & m & "-" & d & " " & h & ":" & min end date_time_to_iso """ % locals() cmd = "\n".join(["osascript <<EOD", applescript, "EOD"]) p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) stdout, stderr = p.communicate() self.log.debug('output: %(stdout)s' % locals()) print stdout print newTasks = stdout.decode("utf-8") if len(stderr): self.log.error(stderr) sys.exit(0) self.log.info('completed the ``_get_tasks_from_reminder_list`` method') print newTasks return newTasks
0.004
def search_in_hdx(cls, query='*:*', configuration=None, page_size=1000, **kwargs): # type: (Optional[str], Optional[Configuration], int, Any) -> List['Dataset'] """Searches for datasets in HDX Args: query (Optional[str]): Query (in Solr format). Defaults to '*:*'. configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. page_size (int): Size of page to return. Defaults to 1000. **kwargs: See below fq (string): Any filter queries to apply sort (string): Sorting of the search results. Defaults to 'relevance asc, metadata_modified desc'. rows (int): Number of matching rows to return. Defaults to all datasets (sys.maxsize). start (int): Offset in the complete result for where the set of returned datasets should begin facet (string): Whether to enable faceted results. Default to True. facet.mincount (int): Minimum counts for facet fields should be included in the results facet.limit (int): Maximum number of values the facet fields return (- = unlimited). Defaults to 50. facet.field (List[str]): Fields to facet upon. Default is empty. use_default_schema (bool): Use default package schema instead of custom schema. Defaults to False. Returns: List[Dataset]: list of datasets resulting from query """ dataset = Dataset(configuration=configuration) total_rows = kwargs.get('rows', cls.max_int) start = kwargs.get('start', 0) all_datasets = None attempts = 0 while attempts < cls.max_attempts and all_datasets is None: # if the count values vary for multiple calls, then must redo query all_datasets = list() counts = set() for page in range(total_rows // page_size + 1): pagetimespagesize = page * page_size kwargs['start'] = start + pagetimespagesize rows_left = total_rows - pagetimespagesize rows = min(rows_left, page_size) kwargs['rows'] = rows _, result = dataset._read_from_hdx('dataset', query, 'q', Dataset.actions()['search'], **kwargs) datasets = list() if result: count = result.get('count', None) if count: counts.add(count) no_results = len(result['results']) for datasetdict in result['results']: dataset = Dataset(configuration=configuration) dataset.old_data = dict() dataset.data = datasetdict dataset._dataset_create_resources() datasets.append(dataset) all_datasets += datasets if no_results < rows: break else: break else: logger.debug(result) if all_datasets and len(counts) != 1: # Make sure counts are all same for multiple calls to HDX all_datasets = None attempts += 1 else: ids = [dataset['id'] for dataset in all_datasets] # check for duplicates (shouldn't happen) if len(ids) != len(set(ids)): all_datasets = None attempts += 1 if attempts == cls.max_attempts and all_datasets is None: raise HDXError('Maximum attempts reached for searching for datasets!') return all_datasets
0.004545
def load_image(self, file_path, redraw=True): """ Accepts a path to an 8 x 8 image file and updates the LED matrix with the image """ if not os.path.exists(file_path): raise IOError('%s not found' % file_path) img = Image.open(file_path).convert('RGB') pixel_list = list(map(list, img.getdata())) if redraw: self.set_pixels(pixel_list) return pixel_list
0.004415
def gen_cv_preds(clf, arr, sel_score, num_chunks=3): """ Generates cross validated predictions using an input classifier and data. clf is a classifier that implements the fit and predict methods. arr is the input data array (X) sel_score is the target list (y). y[n] corresponds to X[n,:] num_chunks is the number of cross validation folds to use Returns an array of the predictions where prediction[n] corresponds to X[n,:] """ cv_len = int(math.floor(len(sel_score) / num_chunks)) chunks = [] for i in range(0, num_chunks): range_min = i * cv_len range_max = ((i + 1) * cv_len) if i == num_chunks - 1: range_max = len(sel_score) chunks.append(range(range_min, range_max)) preds = [] set_score = numpy.asarray(sel_score, dtype=numpy.int) chunk_vec = numpy.asarray(range(0, len(chunks))) for i in xrange(0, len(chunks)): loop_inds = list( chain.from_iterable([chunks[int(z)] for z, m in enumerate(range(0, len(chunks))) if int(z) != i])) sim_fit = clf.fit(arr[loop_inds], set_score[loop_inds]) preds.append(list(sim_fit.predict(arr[chunks[i]]))) all_preds = list(chain(*preds)) return(all_preds)
0.003175
def get_td_from_freqtau(template=None, taper=None, **kwargs): """Return time domain ringdown with all the modes specified. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. taper: {None, float}, optional Tapering at the beginning of the waveform with duration taper * tau. This option is recommended with timescales taper=1./2 or 1. for time-domain ringdown-only injections. The abrupt turn on of the ringdown can cause issues on the waveform when doing the fourier transform to the frequency domain. Setting taper will add a rapid ringup with timescale tau/10. Each mode and overtone will have a different taper depending on its tau, the final taper being the superposition of all the tapers. lmns : list Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55). The n specifies the number of overtones desired for the corresponding lm pair (maximum n=8). Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330 f_lmn: float Central frequency of the lmn overtone, as many as number of modes. tau_lmn: float Damping time of the lmn overtone, as many as number of modes. amp220 : float Amplitude of the fundamental 220 mode. amplmn : float Fraction of the amplitude of the lmn overtone relative to the fundamental mode, as many as the number of subdominant modes. philmn : float Phase of the lmn overtone, as many as the number of modes. Should also include the information from the azimuthal angle (phi + m*Phi). inclination : {None, float}, optional Inclination of the system in radians. If None, the spherical harmonics will be set to 1. delta_t : {None, float}, optional The time step used to generate the ringdown. If None, it will be set to the inverse of the frequency at which the amplitude is 1/1000 of the peak amplitude (the minimum of all modes). t_final : {None, float}, optional The ending time of the output frequency series. If None, it will be set to the time at which the amplitude is 1/1000 of the peak amplitude (the maximum of all modes). Returns ------- hplustilde: FrequencySeries The plus phase of a ringdown with the lm modes specified and n overtones in frequency domain. hcrosstilde: FrequencySeries The cross phase of a ringdown with the lm modes specified and n overtones in frequency domain. 
""" input_params = props(template, freqtau_required_args, **kwargs) # Get required args f_0, tau = lm_freqs_taus(**input_params) lmns = input_params['lmns'] for lmn in lmns: if int(lmn[2]) == 0: raise ValueError('Number of overtones (nmodes) must be greater ' 'than zero.') # following may not be in input_params inc = input_params.pop('inclination', None) delta_t = input_params.pop('delta_t', None) t_final = input_params.pop('t_final', None) if not delta_t: delta_t = lm_deltat(f_0, tau, lmns) if not t_final: t_final = lm_tfinal(tau, lmns) kmax = int(t_final / delta_t) + 1 # Different overtones will have different tapering window-size # Find maximum window size to create long enough output vector if taper: taper_window = int(taper*max(tau.values())/delta_t) kmax += taper_window outplus = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t) outcross = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t) if taper: start = - taper * max(tau.values()) outplus._epoch, outcross._epoch = start, start for lmn in lmns: l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2]) hplus, hcross = get_td_lm(freqs=f_0, taus=tau, l=l, m=m, nmodes=nmodes, taper=taper, inclination=inc, delta_t=delta_t, t_final=t_final, **input_params) if not taper: outplus.data += hplus.data outcross.data += hcross.data else: outplus = taper_shift(hplus, outplus) outcross = taper_shift(hcross, outcross) return outplus, outcross
0.000898
def interval_timer(interval, func, *args, **kwargs): '''Interval timer function. Taken from: http://stackoverflow.com/questions/22498038/improvement-on-interval-python/22498708 ''' stopped = Event() def loop(): while not stopped.wait(interval): # the first call is after interval func(*args, **kwargs) Thread(name='IntervalTimerThread', target=loop).start() return stopped.set
0.004673
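A hedged usage sketch of interval_timer above: the function returns the Event's set method, so calling the returned value stops the loop. The 5-second period and the print callback are illustrative choices.

stop = interval_timer(5.0, print, "tick")   # prints "tick" every 5 seconds (first call after 5 s)
# ... later ...
stop()                                      # sets the Event; the loop exits on its next wait()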
def _new_device_id(self, key): """ Generate a new device id or return existing device id for key :param key: Key for device :type key: unicode :return: The device id :rtype: int """ device_id = Id.SERVER + 1 if key in self._key2deviceId: return self._key2deviceId[key] while device_id in self._clients: device_id += 1 return device_id
0.004474
def parse(self): """Iterate through the directory and extract package/version info.""" for package in os.listdir(self.root_directory): directory = os.path.join(self.root_directory, package) if not os.path.isdir(directory): continue dir_contents = os.listdir(directory) sdists = [tarball for tarball in dir_contents if (tarball.endswith('.tar.gz') and tarball.startswith(package + '-'))] for sdist in sdists: version = sdist.replace('.tar.gz', '').replace( package + '-', '') self.packages[package].append(version)
0.002837
def topological_sort(data): """Topological sort the given dictionary structure. Args: data (dict); dictionary structure where the value is a list of dependencies for that given key. For example: ``{'a': (), 'b': ('a',)}``, where ``a`` depends on nothing and ``b`` depends on ``a``. Returns: tuple: the dependencies in constructor order """ def check_self_dependencies(input_data): """Check if there are self dependencies within a node. Self dependencies are for example: ``{'a': ('a',)}``. Args: input_data (dict): the input data. Of a structure similar to {key: (list of values), ...}. Raises: ValueError: if there are indeed self dependencies """ for k, v in input_data.items(): if k in v: raise ValueError('Self-dependency, {} depends on itself.'.format(k)) def prepare_input_data(input_data): """Prepares the input data by making sets of the dependencies. This automatically removes redundant items. Args: input_data (dict): the input data. Of a structure similar to {key: (list of values), ...}. Returns: dict: a copy of the input dict but with sets instead of lists for the dependencies. """ return {k: set(v) for k, v in input_data.items()} def find_items_without_dependencies(input_data): """This searches the dependencies of all the items for items that have no dependencies. For example, suppose the input is: ``{'a': ('b',)}``, then ``a`` depends on ``b`` and ``b`` depends on nothing. This class returns ``(b,)`` in this example. Args: input_data (dict): the input data. Of a structure similar to {key: (list of values), ...}. Returns: list: the list of items without any dependency. """ return list(reduce(set.union, input_data.values()) - set(input_data.keys())) def add_empty_dependencies(data): items_without_dependencies = find_items_without_dependencies(data) data.update({item: set() for item in items_without_dependencies}) def get_sorted(input_data): data = input_data while True: ordered = set(item for item, dep in data.items() if len(dep) == 0) if not ordered: break yield ordered data = {item: (dep - ordered) for item, dep in data.items() if item not in ordered} if len(data) != 0: raise ValueError('Cyclic dependencies exist ' 'among these items: {}'.format(', '.join(repr(x) for x in data.items()))) check_self_dependencies(data) if not len(data): return [] data_copy = prepare_input_data(data) add_empty_dependencies(data_copy) result = [] for d in get_sorted(data_copy): try: d = sorted(d) except TypeError: d = list(d) result.extend(d) return result
0.004608
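An illustrative call to topological_sort, extending the docstring's own example; within a tier of items that do not depend on each other the order comes from the sorted() call.

deps = {'a': (), 'b': ('a',), 'c': ('a', 'b')}
print(topological_sort(deps))   # ['a', 'b', 'c']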
def check_redirect_uris(uris, client_type=None): """ This function checks all return uris provided and tries to deduce as what type of client we should register. :param uris: The redirect URIs to check. :type uris: list :param client_type: An indicator of which client type you are expecting to be used. If this does not match the deduced type, an error will be raised. :type client_type: str :returns: The deduced client type. :rtype: str :raises ValueError: An error occured while checking the redirect uris. .. versionadded:: 1.0 """ if client_type not in [None, 'native', 'web']: raise ValueError('Invalid client type indicator used') if not isinstance(uris, list): raise ValueError('uris needs to be a list of strings') if len(uris) < 1: raise ValueError('At least one return URI needs to be provided') for uri in uris: if uri.startswith('https://'): if client_type == 'native': raise ValueError('https url with native client') client_type = 'web' elif uri.startswith('http://localhost'): if client_type == 'web': raise ValueError('http://localhost url with web client') client_type = 'native' else: if (uri.startswith('http://') and not uri.startswith('http://localhost')): raise ValueError('http:// url with non-localhost is illegal') else: raise ValueError('Invalid uri provided: %s' % uri) return client_type
0.001241
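A short, assumed usage of check_redirect_uris showing how the client type is deduced from the URI scheme (the URLs are placeholders).

check_redirect_uris(['https://example.com/callback'])     # returns 'web'
check_redirect_uris(['http://localhost:8000/callback'])   # returns 'native'
# Mixing the two kinds, or forcing a mismatched client_type, raises ValueError:
# check_redirect_uris(['https://example.com/cb'], client_type='native')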
def _get_all_objs_of_type(type_, parent): """Get all attributes of the given type from the given object. Parameters ---------- type_ : The desired type parent : The object from which to get the attributes with type matching 'type_' Returns ------- A list (possibly empty) of attributes from 'parent' """ return set([obj for obj in parent.__dict__.values() if isinstance(obj, type_)])
0.002227
def createSite(self, username, password, configStoreConnection, directories, cluster=None, logsSettings=None, runAsync=False ): """ This is the first operation that you must invoke when you install ArcGIS Server for the first time. Creating a new site involves: -Allocating a store to save the site configuration -Configuring the server machine and registering it with the site -Creating a new cluster configuration that includes the server machine -Configuring server directories -Deploying the services that are marked to auto-deploy Because of the sheer number of tasks, it usually takes a little while for this operation to complete. Once a site has been created, you can publish GIS services and deploy them to your server machines. Inputs: username - The name of the administrative account to be used by the site. This can be changed at a later stage. password - The credentials of the administrative account. configStoreConnection - A JSON object representing the connection to the configuration store. By default, the configuration store will be maintained in the ArcGIS Server installation directory. directories - A JSON object representing a collection of server directories to create. By default, the server directories will be created locally. cluster - An optional cluster configuration. By default, the site will create a cluster called 'default' with the first available port numbers starting from 4004. logsSettings - Optional log settings. runAsync - A flag to indicate if the operation needs to be run asynchronously. Values: true | false """ url = self._url + "/createNewSite" params = { "f" : "json", "cluster" : cluster, "directories" : directories, "username" : username, "password" : password, "configStoreConnection" : configStoreConnection, "logSettings" : logsSettings, "runAsync" : runAsync } return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
0.008188
def _ExpandArtifactFilesSource(self, source, requested): """Recursively expands an artifact files source.""" expanded_source = rdf_artifacts.ExpandedSource(base_source=source) sub_sources = [] artifact_list = [] if "artifact_list" in source.attributes: artifact_list = source.attributes["artifact_list"] for artifact_name in artifact_list: if artifact_name in self.processed_artifacts: continue artifact_obj = artifact_registry.REGISTRY.GetArtifact(artifact_name) for expanded_artifact in self.Expand(artifact_obj, requested): sub_sources.extend(expanded_artifact.sources) expanded_source.artifact_sources = sub_sources expanded_source.path_type = self._path_type return [expanded_source]
0.006545
def configure(self, options, conf): """Configure the plugin and system, based on selected options. The base plugin class sets the plugin to enabled if the enable option for the plugin (self.enableOpt) is true. """ if not self.can_configure: return self.conf = conf if hasattr(options, self.enableOpt): self.enabled = getattr(options, self.enableOpt)
0.004651
def numenta_local_wmax_extended(self, x): r""" New local w-max procedure (experimental). This `encode`-function extends the local w-max overlap procedure. See "Numenta’s local inhibition revisited" (Section 6) in `latex/notes.pdf`. Note that the "activation probabilities" $a_{ij}$ are already encoded as $h_{ij}$. """ pooler = self W = pooler.connections.visible_to_hidden H = pooler.connections.hidden_to_hidden b = pooler.connections.hidden_bias n, m = pooler.output_size, pooler.input_size w = pooler.code_weight s = pooler.sparsity y = np.zeros(n) a = pooler.average_activity scores = np.exp(-b)*np.dot(W,x) for i in range(n): estimated_activity = 0. for j in range(n): # a_ij = a[i,j]/a[i,i] a_ij = H[i,j] if scores[j] >= scores[i]: estimated_activity += a_ij if estimated_activity < s: y[i] = 1. return y
0.013739
def parse_transdos(path_dir, efermi, dos_spin=1, trim_dos=False): """ Parses .transdos (total DOS) and .transdos_x_y (partial DOS) files Args: path_dir: (str) dir containing DOS files efermi: (float) Fermi energy dos_spin: (int) -1 for spin down, +1 for spin up trim_dos: (bool) whether to post-process / trim DOS Returns: tuple - (DOS, dict of partial DOS) """ data_dos = {'total': [], 'partial': {}} # parse the total DOS data ## format is energy, DOS, integrated DOS with open(os.path.join(path_dir, "boltztrap.transdos"), 'r') as f: count_series = 0 # TODO: why is count_series needed? for line in f: if line.lstrip().startswith("#"): count_series += 1 if count_series > 1: break else: data_dos['total'].append( [Energy(float(line.split()[0]), "Ry").to("eV"), float(line.split()[1])]) total_elec = float(line.split()[2]) lw_l = 0 hg_l = -len(data_dos['total']) if trim_dos: # Francesco knows what this does # It has something to do with a trick of adding fake energies # at the endpoints of the DOS, and then re-trimming it. This is # to get the same energy scale for up and down spin DOS. tmp_data = np.array(data_dos['total']) tmp_den = np.trim_zeros(tmp_data[:, 1], 'f')[1:] lw_l = len(tmp_data[:, 1]) - len(tmp_den) tmp_ene = tmp_data[lw_l:, 0] tmp_den = np.trim_zeros(tmp_den, 'b')[:-1] hg_l = len(tmp_ene) - len(tmp_den) tmp_ene = tmp_ene[:-hg_l] tmp_data = np.vstack((tmp_ene, tmp_den)).T data_dos['total'] = tmp_data.tolist() # parse partial DOS data for file_name in os.listdir(path_dir): if file_name.endswith( "transdos") and file_name != 'boltztrap.transdos': tokens = file_name.split(".")[1].split("_") site = tokens[1] orb = '_'.join(tokens[2:]) with open(os.path.join(path_dir, file_name), 'r') as f: for line in f: if not line.lstrip().startswith(" #"): if site not in data_dos['partial']: data_dos['partial'][site] = {} if orb not in data_dos['partial'][site]: data_dos['partial'][site][orb] = [] data_dos['partial'][site][orb].append( float(line.split()[1])) data_dos['partial'][site][orb] = data_dos['partial'][site][ orb][lw_l:-hg_l] dos_full = {'energy': [], 'density': []} for t in data_dos['total']: dos_full['energy'].append(t[0]) dos_full['density'].append(t[1]) dos = Dos(efermi, dos_full['energy'], {Spin(dos_spin): dos_full['density']}) dos_partial = data_dos['partial'] # TODO: make this real DOS object? return dos, dos_partial
0.000888
def open_file(self, fname, external=False): """ Open filename with the appropriate application Redirect to the right widget (txt -> editor, spydata -> workspace, ...) or open file outside Spyder (if extension is not supported) """ fname = to_text_string(fname) ext = osp.splitext(fname)[1] if encoding.is_text_file(fname): self.editor.load(fname) elif self.variableexplorer is not None and ext in IMPORT_EXT: self.variableexplorer.import_data(fname) elif not external: fname = file_uri(fname) programs.start_file(fname)
0.003026
def flush(self, key=None): """Flush the cache. If I{key} is specified, only that item is flushed. Otherwise the entire cache is flushed. @param key: the key to flush @type key: (dns.name.Name, int, int) tuple or None """ if not key is None: if key in self.data: del self.data[key] else: self.data = {} self.next_cleaning = time.time() + self.cleaning_interval
0.006263
def vec(val, width, signed=None): """create hdl vector value""" return Bits(width, signed, forceVector=True).fromPy(val)
0.007813
def add_geo(self, geo_location): """ Saves a <geo-location> Element, to be incorporated into the Open511 geometry field. """ if not (geo_location.xpath('latitude') and geo_location.xpath('longitude')): raise Exception("Invalid geo-location %s" % etree.tostring(geo_location)) if _xpath_or_none(geo_location, 'horizontal-datum/text()') not in ('wgs84', None): logger.warning("Unsupported horizontal-datum in %s" % etree.tostring(geo_location)) return point = ( float(_xpath_or_none(geo_location, 'longitude/text()')) / 1000000, float(_xpath_or_none(geo_location, 'latitude/text()')) / 1000000 ) self.points.add(point)
0.008054
def click(self) -> None: """Send click event.""" if self.connected: self.js_exec('click') else: # If the element is displayed in the browser, the click event is fired from the browser side anyway, # so no local click event is needed msg = {'proto': '', 'type': 'click', 'currentTarget': {'id': self.wdom_id}, 'target': {'id': self.wdom_id}} e = create_event(msg) self._dispatch_event(e)
0.004545
def heightmap_get_interpolated_value( hm: np.ndarray, x: float, y: float ) -> float: """Return the interpolated height at non integer coordinates. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. x (float): A floating point x coordinate. y (float): A floating point y coordinate. Returns: float: The value at ``x``, ``y``. """ return float( lib.TCOD_heightmap_get_interpolated_value(_heightmap_cdata(hm), x, y) )
0.001961
def has_permissions(**perms): """A :func:`.check` that is added that checks if the member has all of the permissions necessary. The permissions passed in must be exactly like the properties shown under :class:`.discord.Permissions`. This check raises a special exception, :exc:`.MissingPermissions` that is inherited from :exc:`.CheckFailure`. Parameters ------------ perms An argument list of permissions to check for. Example --------- .. code-block:: python3 @bot.command() @commands.has_permissions(manage_messages=True) async def test(ctx): await ctx.send('You can manage messages.') """ def predicate(ctx): ch = ctx.channel permissions = ch.permissions_for(ctx.author) missing = [perm for perm, value in perms.items() if getattr(permissions, perm, None) != value] if not missing: return True raise MissingPermissions(missing) return check(predicate)
0.001957
def mask(self): """Masks the layer below with this layer. Commits the current layer to the alpha channel of the previous layer. Primarily, mask() is useful when using gradient layers as masks on images below. For example: canvas.layer("image.jpg") canvas.gradient() canvas.layer(2).flip() canvas.layer(2).mask() Adds a white-to-black linear gradient to the alpha channel of image.jpg, making it evolve from opaque on the left to transparent on the right. """ if len(self.canvas.layers) < 2: return i = self.index() if i == 0: return layer = self.canvas.layers[i-1] alpha = Image.new("L", layer.img.size, 0) #Make a composite of the mask layer in grayscale #and its own alpha channel. mask = self.canvas.layers[i] flat = ImageChops.darker(mask.img.convert("L"), mask.img.split()[3]) alpha.paste(flat, (mask.x,mask.y)) alpha = ImageChops.darker(alpha, layer.img.split()[3]) layer.img.putalpha(alpha) self.delete()
0.019247
def _check_stations_csv(self, usr, root): ''' Relocate a stations.csv copy in the user home for easy management. E.g. no need for sudo when you add a new station, etc. ''' if path.exists(path.join(usr, 'stations.csv')): return else: copyfile(root, path.join(usr, 'stations.csv'))
0.006116
def check_status(self): """ tests both the ext_url and local_url to see if the database is running returns: True if a connection can be made False if the connection cannot be made """ log = logging.getLogger("%s.%s" % (self.log_name, inspect.stack()[0][3])) log.setLevel(self.log_level) if self.url: return True try: result = requests.get(self.ext_url) self.url = self.ext_url return True except requests.exceptions.ConnectionError: pass try: result = requests.get(self.local_url) log.warning("Url '%s' not connecting. Using local_url '%s'" % \ (self.ext_url, self.local_url)) self.url = self.local_url return True except requests.exceptions.ConnectionError: self.url = None log.warning("Unable to connect using urls: %s" % set([self.ext_url, self.local_url])) return False
0.005111
def _clone_post_init(self, obj=None, **kwargs): """ obj must be another Plottable instance. obj is used by Clone to properly transfer all attributes onto this object. """ # Initialize the extra attributes if obj is None or obj is self: # We must be asrootpy-ing a ROOT object # or freshly init-ing a rootpy object for attr, value in Plottable.EXTRA_ATTRS.items(): # Use the default value setattr(self, attr, value) else: for attr, value in Plottable.EXTRA_ATTRS.items(): setattr(self, attr, getattr(obj, attr)) # Create aliases from deprecated to current attributes for depattr, newattr in Plottable.EXTRA_ATTRS_DEPRECATED.items(): setattr(Plottable, depattr, property( fget=Plottable._get_attr_depr(depattr, newattr), fset=Plottable._set_attr_depr(depattr, newattr))) if obj is None or obj is self: # We must be asrootpy-ing a ROOT object # or freshly init-ing a rootpy object # Initialize style attrs to style of TObject if isinstance(self, ROOT.TAttLine): self._linecolor = Color(ROOT.TAttLine.GetLineColor(self)) self._linestyle = LineStyle(ROOT.TAttLine.GetLineStyle(self)) self._linewidth = ROOT.TAttLine.GetLineWidth(self) else: # HistStack self._linecolor = Color(Plottable.DEFAULT_DECOR['linecolor']) self._linestyle = LineStyle(Plottable.DEFAULT_DECOR['linestyle']) self._linewidth = Plottable.DEFAULT_DECOR['linewidth'] if isinstance(self, ROOT.TAttFill): self._fillcolor = Color(ROOT.TAttFill.GetFillColor(self)) self._fillstyle = FillStyle(ROOT.TAttFill.GetFillStyle(self)) else: # HistStack self._fillcolor = Color(Plottable.DEFAULT_DECOR['fillcolor']) self._fillstyle = FillStyle(Plottable.DEFAULT_DECOR['fillstyle']) if isinstance(self, ROOT.TAttMarker): self._markercolor = Color(ROOT.TAttMarker.GetMarkerColor(self)) self._markerstyle = MarkerStyle(ROOT.TAttMarker.GetMarkerStyle(self)) self._markersize = ROOT.TAttMarker.GetMarkerSize(self) else: # HistStack self._markercolor = Color(Plottable.DEFAULT_DECOR['markercolor']) self._markerstyle = MarkerStyle(Plottable.DEFAULT_DECOR['markerstyle']) self._markersize = Plottable.DEFAULT_DECOR['markersize'] if obj is None: # Populate defaults if we are not asrootpy-ing existing object decor = dict(**Plottable.DEFAULT_DECOR) decor.update(Plottable.EXTRA_ATTRS) if 'color' in kwargs: decor.pop('linecolor', None) decor.pop('fillcolor', None) decor.pop('markercolor', None) decor.update(kwargs) self.decorate(**decor) else: # Initialize style attrs to style of the other object if isinstance(obj, ROOT.TAttLine): self.SetLineColor(obj.GetLineColor()) self.SetLineStyle(obj.GetLineStyle()) self.SetLineWidth(obj.GetLineWidth()) if isinstance(obj, ROOT.TAttFill): self.SetFillColor(obj.GetFillColor()) self.SetFillStyle(obj.GetFillStyle()) if isinstance(obj, ROOT.TAttMarker): self.SetMarkerColor(obj.GetMarkerColor()) self.SetMarkerStyle(obj.GetMarkerStyle()) self.SetMarkerSize(obj.GetMarkerSize()) if kwargs: self.decorate(**kwargs)
0.002048
def list_distributions(region=None, key=None, keyid=None, profile=None): ''' List, with moderate information, all CloudFront distributions in the bound account. region Region to connect to. key Secret key to use. keyid Access key to use. profile Dict, or pillar key pointing to a dict, containing AWS region/key/keyid. CLI Example: .. code-block:: bash salt myminion boto_cloudfront.list_distributions ''' retries = 10 sleep = 6 conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) Items = [] while retries: try: log.debug('Garnering list of CloudFront distributions') Marker = '' while Marker is not None: ret = conn.list_distributions(Marker=Marker) Items += ret.get('DistributionList', {}).get('Items', []) Marker = ret.get('DistributionList', {}).get('NextMarker') return Items except botocore.exceptions.ParamValidationError as err: raise SaltInvocationError(str(err)) except botocore.exceptions.ClientError as err: if retries and err.response.get('Error', {}).get('Code') == 'Throttling': retries -= 1 log.debug('Throttled by AWS API, retrying in %s seconds...', sleep) time.sleep(sleep) continue log.error('Failed to list CloudFront distributions: %s', err.message) return None
0.003901
def gen_image(img, width, height, outfile, img_type='grey'): """Save an image with the given parameters.""" assert len(img) == width * height or len(img) == width * height * 3 if img_type == 'grey': misc.imsave(outfile, img.reshape(width, height)) elif img_type == 'color': misc.imsave(outfile, img.reshape(3, width, height))
0.002786
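A hedged example of calling gen_image with synthetic data; the flattened array length must satisfy the assert in the function, and scipy.misc.imsave is assumed to be available (as in older SciPy releases).

import numpy as np
gen_image(np.random.rand(32 * 32), 32, 32, 'noise_grey.png')   # grayscale: length equals width * height
# for img_type='color' the flattened array must instead have length 3 * width * height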
def _points(self, x_pos): """ Convert given data values into drawable points (x, y) and interpolated points if interpolate option is specified """ for series_group in (self.series, self.secondary_series): accumulation = [0] * self._len for serie in series_group[::-1 if self.stack_from_top else 1]: accumulation = list(map(sum, zip(accumulation, serie.values))) serie.points = [(x_pos[i], v) for i, v in enumerate(accumulation)] if serie.points and self.interpolate: serie.interpolated = self._interpolate(x_pos, accumulation) else: serie.interpolated = []
0.002646
def fix_relative_url(self, publish_type, rel_url): """ Fix post or page relative url to a standard, uniform format. :param publish_type: publish type ('post' or 'page') :param rel_url: relative url to fix :return: tuple(fixed relative url or file path if exists else None, file exists or not) :raise ValueError: unknown publish type """ if publish_type == 'post': return self.fix_post_relative_url(rel_url), False elif publish_type == 'page': return self.fix_page_relative_url(rel_url) else: raise ValueError( 'Publish type "{}" is not supported'.format(publish_type))
0.002762
def split_input(cls, mapper_spec): """Returns a list of input readers. An equal number of input files are assigned to each shard (+/- 1). If there are fewer files than shards, fewer than the requested number of shards will be used. Input files are currently never split (although for some formats could be and may be split in a future implementation). Args: mapper_spec: an instance of model.MapperSpec. Returns: A list of InputReaders. None when no input data can be found. """ reader_spec = cls.get_params(mapper_spec, allow_old=False) bucket = reader_spec[cls.BUCKET_NAME_PARAM] filenames = reader_spec[cls.OBJECT_NAMES_PARAM] delimiter = reader_spec.get(cls.DELIMITER_PARAM) account_id = reader_spec.get(cls._ACCOUNT_ID_PARAM) buffer_size = reader_spec.get(cls.BUFFER_SIZE_PARAM) fail_on_missing_input = reader_spec.get(cls.FAIL_ON_MISSING_INPUT) # Gather the complete list of files (expanding wildcards) all_filenames = [] for filename in filenames: if filename.endswith("*"): all_filenames.extend( [file_stat.filename for file_stat in cloudstorage.listbucket( "/" + bucket + "/" + filename[:-1], delimiter=delimiter, _account_id=account_id)]) else: all_filenames.append("/%s/%s" % (bucket, filename)) # Split into shards readers = [] for shard in range(0, mapper_spec.shard_count): shard_filenames = all_filenames[shard::mapper_spec.shard_count] if shard_filenames: reader = cls( shard_filenames, buffer_size=buffer_size, _account_id=account_id, delimiter=delimiter) reader._fail_on_missing_input = fail_on_missing_input readers.append(reader) return readers
0.002776
def _set_firmware(self, v, load=False): """ Setter method for firmware, mapped from YANG variable /firmware (container) If this variable is read-only (config: false) in the source YANG file, then _set_firmware is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_firmware() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=firmware.firmware, is_container='container', presence=False, yang_name="firmware", rest_name="firmware", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'firmware operations', u'action': u'recover'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """firmware must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=firmware.firmware, is_container='container', presence=False, yang_name="firmware", rest_name="firmware", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'firmware operations', u'action': u'recover'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""", }) self.__firmware = t if hasattr(self, '_set'): self._set()
0.005566
def case(context, institute, case_id, display_name): """Delete a case and it's variants from the database""" adapter = context.obj['adapter'] if not (case_id or display_name): click.echo("Please specify what case to delete") context.abort() if display_name: if not institute: click.echo("Please specify the owner of the case that should be " "deleted with flag '-i/--institute'.") context.abort() case_id = "{0}-{1}".format(institute, display_name) LOG.info("Running deleting case {0}".format(case_id)) case = adapter.delete_case( case_id=case_id, institute_id=institute, display_name=display_name ) if case.deleted_count == 1: adapter.delete_variants(case_id=case_id, variant_type='clinical') adapter.delete_variants(case_id=case_id, variant_type='research') else: LOG.warning("Case does not exist in database") context.abort()
0.000998
def getDynMeth(name): ''' Retrieve and return an unbound method by python path. ''' cname, fname = name.rsplit('.', 1) clas = getDynLocal(cname) if clas is None: return None return getattr(clas, fname, None)
0.004082
def chunks(lst, size): """Yield successive n-sized chunks from lst.""" for i in xrange(0, len(lst), size): yield lst[i:i + size]
0.006944
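Illustrative use of chunks; note the snippet uses xrange, so it targets Python 2 (with range it behaves the same on Python 3).

list(chunks([1, 2, 3, 4, 5], 2))   # [[1, 2], [3, 4], [5]]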
def as_hex(self): """ Return a color palette with hex codes instead of RGB values. """ hex = [mpl.colors.rgb2hex(rgb) for rgb in self] return ColorPalette(hex)
0.01005
def get_hierarchy_form(self, *args, **kwargs): """Pass through to provider HierarchyAdminSession.get_hierarchy_form_for_update""" # Implemented from kitosid template for - # osid.resource.BinAdminSession.get_bin_form_for_update_template # This method might be a bit sketchy. Time will tell. if isinstance(args[-1], list) or 'hierarchy_record_types' in kwargs: return self.get_hierarchy_form_for_create(*args, **kwargs) else: return self.get_hierarchy_form_for_update(*args, **kwargs)
0.005405
def get_fallbackservers(self, orgid, page=None): """Get Fallback server""" opts = {} if page: opts['page'] = page return self.api_call( ENDPOINTS['fallbackservers']['list'], dict(orgid=orgid), **opts)
0.007463
def get(self, name, typ): """ Gets a counter specified by its name. If the counter does not exist or its type doesn't match the specified type, it creates a new one. :param name: a counter name to retrieve. :param typ: a counter type. :return: an existing or newly created counter of the specified type. """ if name == None or len(name) == 0: raise Exception("Counter name was not set") self._lock.acquire() try: counter = self._cache[name] if name in self._cache else None if counter == None or counter.type != typ: counter = Counter(name, typ) self._cache[name] = counter return counter finally: self._lock.release()
0.004957
def get_channel_id(self): """Fetches id :return: id of youtube channel """ soup = BeautifulSoup( self.get_channel_page(), "lxml" ) # parser for source page channel_id = soup.find_all( "span", { "class": "channel-header-subscription-button-container" } ) # get all good spans channel_id = channel_id[0].find_all("button")[ 0] # get button in first span channel_id = channel_id["data-channel-external-id"] # get id return channel_id
0.003384
def load_scores_wiggle( fname ): """ Read a wiggle file and return a dict of BinnedArray objects keyed by chromosome. """ scores_by_chrom = dict() for chrom, pos, val in bx.wiggle.Reader( misc.open_compressed( fname ) ): if chrom not in scores_by_chrom: scores_by_chrom[chrom] = BinnedArray() scores_by_chrom[chrom][pos] = val return scores_by_chrom
0.019704
def copy_uri_options(hosts, mongodb_uri): """Returns a MongoDB URI to hosts with the options from mongodb_uri. """ if "?" in mongodb_uri: options = mongodb_uri.split("?", 1)[1] else: options = None uri = "mongodb://" + hosts if options: uri += "/?" + options return uri
0.00554
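A worked example of copy_uri_options with made-up hosts and options, showing that only the query string of the original URI is carried over.

copy_uri_options("host1:27017,host2:27017",
                 "mongodb://old-host/?replicaSet=rs0&w=majority")
# -> 'mongodb://host1:27017,host2:27017/?replicaSet=rs0&w=majority'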
def compare_versions(x, y): """ Expects 2 strings in the format of 'X.Y.Z' where X, Y and Z are integers. It will compare the items which will organize things properly by their major, minor and bugfix version. :: >>> my_list = ['v1.13', 'v1.14.2', 'v1.14.1', 'v1.9', 'v1.1'] >>> sorted(my_list, cmp=compare_versions) ['v1.1', 'v1.9', 'v1.13', 'v1.14.1', 'v1.14.2'] """ def version_to_tuple(version): # Trim off the leading v version_list = version[1:].split('.', 2) if len(version_list) <= 3: [version_list.append(0) for _ in range(3 - len(version_list))] try: return tuple((int(version) for version in version_list)) except ValueError: # not an integer, so it goes to the bottom return (0, 0, 0) x_major, x_minor, x_bugfix = version_to_tuple(x) y_major, y_minor, y_bugfix = version_to_tuple(y) return (cmp(x_major, y_major) or cmp(x_minor, y_minor) or cmp(x_bugfix, y_bugfix))
0.001938
def tree(c): """ Display documentation contents with the 'tree' program. """ ignore = ".git|*.pyc|*.swp|dist|*.egg-info|_static|_build|_templates" c.run('tree -Ca -I "{0}" {1}'.format(ignore, c.sphinx.source))
0.004367
def set_base_headers(self, hdr): """Set metadata in FITS headers.""" hdr['NUMXVER'] = (__version__, 'Numina package version') hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name') hdr['NUMRVER'] = (self.__version__, 'Numina recipe version') return hdr
0.006623
def remove_child(self, *sprites): """Remove one or several :class:`Sprite` sprites from scene """ # first drop focus scene = self.get_scene() if scene: child_sprites = list(self.all_child_sprites()) if scene._focus_sprite in child_sprites: scene._focus_sprite = None for sprite in sprites: if sprite in self.sprites: self.sprites.remove(sprite) sprite._scene = None sprite.parent = None self.disconnect_child(sprite) self._sort() self.redraw()
0.004878
def _run_parallel_job(self, job_id, array_id = None, no_log = False, nice = None, verbosity = 0): """Executes the code for this job on the local machine.""" environ = copy.deepcopy(os.environ) environ['JOB_ID'] = str(job_id) if array_id: environ['SGE_TASK_ID'] = str(array_id) else: environ['SGE_TASK_ID'] = 'undefined' # generate call to the wrapper script command = [self.wrapper_script, '-l%sd'%("v"*verbosity), self._database, 'run-job'] if nice is not None: command = ['nice', '-n%d'%nice] + command job, array_job = self._job_and_array(job_id, array_id) if job is None: # rare case: job was deleted before starting return None logger.info("Starting execution of Job '%s' (%s)", job.name, self._format_log(job_id, array_id, len(job.array))) # create log files if no_log or job.log_dir is None: out, err = sys.stdout, sys.stderr else: makedirs_safe(job.log_dir) # create line-buffered files for writing output and error status if array_job is not None: out, err = open(array_job.std_out_file(), 'w', 1), open(array_job.std_err_file(), 'w', 1) else: out, err = open(job.std_out_file(), 'w', 1), open(job.std_err_file(), 'w', 1) # return the subprocess pipe to the process try: return subprocess.Popen(command, env=environ, stdout=out, stderr=err, bufsize=1) except OSError as e: logger.error("Could not execute job '%s' (%s) locally\n- reason:\t%s\n- command line:\t%s\n- directory:\t%s\n- command:\t%s", job.name, self._format_log(job_id, array_id, len(job.array)), e, " ".join(job.get_command_line()), "." if job.exec_dir is None else job.exec_dir, " ".join(command)) job.finish(117, array_id) # ASCII 'O' return None
0.018384
def compare(self): """ Main comparison function """ """ Note: Make sure to be able to handle these ref/test scenarios: A: o----o---o---o x-------x----x B: o----o-----o---o x--------x--x--x C: o------o-----o---o x-x--------x--x--x D: o------o-----o---o x-x--------x-----x """ test_samp_num = 0 ref_samp_num = 0 # Iterate through the reference sample numbers while ref_samp_num < self.n_ref and test_samp_num < self.n_test: # Get the closest testing sample number for this reference sample closest_samp_num, smallest_samp_diff = ( self._get_closest_samp_num(ref_samp_num, test_samp_num)) # Get the closest testing sample number for the next reference # sample. This doesn't need to be called for the last index. if ref_samp_num < self.n_ref - 1: closest_samp_num_next, smallest_samp_diff_next = ( self._get_closest_samp_num(ref_samp_num + 1, test_samp_num)) else: # Set non-matching value if there is no next reference sample # to compete for the test sample closest_samp_num_next = -1 # Found a contested test sample number. Decide which # reference sample it belongs to. If the sample is closer to # the next reference sample, leave it to the next reference # sample and label this reference sample as unmatched. if (closest_samp_num == closest_samp_num_next and smallest_samp_diff_next < smallest_samp_diff): # Get the next closest sample for this reference sample, # if not already assigned to a previous sample. # It will be the previous testing sample number in any # possible case (scenario D below), or nothing. if closest_samp_num and (not ref_samp_num or closest_samp_num - 1 != self.matching_sample_nums[ref_samp_num - 1]): # The previous test annotation is inspected closest_samp_num = closest_samp_num - 1 smallest_samp_diff = abs(self.ref_sample[ref_samp_num] - self.test_sample[closest_samp_num]) # Assign the reference-test pair if close enough if smallest_samp_diff < self.window_width: self.matching_sample_nums[ref_samp_num] = closest_samp_num # Set the starting test sample number to inspect # for the next reference sample. test_samp_num = closest_samp_num + 1 # Otherwise there is no matching test annotation # If there is no clash, or the contested test sample is # closer to the current reference, keep the test sample # for this reference sample. else: # Assign the reference-test pair if close enough if smallest_samp_diff < self.window_width: self.matching_sample_nums[ref_samp_num] = closest_samp_num # Increment the starting test sample number to inspect # for the next reference sample. test_samp_num = closest_samp_num + 1 ref_samp_num += 1 self._calc_stats()
0.001715
def dasec(handle, bufsiz=_default_len_out, buflen=_default_len_out): """ Extract comments from the comment area of a binary DAS file. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasec_c.html :param handle: Handle of binary DAS file open with read access. :type handle: int :param bufsiz: Maximum size, in lines, of buffer. :type bufsiz: int :param buflen: Line length associated with buffer. :type buflen: int :return: Number of comments extracted from the DAS file, Buffer in which extracted comments are placed, Indicates whether all comments have been extracted. :rtype: tuple """ handle = ctypes.c_int(handle) buffer = stypes.emptyCharArray(buflen, bufsiz) bufsiz = ctypes.c_int(bufsiz) buflen = ctypes.c_int(buflen) n = ctypes.c_int(0) done = ctypes.c_int() libspice.dasec_c(handle, bufsiz, buflen, ctypes.byref(n), ctypes.byref(buffer), ctypes.byref(done)) return n.value, stypes.cVectorToPython(buffer), done.value
0.000933
def get_aliases(self, includename=True): """Retrieve the aliases of this object as a list of strings. Arguments --------- includename : bool Include the 'name' parameter in the list of aliases. """ # empty list if it doesn't exist alias_quanta = self.get(self._KEYS.ALIAS, []) aliases = [aq[QUANTITY.VALUE] for aq in alias_quanta] if includename and self[self._KEYS.NAME] not in aliases: aliases = [self[self._KEYS.NAME]] + aliases return aliases
0.003656
def main(argv=None): """Run dvc CLI command. Args: argv: optional list of arguments to parse. sys.argv is used by default. Returns: int: command's return code. """ args = None cmd = None try: args = parse_args(argv) if args.quiet: logger.setLevel(logging.CRITICAL) elif args.verbose: logger.setLevel(logging.DEBUG) cmd = args.func(args) ret = cmd.run_cmd() except KeyboardInterrupt: logger.exception("interrupted by the user") ret = 252 except NotDvcRepoError: logger.exception("") ret = 253 except DvcParserError: ret = 254 except Exception: # pylint: disable=broad-except logger.exception("unexpected error") ret = 255 Analytics().send_cmd(cmd, args, ret) return ret
0.001156
def check_chunks(n_samples, n_features, chunks=None): """Validate and normalize the chunks argument for a dask.array Parameters ---------- n_samples, n_features : int Give the shape of the array chunks : int, sequence, optional, default None * For 'chunks=None', this picks a "good" default number of chunks based on the number of CPU cores. The default results in a block structure with one block per core along the first dimension (of roughly equal lengths) and a single block along the second dimension. This may or may not be appropriate for your use-case. The chunk size will be at least 100 along the first dimension. * When chunks is an int, we split the ``n_samples`` into ``chunks`` blocks along the first dimension, and a single block along the second. Again, the chunksize will be at least 100 along the first dimension. * When chunks is a sequence, we validate that it's length two and turn it into a tuple. Returns ------- chunks : tuple """ if chunks is None: chunks = (max(100, n_samples // cpu_count()), n_features) elif isinstance(chunks, Integral): chunks = (max(100, n_samples // chunks), n_features) elif isinstance(chunks, Sequence): chunks = tuple(chunks) if len(chunks) != 2: raise AssertionError("Chunks should be a 2-tuple.") else: raise ValueError("Unknown type of chunks: '{}'".format(type(chunks))) return chunks
0.000635
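Two illustrative calls to check_chunks showing the integer branch and the 100-row floor on the first dimension.

check_chunks(1000, 5, chunks=4)   # (250, 5)
check_chunks(120, 5, chunks=4)    # (100, 5) -- the chunk size never drops below 100 rows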
def copy_workspace(self, uri, new_name): ''' Copy the current workspace. Args: - uri (dict): the uri of the workspace being copied. Needs to have a did and wid key. - new_name (str): the new name of the copied workspace. Returns: - requests.Response: Onshape response data ''' payload = { 'isPublic': True, 'newName': new_name } return self._api.request('post', '/api/documents/' + uri['did'] + '/workspaces/' + uri['wvm'] + '/copy', body=payload)
0.006957
def _add_relations(self, relations): """Add all of the relations for the services.""" for k, v in six.iteritems(relations): self.d.relate(k, v)
0.011696
def gen_methods(self, *args, **kwargs): '''Find all method names this input dispatches to. This method can accept *args, **kwargs, but it's the gen_dispatch method's job of passing specific args to handler methods. ''' dispatched = False for invoc, methodname in self.registry: args, kwargs = self.loads(invoc) yield getattr(self.inst, methodname), args, kwargs dispatched = True if dispatched: return # Try the generic handler. generic_handler = getattr(self.inst, 'generic_handler', None) if generic_handler is not None: yield generic_handler, args, kwargs # Give up. msg = 'No method was found for %r on %r.' raise self.DispatchError(msg % ((args, kwargs), self.inst))
0.002387
def kill_session_input_session_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") kill_session = ET.Element("kill_session") config = kill_session input = ET.SubElement(kill_session, "input") session_id = ET.SubElement(input, "session-id") session_id.text = kwargs.pop('session_id') callback = kwargs.pop('callback', self._callback) return callback(config)
0.004329
def distance(image_path, other_image_path): """ Compute the hamming distance between two images""" image_hash = average_hash(image_path) other_image_hash = average_hash(other_image_path) return hash_distance(image_hash, other_image_hash)
0.003937
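A hedged usage of distance above; the file names are placeholders, and average_hash / hash_distance are assumed to come from the same module as the function.

d = distance('photo_a.png', 'photo_b.png')
print(d)   # number of differing bits between the two average hashes; 0 means the images hash identically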
def load_transactions(input_file, **kwargs): """ Load transactions and returns a generator for transactions. Arguments: input_file -- An input file. Keyword arguments: delimiter -- The delimiter of the transaction. """ delimiter = kwargs.get('delimiter', '\t') for transaction in csv.reader(input_file, delimiter=delimiter): yield transaction if transaction else ['']
0.002375
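A minimal sketch of driving load_transactions with a tab-separated file (the file name is illustrative).

with open('transactions.tsv') as input_file:
    for transaction in load_transactions(input_file, delimiter='\t'):
        print(transaction)   # each transaction is a list of fields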
def _set_show_vcs(self, v, load=False): """ Setter method for show_vcs, mapped from YANG variable /brocade_vcs_rpc/show_vcs (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_vcs is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_vcs() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=show_vcs.show_vcs, is_leaf=True, yang_name="show-vcs", rest_name="show-vcs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'getclusterinfo-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """show_vcs must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=show_vcs.show_vcs, is_leaf=True, yang_name="show-vcs", rest_name="show-vcs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'getclusterinfo-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='rpc', is_config=True)""", }) self.__show_vcs = t if hasattr(self, '_set'): self._set()
0.00578
def plot(self, figsize=None, rotation=45): """Plot the confusion matrix. Args: figsize: tuple (x, y) of ints. Sets the size of the figure rotation: the rotation angle of the labels on the x-axis. """ fig, ax = plt.subplots(figsize=figsize) plt.imshow(self._cm, interpolation='nearest', cmap=plt.cm.Blues, aspect='auto') plt.title('Confusion matrix') plt.colorbar() tick_marks = np.arange(len(self._labels)) plt.xticks(tick_marks, self._labels, rotation=rotation) plt.yticks(tick_marks, self._labels) if isinstance(self._cm, list): # If cm is created from BigQuery then it is a list. thresh = max(max(self._cm)) / 2. for i, j in itertools.product(range(len(self._labels)), range(len(self._labels))): plt.text(j, i, self._cm[i][j], horizontalalignment="center", color="white" if self._cm[i][j] > thresh else "black") else: # If cm is created from csv then it is a sklearn's confusion_matrix. thresh = self._cm.max() / 2. for i, j in itertools.product(range(len(self._labels)), range(len(self._labels))): plt.text(j, i, self._cm[i, j], horizontalalignment="center", color="white" if self._cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label')
0.007396
def gsea_pval(es, esnull): """Compute nominal p-value. From article (PNAS): estimate nominal p-value for S from esnull by using the positive or negative portion of the distribution corresponding to the sign of the observed ES(S). """ # to speed up, using numpy function to compute pval in parallel. condlist = [ es < 0, es >=0] choicelist = [np.sum(esnull < es.reshape(len(es),1), axis=1)/ np.sum(esnull < 0, axis=1), np.sum(esnull >= es.reshape(len(es),1), axis=1)/ np.sum(esnull >= 0, axis=1)] pval = np.select(condlist, choicelist) return pval
0.01473
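A small hedged example of the vectorized p-value computation above on random data; the shapes (3 observed scores, 1000 permutations) are arbitrary.
import numpy as np

rng = np.random.RandomState(0)
es = np.array([2.1, -1.5, 0.3])          # observed enrichment scores
esnull = rng.normal(size=(3, 1000))      # permutation null, one row per score
print(gsea_pval(es, esnull))             # one nominal p-value per score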
def _compute_length(nodes):
    r"""Approximately compute the length of a curve.

    .. _QUADPACK: https://en.wikipedia.org/wiki/QUADPACK

    If ``degree`` is :math:`n`, then the Hodograph curve
    :math:`B'(s)` is degree :math:`d = n - 1`. Using this curve, we
    approximate the integral:

    .. math::

       \int_{B\left(\left[0, 1\right]\right)} 1 \, d\mathbf{x} =
       \int_0^1 \left\lVert B'(s) \right\rVert_2 \, ds

    using `QUADPACK`_ (via SciPy).

    .. note::

       There is also a Fortran implementation of this function, which
       will be used if it can be built.

    Args:
        nodes (numpy.ndarray): The nodes defining a curve.

    Returns:
        float: The length of the curve.

    Raises:
        OSError: If SciPy is not installed.
    """
    _, num_nodes = np.shape(nodes)
    # NOTE: We somewhat replicate code in ``evaluate_hodograph()``
    #       here. This is so we don't re-compute the nodes for the first
    #       derivative every time it is evaluated.
    first_deriv = (num_nodes - 1) * (nodes[:, 1:] - nodes[:, :-1])
    if num_nodes == 2:
        # NOTE: We convert to 1D to make sure NumPy uses vector norm.
        return np.linalg.norm(first_deriv[:, 0], ord=2)
    if _scipy_int is None:
        raise OSError("This function requires SciPy for quadrature.")

    size_func = functools.partial(vec_size, first_deriv)
    length, _ = _scipy_int.quad(size_func, 0.0, 1.0)
    return length
0.00069
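A standalone sketch of the same idea for a quadratic Bézier curve, integrating the norm of the hodograph directly with SciPy; the module internals `vec_size` and `_scipy_int` are replaced by local equivalents and the control points are arbitrary.
import numpy as np
from scipy import integrate

nodes = np.asfortranarray([[0.0, 1.0, 2.0],
                           [0.0, 2.0, 0.0]])          # 2 x 3: a quadratic curve
first_deriv = 2.0 * (nodes[:, 1:] - nodes[:, :-1])    # hodograph nodes, degree 1

def hodograph_norm(s):
    # De Casteljau on the two derivative nodes, then the Euclidean norm.
    point = (1.0 - s) * first_deriv[:, 0] + s * first_deriv[:, 1]
    return np.linalg.norm(point, ord=2)

length, _ = integrate.quad(hodograph_norm, 0.0, 1.0)
print(length)  # ~2.96, the arc length of this parabolic arc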
def add_new(self, command):
    """Add a new entry to the queue."""
    self.queue[self.next_key] = command
    self.queue[self.next_key]['status'] = 'queued'
    self.queue[self.next_key]['returncode'] = ''
    self.queue[self.next_key]['stdout'] = ''
    self.queue[self.next_key]['stderr'] = ''
    self.queue[self.next_key]['start'] = ''
    self.queue[self.next_key]['end'] = ''
    self.next_key += 1
    self.write()
0.00431
def visit_Tuple(self, node):
    '''
    A tuple is abstracted as an ordered container of its values

    >>> from pythran import passmanager
    >>> pm = passmanager.PassManager('demo')
    >>> module = ast.parse('def foo(a, b): return a, b')
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Tuple)
    (a, b) => ['|[0]=a|', '|[1]=b|']

    where the |[i]=id| notation means something that may contain ``id``
    at index ``i``.
    '''
    if node.elts:
        elts_aliases = set()
        for i, elt in enumerate(node.elts):
            elt_aliases = self.visit(elt)
            elts_aliases.update(ContainerOf(alias, i)
                                for alias in elt_aliases)
    else:
        elts_aliases = None
    return self.add(node, elts_aliases)
0.002286
def get_users(self, email):
    """
    Make sure users are staff users.

    In addition to the other PasswordResetForm conditions, ensure that
    the user is a staff user before sending them a password reset email.

    :param email: Textual email address.
    :return: List of users.
    """
    # Django 1.8 supports this feature.
    if hasattr(super(PasswordResetForm, self), 'get_users'):
        return (
            u for u in super(PasswordResetForm, self).get_users(email)
            if u.is_staff and u.is_active
        )

    # For Django < 1.8 we can do this manually.
    active_users = get_user_model()._default_manager.filter(
        email__iexact=email, is_active=True)
    return (u for u in active_users
            if u.has_usable_password() and u.is_staff and u.is_active)
0.004651
def save(self, filename, chunk_size=CHUNK_SIZE):
    """
    Write file to disk

    :param filename: destination path on disk
    :param chunk_size: number of bytes to read per iteration
    """
    with open(filename, 'wb') as fp:
        while True:
            # Chunk writer
            data = self.file.read(chunk_size)
            if not data:
                break
            fp.write(data)
        # Flush all data
        fp.flush()
    # Go to start of file.
    if self.file.seekable():
        self.file.seek(0)
0.003766
def get(datasets_identifiers, identifier_type='hid', history_id=None):
    """
    Given the history_id that is displayed to the user, this function will
    download the file[s] from the history and store them under /import/.
    Return value[s] are the path[s] to the dataset[s] stored under /import/.
    """
    history_id = history_id or os.environ['HISTORY_ID']
    # The object version of bioblend is too slow in retrieving all datasets
    # from a history, so fall back to the non-object path.
    gi = get_galaxy_connection(history_id=history_id, obj=False)
    for dataset_identifier in datasets_identifiers:
        file_path = '/import/%s' % dataset_identifier
        log.debug('Downloading gx=%s history=%s dataset=%s', gi, history_id, dataset_identifier)
        # Cache the file requests. E.g. in the example of someone doing something
        # silly like a get() for a Galaxy file in a for-loop, wouldn't want to
        # re-download every time and add that overhead.
        if not os.path.exists(file_path):
            hc = HistoryClient(gi)
            dc = DatasetClient(gi)
            history = hc.show_history(history_id, contents=True)
            datasets = {ds[identifier_type]: ds['id'] for ds in history}
            if identifier_type == 'hid':
                dataset_identifier = int(dataset_identifier)
            dc.download_dataset(datasets[dataset_identifier], file_path=file_path, use_default_filename=False)
        else:
            log.debug('Cached, not re-downloading')

    return file_path
0.003243
def udf(f):
    """Create a SQLite scalar UDF from `f`

    Parameters
    ----------
    f
        A callable object

    Returns
    -------
    callable
        A callable object that returns ``None`` if any of its inputs are
        ``None``.
    """
    @functools.wraps(f)
    def wrapper(*args):
        if any(arg is None for arg in args):
            return None
        return f(*args)

    _SQLITE_UDF_REGISTRY.add(wrapper)
    return wrapper
0.002203
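A usage sketch for the decorator above, registering the wrapped function on an in-memory sqlite3 connection by hand; the module's own `_SQLITE_UDF_REGISTRY` bookkeeping is left aside here.
import sqlite3

@udf
def reverse_string(s):
    return s[::-1]

con = sqlite3.connect(':memory:')
con.create_function('reverse_string', 1, reverse_string)  # the wrapper is what gets registered

print(con.execute("SELECT reverse_string('abc'), reverse_string(NULL)").fetchone())
# ('cba', None) -- NULL inputs short-circuit to None instead of raising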
def set_fan_timer(self, timer):
    """
    :param timer: an int within fan_timer_range
    :return: nothing
    """
    desired_state = {"timer": timer}
    resp = self.api_interface.set_device_state(self, {
        "desired_state": desired_state
    })
    self._update_state_from_response(resp)
0.005952
def pca_svd(x):
    """Calculate PCA using SVD.

    Parameters
    ----------
    x : ndarray, shape (channels, samples)
        Two-dimensional input data.

    Returns
    -------
    w : ndarray, shape (channels, channels)
        Eigenvectors (principal components) (in columns).
    s : ndarray, shape (channels,)
        Eigenvalues.
    """
    w, s, _ = np.linalg.svd(x, full_matrices=False)
    return w, s ** 2
0.006928
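A quick check of the SVD-based PCA above on random data; the 4-channel, 100-sample shape is arbitrary.
import numpy as np

x = np.random.randn(4, 100)              # (channels, samples)
x -= x.mean(axis=1, keepdims=True)       # PCA assumes zero-mean channels
w, s = pca_svd(x)
print(w.shape, s.shape)                  # (4, 4) (4,)
print(np.allclose(w.T @ w, np.eye(4)))   # True: component columns are orthonormal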
def add_group(id, description=None):
    """ Adds a group to DCOS Enterprise. If no description is provided,
        the id will be used for the description.

        :param id: group id
        :type id: str
        :param description: description of the group
        :type description: str
    """
    if not description:
        description = id

    data = {
        'description': description
    }

    acl_url = urljoin(_acl_url(), 'groups/{}'.format(id))
    try:
        r = http.put(acl_url, json=data)
        assert r.status_code == 201
    except DCOSHTTPException as e:
        if e.response.status_code != 409:
            raise
0.001595
def _action__get(self):
    """
    Get/set the form's ``action`` attribute.
    """
    base_url = self.base_url
    action = self.get('action')
    if base_url and action is not None:
        return urljoin(base_url, action)
    else:
        return action
0.006803
def write_aims(filename, atoms):
    """Method to write FHI-aims geometry files in phonopy context."""

    lines = ""
    lines += "# geometry.in for FHI-aims \n"
    lines += "# | generated by phonopy.FHIaims.write_aims() \n"

    lattice_vector_line = "lattice_vector " + "%16.16f " * 3 + "\n"
    for vec in atoms.get_cell():
        lines += lattice_vector_line % tuple(vec)

    N = atoms.get_number_of_atoms()

    atom_line = "atom " + "%16.16f " * 3 + "%s \n"
    positions = atoms.get_positions()
    symbols = atoms.get_chemical_symbols()

    initial_moment_line = "initial_moment %16.6f\n"
    magmoms = atoms.get_magnetic_moments()

    for n in range(N):
        lines += atom_line % (tuple(positions[n]) + (symbols[n],))
        if magmoms is not None:
            lines += initial_moment_line % magmoms[n]

    with open(filename, 'w') as f:
        f.write(lines)
0.00114
def main():
    """Execute all checks."""
    check_python_version()
    check_python_modules()
    check_executables()
    home = os.path.expanduser("~")
    print("\033[1mCheck files\033[0m")
    rcfile = os.path.join(home, ".hwrtrc")
    if os.path.isfile(rcfile):
        print("~/.hwrtrc... %sFOUND%s" % (Bcolors.OKGREEN, Bcolors.ENDC))
    else:
        print("~/.hwrtrc... %sNOT FOUND%s" % (Bcolors.FAIL, Bcolors.ENDC))
    misc_path = pkg_resources.resource_filename('hwrt', 'misc/')
    print("misc-path: %s" % misc_path)
0.001792
def ITE(s, d1, d0, simplify=True):
    """Expression If-Then-Else (ITE) operator

    If *simplify* is ``True``, return a simplified expression.
    """
    s = Expression.box(s).node
    d1 = Expression.box(d1).node
    d0 = Expression.box(d0).node
    y = exprnode.ite(s, d1, d0)
    if simplify:
        y = y.simplify()
    return _expr(y)
0.002915
def _parse_info(self, data):
    """Parse the first line of a GNTP message to get security and other info values

    :param string data: GNTP Message
    :return dict: Parsed GNTP Info line
    """
    match = GNTP_INFO_LINE.match(data)

    if not match:
        raise errors.ParseError('ERROR_PARSING_INFO_LINE')

    info = match.groupdict()
    if info['encryptionAlgorithmID'] == 'NONE':
        info['encryptionAlgorithmID'] = None

    return info
0.03271
def in_labelset(xmrs, nodeids, label=None):
    """
    Test if all nodeids share a label.

    Args:
        nodeids: iterable of nodeids
        label (str, optional): the label that all nodeids must share
    Returns:
        bool: `True` if all nodeids share a label, otherwise `False`
    """
    nodeids = set(nodeids)
    if label is None:
        label = xmrs.ep(next(iter(nodeids))).label
    return nodeids.issubset(xmrs._vars[label]['refs']['LBL'])
0.002179
def inverse(self, name=None):
  """Returns a `sonnet` module to compute inverse affine transforms.

  The function first assembles a network that given the constraints of the
  current AffineGridWarper and a set of input parameters, retrieves the
  coefficients of the corresponding inverse affine transform, then feeds its
  output into a new AffineGridWarper setup to correctly warp the `output`
  space into the `source` space.

  Args:
    name: Name of module implementing the inverse grid transformation.

  Returns:
    A `sonnet` module performing the inverse affine transform of a reference
    grid of points via an AffineGridWarper module.

  Raises:
    tf.errors.UnimplementedError: If the function is called on a non 2D
      instance of AffineGridWarper.
  """
  if self._num_coeff != 6:
    raise tf.errors.UnimplementedError('AffineGridWarper currently supports '
                                       'inversion only for the 2D case.')

  def _affine_grid_warper_inverse(inputs):
    """Assembles network to compute inverse affine transformation.

    Each `inputs` row potentially contains [a, b, tx, c, d, ty]
    corresponding to an affine matrix:

      A = [a, b, tx],
          [c, d, ty]

    We want to generate a tensor containing the coefficients of the
    corresponding inverse affine transformation in a constraints-aware
    fashion. Calling M:

      M = [a, b]
          [c, d]

    the affine matrix for the inverse transform is:

      A_in = [M^(-1), M^-1 * [-tx, -ty]^T]

    where

      M^(-1) = (ad - bc)^(-1) * [ d, -b]
                                [-c,  a]

    Args:
      inputs: Tensor containing a batch of transformation parameters.

    Returns:
      A tensorflow graph performing the inverse affine transformation
      parametrized by the input coefficients.
    """
    batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
    constant_shape = tf.concat([batch_size, tf.convert_to_tensor((1,))], 0)

    index = iter(range(6))
    def get_variable(constraint):
      if constraint is None:
        i = next(index)
        return inputs[:, i:i+1]
      else:
        return tf.fill(constant_shape, tf.constant(constraint,
                                                   dtype=inputs.dtype))

    constraints = chain.from_iterable(self.constraints)
    a, b, tx, c, d, ty = (get_variable(constr) for constr in constraints)

    det = a * d - b * c
    a_inv = d / det
    b_inv = -b / det
    c_inv = -c / det
    d_inv = a / det

    m_inv = basic.BatchReshape(
        [2, 2])(tf.concat([a_inv, b_inv, c_inv, d_inv], 1))

    txy = tf.expand_dims(tf.concat([tx, ty], 1), 2)

    txy_inv = basic.BatchFlatten()(tf.matmul(m_inv, txy))
    tx_inv = txy_inv[:, 0:1]
    ty_inv = txy_inv[:, 1:2]

    inverse_gw_inputs = tf.concat(
        [a_inv, b_inv, -tx_inv, c_inv, d_inv, -ty_inv], 1)

    agw = AffineGridWarper(self.output_shape,
                           self.source_shape)

    return agw(inverse_gw_inputs)  # pylint: disable=not-callable

  if name is None:
    name = self.module_name + '_inverse'
  return base.Module(_affine_grid_warper_inverse, name=name)
0.009135
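A plain-NumPy sanity check of the inversion formula in the docstring above (no TensorFlow); the sample coefficients are arbitrary.
import numpy as np

a, b, tx, c, d, ty = 1.5, 0.2, 3.0, -0.4, 0.9, -1.0
A = np.array([[a, b, tx],
              [c, d, ty],
              [0, 0, 1.0]])

det = a * d - b * c
M_inv = np.array([[d, -b], [-c, a]]) / det
t_inv = M_inv @ np.array([-tx, -ty])
A_inv = np.vstack([np.column_stack([M_inv, t_inv]), [0, 0, 1.0]])

print(np.allclose(A @ A_inv, np.eye(3)))  # True: the formula inverts A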
def read_in_admin1(filepath):
    """
    Small helper function to read in an admin1 code <--> admin1 name document.

    Parameters
    ----------
    filepath: string
              path to the admin1 mapping JSON. This file is usually
              mordecai/resources/data/admin1CodesASCII.json

    Returns
    -------
    admin1_dict: dictionary
                 keys are country + admin1codes, values are names
                 Example: "US.OK" : "Oklahoma"
                 Example: "SE.21": "Uppsala"
    """
    with open(filepath) as admin1file:
        admin1_dict = json.loads(admin1file.read())
    return admin1_dict
0.001592
def addScalarBar(self, c=None, title="", horizontal=False, vmin=None, vmax=None):
    """
    Add a 2D scalar bar to actor.

    .. hint:: |mesh_bands| |mesh_bands.py|_
    """
    # book it, it will be created by Plotter.show() later
    self.scalarbar = [c, title, horizontal, vmin, vmax]
    return self
0.008982
def normalize_name(name):
    """
    Given a key name (e.g. "LEFT CONTROL"), clean up the string and convert
    to the canonical representation (e.g. "left ctrl") if one is known.
    """
    if not name or not isinstance(name, basestring):
        raise ValueError('Can only normalize non-empty string names. Unexpected ' + repr(name))

    if len(name) > 1:
        name = name.lower()
    if name != '_' and '_' in name:
        name = name.replace('_', ' ')

    return canonical_names.get(name, name)
0.005929
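Illustrative calls for the helper above; the output for 'LEFT CONTROL' assumes the module's canonical_names table maps 'left control' to 'left ctrl'.
print(normalize_name('LEFT CONTROL'))   # 'left ctrl' (via canonical_names)
print(normalize_name('page_down'))      # 'page down'
print(normalize_name('A'))              # 'A' -- single characters keep their case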
def get(self):
    """
    *get the check_coverage object*

    **Return:**
        - ``check_coverage``
    """
    self.log.info('starting the ``get`` method')

    match = self._query_sdss()

    self.log.info('completed the ``get`` method')
    return match
0.006734
def _make_association(self, clk=None, rst=None) -> None:
    """
    Associate this object with specified clk/rst
    """
    if clk is not None:
        assert self._associatedClk is None
        self._associatedClk = clk

    if rst is not None:
        assert self._associatedRst is None
        self._associatedRst = rst
0.005556
def validate_changeset(changeset):
    """Validate a changeset is compatible with Amazon's API spec.

    Args:
        changeset: lxml.etree.Element (<ChangeResourceRecordSetsRequest>)
    Returns:
        [ errors ] list of error strings or []."""

    errors = []
    changes = changeset.findall('.//{%s}Change' % R53_XMLNS)
    num_changes = len(changes)
    if num_changes == 0:
        errors.append('changeset must have at least one <Change> element')
    if num_changes > 100:
        errors.append('changeset has %d <Change> elements: max is 100' % num_changes)

    rrs = changeset.findall('.//{%s}ResourceRecord' % R53_XMLNS)
    num_rrs = len(rrs)
    if num_rrs > 1000:
        errors.append('changeset has %d ResourceRecord elements: max is 1000' % num_rrs)

    values = changeset.findall('.//{%s}Value' % R53_XMLNS)
    num_chars = 0
    for value in values:
        num_chars += len(value.text)
    if num_chars > 10000:
        errors.append('changeset has %d chars in <Value> text: max is 10000' % num_chars)

    return errors
0.01848
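A hedged sketch of exercising the validator with lxml; the R53_XMLNS value and the minimal element layout below are assumptions (the real Route 53 schema nests more elements), chosen only to satisfy the checks the validator performs.
from lxml import etree

R53_XMLNS = 'https://route53.amazonaws.com/doc/2012-02-29/'  # assumed namespace value
def E(tag):
    return '{%s}%s' % (R53_XMLNS, tag)

changeset = etree.Element(E('ChangeResourceRecordSetsRequest'))
batch = etree.SubElement(changeset, E('ChangeBatch'))
changes = etree.SubElement(batch, E('Changes'))
change = etree.SubElement(changes, E('Change'))
value = etree.SubElement(change, E('Value'))
value.text = '192.0.2.1'

print(validate_changeset(changeset))  # [] -- one small change passes every limit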
def reign_year_to_ad(reign_year: int, reign: int) -> int:
    """
    Reign year of Chakri dynasty, Thailand
    """
    if int(reign) == 10:
        ad = int(reign_year) + 2015
    elif int(reign) == 9:
        ad = int(reign_year) + 1945
    elif int(reign) == 8:
        ad = int(reign_year) + 1928
    elif int(reign) == 7:
        ad = int(reign_year) + 1924
    else:
        # Guard against reigns the table above does not cover; otherwise
        # ``ad`` would be unbound when returned below.
        raise ValueError("reign must be 7, 8, 9 or 10")
    return ad
0.002653
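Two illustrative conversions for the helper above.
print(reign_year_to_ad(4, 10))   # 4th reign year of Rama X  -> 2019
print(reign_year_to_ad(60, 9))   # 60th reign year of Rama IX -> 2005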
def update_lbaas_pool(self, lbaas_pool, body=None):
    """Updates a lbaas_pool."""
    return self.put(self.lbaas_pool_path % (lbaas_pool),
                    body=body)
0.010929
def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    a, b = tee(iterable)
    next(b, None)
    return izip(a, b)
0.007634
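A quick check of the sliding-window helper above; it relies on `tee` and `izip` from itertools, so this is Python 2 (use the built-in `zip` on Python 3).
from itertools import tee, izip  # Python 2

print(list(pairwise([1, 2, 3, 4])))  # [(1, 2), (2, 3), (3, 4)]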
def assign_extension_to_users(self, body):
    """AssignExtensionToUsers.
    [Preview API] Assigns the access to the given extension for a given list of users
    :param :class:`<ExtensionAssignment> <azure.devops.v5_0.licensing.models.ExtensionAssignment>` body: The extension assignment details.
    :rtype: [ExtensionOperationResult]
    """
    content = self._serialize.body(body, 'ExtensionAssignment')
    response = self._send(http_method='PUT',
                          location_id='8cec75ea-044f-4245-ab0d-a82dafcc85ea',
                          version='5.0-preview.1',
                          content=content)
    return self._deserialize('[ExtensionOperationResult]', self._unwrap_collection(response))
0.007853
def per_callback_query_chat_id(types='all'):
    """
    :param types:
        ``all`` or a list of chat types (``private``, ``group``, ``channel``)

    :return:
        a seeder function that returns a callback query's originating chat id
        if the chat type is in ``types``.
    """
    def f(msg):
        if (flavor(msg) == 'callback_query' and 'message' in msg
                and (types == 'all' or msg['message']['chat']['type'] in types)):
            return msg['message']['chat']['id']
        else:
            return None
    return f
0.00365
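An illustrative call of the seeder above on a hand-built callback-query dict; this assumes telepot's `flavor()` classifies such a dict as 'callback_query', and the dict carries only the fields the seeder reads.
seed = per_callback_query_chat_id(types=['private'])

callback_query = {
    'id': '42',
    'from': {'id': 1, 'is_bot': False, 'first_name': 'Ada'},
    'data': 'noop',
    'message': {'message_id': 7, 'chat': {'id': 1234, 'type': 'private'}},
}
print(seed(callback_query))  # 1234 -- private chats are accepted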
def route(regex, method, name):
    """
    Route the decorated view.

    :param regex: A string describing a regular expression to which the request path will be matched.
    :param method: A string describing the HTTP method that this view accepts.
    :param name: A string describing the name of the URL pattern.

    ``regex`` may also be a lambda that accepts the parent resource's ``prefix``
    argument and returns a string describing a regular expression to which the
    request path will be matched.

    ``name`` may also be a lambda that accepts the parent resource's ``views``
    argument and returns a string describing the name of the URL pattern.
    """
    def decorator(function):
        function.route = routes.route(
            regex = regex,
            view = function.__name__,
            method = method,
            name = name
        )

        @wraps(function)
        def wrapper(self, *args, **kwargs):
            return function(self, *args, **kwargs)
        return wrapper

    return decorator
0.014409
def fit(self, X, y=None, input_type='affinity'):
    """
    Fit the model from data in X.

    Parameters
    ----------
    input_type : string, one of: 'similarity', 'distance' or 'data'.
        The values of input data X. (default = 'data')
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples
        and n_features is the number of features.

        If self.input_type is similarity:
        X : array-like, shape (n_samples, n_samples),
        copy the similarity matrix X to S.
    """
    X = self._validate_input(X, input_type)
    self.fit_geometry(X, input_type)
    random_state = check_random_state(self.random_state)
    (self.embedding_,
     self.eigen_vectors_,
     self.P_) = spectral_clustering(self.geom_,
                                    K=self.K,
                                    eigen_solver=self.eigen_solver,
                                    random_state=self.random_state,
                                    solver_kwds=self.solver_kwds,
                                    renormalize=self.renormalize,
                                    stabalize=self.stabalize,
                                    additional_vectors=self.additional_vectors)
0.022973
def UniversalCRTSdkDir(self):
    """
    Microsoft Universal CRT SDK directory.
    """
    # Set Kit Roots versions for specified MSVC++ version
    if self.vc_ver >= 14.0:
        vers = ('10', '81')
    else:
        vers = ()

    # Find path of the most recent Kit
    sdkdir = None  # avoid an unbound name if no Kit Roots version applies
    for ver in vers:
        sdkdir = self.ri.lookup(self.ri.windows_kits_roots,
                                'kitsroot%s' % ver)
        if sdkdir:
            break
    return sdkdir or ''
0.003817
def i2repr(self, pkt, x):
    """Convert internal value to a nice representation"""
    if len(hex(self.i2m(pkt, x))) < 7:  # short address
        return hex(self.i2m(pkt, x))
    else:  # long address
        x = "%016x" % self.i2m(pkt, x)
        return ":".join(["%s%s" % (x[i], x[i + 1]) for i in range(0, len(x), 2)])
0.008646
def get_cytoband_maps(names=[]):
    """Load all cytoband maps

    >>> maps = get_cytoband_maps()
    >>> maps["ucsc-hg38"]["1"]["p32.2"]
    [55600000, 58500000, 'gpos50']
    >>> maps["ucsc-hg19"]["1"]["p32.2"]
    [56100000, 59000000, 'gpos50']

    """
    if names == []:
        names = get_cytoband_names()
    return {name: get_cytoband_map(name) for name in names}
0.002681