def _get_vcap_services(vcap_services=None):
    vcap_services = vcap_services or os.environ.get('VCAP_SERVICES')
    if not vcap_services:
        raise ValueError(
            "VCAP_SERVICES information must be supplied as a parameter or "
            "as the VCAP_SERVICES environment variable")
    if isinstance(vcap_services, dict):
        return vcap_services
    try:
        # First, try to parse the value as a JSON string.
        vcap_services = json.loads(vcap_services)
    except ValueError:
        try:
            # Otherwise, treat the value as the path to a file containing JSON.
            with open(vcap_services) as vcap_json_data:
                vcap_services = json.load(vcap_json_data)
        except (IOError, ValueError):
            raise ValueError(
                "VCAP_SERVICES information is not JSON or a file containing "
                "JSON:", vcap_services)
    return vcap_services
Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If `vcap_services` is not specified, the information is taken from the VCAP_SERVICES environment variable. Args: vcap_services (str): Parsed as a JSON string; if that fails, opened as a file containing JSON. vcap_services (dict): Returned as-is. Returns: dict: A dict representation of the VCAP Services information. Raises: ValueError: * if neither `vcap_services` nor the VCAP_SERVICES environment variable is specified. * if `vcap_services` cannot be parsed as a JSON string or opened as a file.
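A short usage sketch (the service name "example-service" is hypothetical; only the VCAP_SERVICES variable name comes from the docstring above):

import json
import os

# As a dict, the value is returned unchanged.
services = _get_vcap_services({"example-service": []})

# As a JSON string, or as a path to a file containing JSON.
services = _get_vcap_services('{"example-service": []}')

# Fall back to the environment variable.
os.environ["VCAP_SERVICES"] = '{"example-service": []}'
services = _get_vcap_services()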
def _parse_args():
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=_CliFormatter)
    # The literal option strings and help text did not survive extraction;
    # each Ellipsis marks a missing argument.
    parser.add_argument(..., ..., action=..., help=...)
    fb_group = parser.add_argument_group(...)
    fb_group.add_argument(..., ..., help=(...))
    fb_group.add_argument(..., help=...)
    fb_group.add_argument(..., help=...)
    fb_group.add_argument(..., help=...)
    error_group = parser.add_argument_group(...)
    error_group.add_argument(..., ..., help=...)
    error_group.add_argument(..., help=...)
    error_group.add_argument(..., help=(...))
    parser.set_defaults(**_defaults())
    return parser.parse_args()
Parse and return command line arguments.
def _union_with_dsis(self, dsis):
    copied = self.copy()
    for a in dsis._si_set:
        copied = copied.union(a)
    if isinstance(copied, DiscreteStridedIntervalSet):
        copied._update_bounds(dsis)
    return copied.normalize()
Union with another DiscreteStridedIntervalSet. :param dsis: :return:
def silent(duration=1000, frame_rate=11025):
    seg = pydub.AudioSegment.silent(duration=duration, frame_rate=frame_rate)
    return AudioSegment(seg, "")
Creates an AudioSegment object of the specified duration/frame_rate filled with digital silence. :param duration: The duration of the returned object in ms. :param frame_rate: The samples per second of the returned object. :returns: AudioSegment object filled with pure digital silence.
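A short usage sketch of the defaults described above:

# One second of digital silence at the default 11025 Hz sample rate.
quiet = silent()

# Half a second at CD sample rate.
quiet_44k = silent(duration=500, frame_rate=44100)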
def get_attrs_by_path(self, field_path, stop_first=False):
    index_list, next_field = self._get_indexes_by_path(field_path)
    values = []
    for idx in index_list:
        if next_field:
            try:
                res = self[idx].get_attrs_by_path(next_field, stop_first=stop_first)
                if res is None:
                    continue
                values.extend(res)
                if stop_first and len(values):
                    break
            except AttributeError:
                pass
        else:
            if stop_first:
                return [self[idx], ]
            values.append(self[idx])
    return values if len(values) else None
Returns a list of values looked up by field path. A field path is a dot-separated string path: ``parent_field.child_field``. :param field_path: field path. It allows ``*`` as a wildcard. :type field_path: str :param stop_first: Stop iteration on the first value looked up. Default: False. :type stop_first: bool :return: list of matched values, or None if nothing matched
def three_sum(array):
    res = set()
    array.sort()
    for i in range(len(array) - 2):
        if i > 0 and array[i] == array[i - 1]:
            continue
        l, r = i + 1, len(array) - 1
        while l < r:
            s = array[i] + array[l] + array[r]
            if s > 0:
                r -= 1
            elif s < 0:
                l += 1
            else:
                res.add((array[i], array[l], array[r]))
                while l < r and array[l] == array[l + 1]:
                    l += 1
                while l < r and array[r] == array[r - 1]:
                    r -= 1
                l += 1
                r -= 1
    return res
:param array: List[int] :return: Set[ Tuple[int, int, int] ]
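For example:

>>> three_sum([-1, 0, 1, 2, -1, -4])
{(-1, -1, 2), (-1, 0, 1)}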
def search_response(self, request):
    logger.debug("Cache Search Response")
    if self.cache.is_empty() is True:
        logger.debug("Empty Cache")
        return None
    if self.mode == defines.FORWARD_PROXY:
        search_key = CacheKey(request)
    else:
        search_key = ReverseCacheKey(request)
    response = self.cache.get(search_key)
    return response
Creates a key from the request and searches the cache with it. :param request: the request to look up :return CacheElement: the cached element, or None if there's a cache miss
def isUserCert(self, name):
    # The path literals were lost in extraction; 'users/<name>.crt' follows
    # the docstring's description of a user certificate.
    crtpath = self._getPathJoin('users', '%s.crt' % name)
    return os.path.isfile(crtpath)
Checks if a user certificate exists. Args: name (str): The name of the user keypair. Examples: Check if the user cert "myuser" exists: exists = cdir.isUserCert('myuser') Returns: bool: True if the certificate is present, False otherwise.
def check_apartment_number(self, token):
    # The regex and string literals did not survive extraction; each Ellipsis
    # marks a missing pattern, and the ' ' joiner below is an assumption.
    apartment_regexes = [...]
    for regex in apartment_regexes:
        if re.match(regex, token.lower()):
            self.apartment = self._clean(token)
            return True
    if self.apartment and token.lower() in [...]:
        self.apartment = self._clean(token + ' ' + self.apartment)
        return True
    if not self.street_suffix and not self.street and not self.apartment:
        if re.match(..., token.lower()):
            self.apartment = self._clean(token)
            return True
    return False
Finds an apartment, unit, #, etc., regardless of its position in the string. This needs to come after everything else has been ruled out, because it has a lot of false positives.
def video_bitwise_bottom(x, model_hparams, vocab_size):
    pixel_embedding_size = 64
    inputs = x
    with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE):
        common_layers.summarize_video(inputs, "bottom")
        assert vocab_size == 256
        embedded = discretization.int_to_bit_embed(inputs, 8,
                                                   pixel_embedding_size)
        return tf.layers.dense(
            embedded,
            model_hparams.hidden_size,
            name="merge_pixel_embedded_frames")
Bottom transformation for embedding video bitwise.
def _build_loop(self, lexer):
    fields = []
    values = []
    token = next(lexer)
    while token[0] == u"_":
        fields.append(token[1:])
        token = next(lexer)
    while token != u"stop_":
        values.append(token)
        token = next(lexer)
    assert len(values) % len(fields) == 0, \
        "Error in loop construction: number of values must be a multiple of the number of fields."
    values = [OrderedDict(zip(fields, values[i:i + len(fields)]))
              for i in range(0, len(values), len(fields))]
    return fields, values
Build saveframe loop. :param lexer: instance of lexical analyzer. :type lexer: :func:`~nmrstarlib.bmrblex.bmrblex` :return: Fields and values of the loop. :rtype: :py:class:`tuple`
def find_packages(path: str) -> List[str]:
    ret = []
    for root, _dir, files in os.walk(path):
        if "__init__.py" in files:
            ret.append(root.replace("/", "."))
    return sorted(ret)
A better version of find_packages than what setuptools offers This function needs to be deterministic. :param path: :return:
def list_remote(local_root):
    # The string literals in this function were lost in extraction; they are
    # reconstructed from the git ls-remote workflow described in the docstring.
    command = ['git', 'ls-remote', '--heads', '--tags']
    try:
        output = run_command(local_root, command)
    except CalledProcessError as exc:
        raise GitError('Git failed to list remote refs.', exc.output)
    if '^{}' in output:
        parsed = list()
        for group in (m.groupdict() for m in RE_REMOTE.finditer(output)):
            dereferenced = group['name'].endswith('^{}')
            name, kind = group['name'][:-3], group['kind']
            if dereferenced and parsed and kind == parsed[-1]['kind'] == 'tags' and name == parsed[-1]['name']:
                parsed[-1]['sha'] = group['sha']
            else:
                parsed.append(group)
    else:
        parsed = [m.groupdict() for m in RE_REMOTE.finditer(output)]
    return [[i['sha'], i['name'], i['kind']] for i in parsed]
Get remote branch/tag latest SHAs. :raise GitError: When git ls-remote fails. :param str local_root: Local path to git root directory. :return: List of tuples containing strings. Each tuple is sha, name, kind. :rtype: list
def read(self, size=-1):
    self._check_open()
    if not self._remaining():
        return ''
    data_list = []
    while True:
        remaining = self._buffer.remaining()
        if size >= 0 and size < remaining:
            data_list.append(self._buffer.read(size))
            self._offset += size
            break
        else:
            size -= remaining
            self._offset += remaining
            data_list.append(self._buffer.read())
            if self._buffer_future is None:
                if size < 0 or size >= self._remaining():
                    needs = self._remaining()
                else:
                    needs = size
                data_list.extend(self._get_segments(self._offset, needs))
                self._offset += needs
                break
            if self._buffer_future:
                self._buffer.reset(self._buffer_future.get_result())
                self._buffer_future = None
    if self._buffer_future is None:
        self._request_next_buffer()
    return ''.join(data_list)
Read data from RAW file. Args: size: Number of bytes to read as integer. Actual number of bytes read is always equal to size unless EOF is reached. If size is negative or unspecified, read the entire file. Returns: data read as str. Raises: IOError: When this buffer is closed.
def register(linter):
    linter.register_checker(BasicErrorChecker(linter))
    linter.register_checker(BasicChecker(linter))
    linter.register_checker(NameChecker(linter))
    linter.register_checker(DocStringChecker(linter))
    linter.register_checker(PassChecker(linter))
    linter.register_checker(ComparisonChecker(linter))
required method to auto register this checker
def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False):
    if ssbio.utils.is_ipynb():
        import nglview as nv
    else:
        raise EnvironmentError('Not in a Jupyter notebook environment')
    if not self.structure_file:
        raise ValueError("Structure file not loaded")
    only_chains = ssbio.utils.force_list(only_chains)
    # The NGL selection strings and file-type literals below are
    # reconstructed; the originals were lost in extraction.
    to_show_chains = '( '
    for c in only_chains:
        to_show_chains += ':{} or '.format(c)
    to_show_chains = to_show_chains.strip(' or ')
    to_show_chains += ' )'
    if self.file_type == 'mmtf' or self.file_type == 'mmtf.gz':
        view = nv.NGLWidget()
        view.add_component(self.structure_path)
    else:
        view = nv.show_structure_file(self.structure_path, gui=gui)
    if recolor:
        view.clear_representations()
        if only_chains:
            view.add_cartoon(selection='{}'.format(to_show_chains),
                             color='silver', opacity=opacity)
        else:
            view.add_cartoon(selection='protein', color='silver',
                             opacity=opacity)
    elif only_chains:
        view.clear_representations()
        view.add_cartoon(selection='{}'.format(to_show_chains),
                         color='silver', opacity=opacity)
    return view
Use NGLviewer to display a structure in a Jupyter notebook Args: only_chains (str, list): Chain ID or IDs to display opacity (float): Opacity of the structure recolor (bool): If structure should be cleaned and recolored to silver gui (bool): If the NGLview GUI should show up Returns: NGLviewer object
def _len_tube(Flow, Diam, HeadLoss, conc_chem, temp, en_chem, KMinor):
    num1 = pc.gravity.magnitude * HeadLoss * np.pi * (Diam**4)
    denom1 = 128 * viscosity_kinematic_chem(conc_chem, temp, en_chem) * Flow
    num2 = Flow * KMinor
    denom2 = 16 * np.pi * viscosity_kinematic_chem(conc_chem, temp, en_chem)
    length = (num1 / denom1) - (num2 / denom2)  # renamed from `len`, which shadows the builtin
    return length.magnitude
Length of tube required to get desired head loss at maximum flow based on the Hagen-Poiseuille equation.
def get(self, index, id, fields=None, doc_type=EsConst.ALL_VALUES, **query_params):
    if fields:
        query_params[EsConst.FIELDS] = fields
    path = self._es_parser.make_path(index, doc_type, id)
    result = yield self._perform_request(HttpMethod.GET, path, params=query_params)
    returnValue(result)
Retrieve a specific record by id `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_ :param index: the index name to query :param id: the id of the record :param fields: the fields you want to fetch from the record (str, comma-separated) :param doc_type: the doc type to search in :param query_params: additional query parameters :return:
def pair_visual(*args, **kwargs):
    warnings.warn("`pair_visual` has moved to `cleverhans.plot.pyplot_image`. "
                  "cleverhans.utils.pair_visual may be removed on or after "
                  "2019-04-24.")
    from cleverhans.plot.pyplot_image import pair_visual as new_pair_visual
    return new_pair_visual(*args, **kwargs)
Deprecation wrapper
def _HandleHomepage(self, request): _ = request env = jinja2.Environment( loader=jinja2.FileSystemLoader(config.CONFIG["AdminUI.template_root"]), autoescape=True) create_time = psutil.Process(os.getpid()).create_time() context = { "heading": config.CONFIG["AdminUI.heading"], "report_url": config.CONFIG["AdminUI.report_url"], "help_url": config.CONFIG["AdminUI.help_url"], "timestamp": utils.SmartStr(create_time), "use_precompiled_js": config.CONFIG["AdminUI.use_precompiled_js"], "firebase_api_key": config.CONFIG["AdminUI.firebase_api_key"], "firebase_auth_domain": config.CONFIG["AdminUI.firebase_auth_domain"], "firebase_auth_provider": config.CONFIG["AdminUI.firebase_auth_provider"], "grr_version": config.CONFIG["Source.version_string"] } template = env.get_template("base.html") response = werkzeug_wrappers.Response( template.render(context), mimetype="text/html") try: StoreCSRFCookie(request.user, response) except RequestHasNoUser: pass return response
Renders GRR home page by rendering base.html Jinja template.
def environment_schedule_unset(self, name):
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    self._call("environmentScheduleUnset", in_p=[name])
Schedules unsetting (removing) an environment variable when creating the next guest process. This affects the :py:func:`IGuestSession.environment_changes` attribute. in name of type str Name of the environment variable to unset. This cannot be empty nor can it contain any equal signs.
def write_lock(self):
    me = self._current_thread()
    i_am_writer = self.is_writer(check_pending=False)
    if self.is_reader() and not i_am_writer:
        raise RuntimeError("Reader %s to writer privilege"
                           " escalation not allowed" % me)
    if i_am_writer:
        yield self
    else:
        with self._cond:
            self._pending_writers.append(me)
            while True:
                if len(self._readers) == 0 and self._writer is None:
                    if self._pending_writers[0] == me:
                        self._writer = self._pending_writers.popleft()
                        break
                self._cond.wait()
        try:
            yield self
        finally:
            with self._cond:
                self._writer = None
                self._cond.notify_all()
Context manager that grants a write lock. Will wait until no active readers. Blocks readers after acquiring. Guaranteed for locks to be processed in fair order (FIFO). Raises a ``RuntimeError`` if an active reader attempts to acquire a lock.
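A usage sketch, assuming the generator above is exposed through contextlib.contextmanager on a reader-writer lock class (the class name ReaderWriterLock is assumed, not taken from the source):

lock = ReaderWriterLock()  # hypothetical class exposing write_lock()

def update_shared_state(state, value):
    # Readers are blocked until the writer finishes; pending writers
    # are served in FIFO order.
    with lock.write_lock():
        state['value'] = value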
def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None):
    # The dict key was lost in extraction; 'cache_subnet_group_name' matches
    # the boto naming for cache subnet group records.
    return [g['cache_subnet_group_name']
            for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)]
Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups region=us-east-1
def create_mongo_db(database_name, collection_name, initial_document):
    response_dict = {}
    try:
        # The settings attribute name and default URL were lost in extraction
        # and are reconstructed here.
        mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
                                     'mongodb://localhost:27017')
        mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
        db = mc[str(database_name)]
        collection = db[str(collection_name)]
        d = json.loads(initial_document, object_pairs_hook=OrderedDict)
        collection.save(d)
    except Exception:
        response_dict['error'] = str(sys.exc_info())
    return response_dict
Create a new database and collection by inserting one document.
def from_learner(cls, learn: Learner, ds_type: DatasetType = DatasetType.Valid):
    "Create an instance of `ClassificationInterpretation`"
    preds = learn.get_preds(ds_type=ds_type, with_loss=True)
    return cls(learn, *preds)
Create an instance of `ClassificationInterpretation`
def update_entity(self, entity, agent=None, metadata=None):
    body = {}
    if agent:
        body["agent_id"] = utils.get_id(agent)
    if metadata:
        body["metadata"] = metadata
    if body:
        uri = "/%s/%s" % (self.uri_base, utils.get_id(entity))
        resp, body = self.api.method_put(uri, body=body)
Updates the specified entity's values with the supplied parameters.
def filter_defragment(self, threshold, mode='include', filt=True, samples=None, subset=None):
    if samples is not None:
        subset = self.make_subset(samples)
    samples = self._get_samples(subset)
    for s in samples:
        f = self.data[s].filt.grab_filt(filt)
        # The name/info format strings were lost in extraction and are
        # reconstructed; the 'include' default follows the docstring.
        self.data[s].filt.add(name='defrag_{:s}_{:.0f}'.format(mode, threshold),
                              filt=filters.defrag(f, threshold, mode),
                              info='defrag filter: mode={:s}, threshold={:.0f}'.format(mode, threshold),
                              params=(threshold, mode, filt, samples, subset))
Remove 'fragments' from the calculated filter Parameters ---------- threshold : int Contiguous data regions that contain this number or fewer points are considered 'fragments' mode : str Specifies whether to 'include' or 'exclude' the identified fragments. filt : bool or filt string Which filter to apply the defragmenter to. Defaults to True samples : array_like or None Which samples to apply this filter to. If None, applies to all samples. subset : str or number The subset of samples (defined by make_subset) you want to apply the filter to. Returns ------- None
def deregisterevent(self, event_name):
    if event_name in self._pollEvents._callback:
        del self._pollEvents._callback[event_name]
    return self._remote_deregisterevent(event_name)
Remove callback of registered event @param event_name: Event name in at-spi format. @type event_name: string @return: 1 if registration was successful, 0 if not. @rtype: integer
def get_carrier_concentration(self): return {temp: [1e24 * i / self.vol for i in self._carrier_conc[temp]] for temp in self._carrier_conc}
gives the carrier concentration (in cm^-3) Returns a dictionary {temp:[]} with an array of carrier concentration (in cm^-3) at each temperature The array relates to each step of electron chemical potential
def _page(q, chunk=1000):
    offset = 0
    while True:
        r = False
        for elem in q.limit(chunk).offset(offset):
            r = True
            yield elem
        offset += chunk
        if not r:
            break
Quick utility to page a query, 1000 items at a time. We need this so we don't OOM (out of memory) ourselves loading the world.
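A usage sketch with SQLAlchemy (the `session`, `User` model, and `process` handler are hypothetical; `_page` only needs an object exposing `.limit()`/`.offset()`):

# Iterate a large table 1000 rows at a time instead of loading it whole.
for user in _page(session.query(User)):
    process(user)  # hypothetical per-row handler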
def permissions(self, addr, permissions=None):
    out = self.mem.permissions(addr, permissions)
    self.state.unicorn.uncache_page(addr)
    return out
Retrieve the permissions of the page at address `addr`. :param addr: address to get the page permissions :param permissions: Integer or BVV to optionally set page permissions to :return: AST representing the permissions on the page
def commit_channel(self, channel_id):
    payload = {
        "channel_id": channel_id,
        "stage": config.STAGE,
    }
    response = config.SESSION.post(config.finish_channel_url(), data=json.dumps(payload))
    if response.status_code != 200:
        config.LOGGER.error("\n\nCould not activate channel: {}\n".format(response._content.decode()))
        if response.status_code == 403:
            config.LOGGER.error("Channel can be viewed at {}\n\n".format(config.open_channel_url(channel_id, staging=True)))
        sys.exit()
    response.raise_for_status()
    new_channel = json.loads(response._content.decode("utf-8"))
    # The response key was lost in extraction; 'new_channel' is assumed here.
    channel_link = config.open_channel_url(new_channel['new_channel'])
    return channel_id, channel_link
commit_channel: commits channel to Kolibri Studio Args: channel_id (str): channel's id on Kolibri Studio Returns: channel id and link to uploaded channel
def send_msg_to_clients(client_ids, msg, error=False):
    if error:
        stream = "stderr"
    else:
        stream = "stdout"
    response = [{"message": None, "type": "console", "payload": msg, "stream": stream}]
    for client_id in client_ids:
        logger.info("emitting message to websocket client id " + client_id)
        socketio.emit(
            "gdb_response", response, namespace="/gdb_listener", room=client_id
        )
Send message to all clients
def getAllowedInstruments(self):
    service = self.getAnalysisService()
    if not service:
        return []
    instruments = []
    if self.getInstrumentEntryOfResults():
        instruments = service.getInstruments()
    if self.getManualEntryOfResults():
        for meth in self.getAllowedMethods():
            instruments += meth.getInstruments()
    return list(set(instruments))
Returns the allowed instruments for this analysis, either if the instrument was assigned directly (by using "Allows instrument entry of results") or indirectly via Method (by using "Allows manual entry of results") in Analysis Service edit view. :return: A list of instruments allowed for this Analysis :rtype: list of instruments
def normalize_rgb(r, g, b, a):
    r = int(r, 10)
    g = int(g, 10)
    b = int(b, 10)
    if a:
        a = float(a) * 256
    if r > 255 or g > 255 or b > 255 or (a and a > 255):
        return None
    color = '#%02x%02x%02x' % (r, g, b)
    if a:
        color += '%02x' % int(a)
    return color
Transform a rgb[a] color to #hex[a].
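Assuming the reconstructed hex format strings above, the behaviour would be:

>>> normalize_rgb('255', '128', '0', None)
'#ff8000'
>>> normalize_rgb('255', '128', '0', '0.5')
'#ff800080'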
def close(self):
    if self._closed:
        return
    self._closed = True
    self._pep_service._unclaim(self.node_namespace)
    self._unregister()
Unclaim the PEP node and unregister the registered features. It is not necessary to call close if this claim is managed by :class:`~aioxmpp.pep.register_pep_node`.
def incrementSub(self, amount=1):
    self._subProgressBar.setValue(self.subValue() + amount)
    QApplication.instance().processEvents()
Increments the sub-progress bar by amount.
def get_frames_singleimage(self):
    frame = self.captures[0].read()[1]
    height, width, colors = frame.shape
    # Integer division so the slice indices stay ints under Python 3.
    left_frame = frame[:, :width // 2, :]
    right_frame = frame[:, width // 2:, :]
    return [left_frame, right_frame]
Get current left and right frames from a single image, by splitting the image in half.
def weight_from_comm(self, v, comm): return _c_leiden._MutableVertexPartition_weight_from_comm(self._partition, v, comm)
The total number of edges (or sum of weights) to node ``v`` from community ``comm``. See Also -------- :func:`~VertexPartition.MutableVertexPartition.weight_to_comm`
def _compile_schema(self, schema):
    assert self.matcher == schema.matcher
    self.name = schema.name
    self.compiled_type = schema.compiled_type
    return schema.compiled
Compile another schema
def register_result(self, job, skip_sanity_checks=False):
    # The middle of this function was garbled in extraction; the job
    # unpacking and assert messages are reconstructed from the surviving
    # fragments ("Configurations differ!", "RUNNING", "t scheduled for a run.").
    if self.is_finished:
        raise RuntimeError("This HB iteration is finished, you can't register more results!")
    config_id = job.id
    config = job.kwargs['config']
    budget = job.kwargs['budget']
    timestamps = job.timestamps
    result = job.result
    exception = job.exception
    d = self.data[config_id]
    if not skip_sanity_checks:
        assert d.config == config, 'Configurations differ!'
        assert d.status == 'RUNNING', "Configuration wasn't scheduled for a run."
        assert d.budget == budget, 'Budgets differ (%f != %f)!' % (d.budget, budget)
    d.time_stamps[budget] = timestamps
    d.results[budget] = result
    if (job.result is not None) and np.isfinite(result['loss']):
        d.status = 'REVIEW'
    else:
        d.status = 'CRASHED'
    d.exceptions[budget] = exception
    self.num_running -= 1
function to register the result of a job This function is called from HB_master, don't call this from your script.
def run(self, rapid_namelist_file=""):
    if not self._rapid_executable_location:
        log("Missing rapid_executable_location. "
            "Please set before running this function ...",
            "ERROR")
    time_start = datetime.datetime.utcnow()
    temp_rapid_namelist_file = os.path.join(os.getcwd(), "rapid_namelist")
    if not rapid_namelist_file or not os.path.exists(rapid_namelist_file):
        self.generate_namelist_file(temp_rapid_namelist_file)
    else:
        self.update_namelist_file(rapid_namelist_file,
                                  temp_rapid_namelist_file)
    local_rapid_executable_location = \
        os.path.join(os.path.dirname(temp_rapid_namelist_file),
                     "rapid_exe_symlink")

    def rapid_cleanup(*args):
        for arg in args:
            try:
                os.remove(arg)
            except OSError:
                pass

    temp_link_to_rapid = ""
    if self._rapid_executable_location != local_rapid_executable_location:
        rapid_cleanup(local_rapid_executable_location)
        if os.name == "nt":
            self._create_symlink_cygwin(self._rapid_executable_location,
                                        local_rapid_executable_location)
        else:
            os.symlink(self._rapid_executable_location,
                       local_rapid_executable_location)
        temp_link_to_rapid = local_rapid_executable_location

    log("Running RAPID ...", "INFO")
    if os.name == "nt":
        local_rapid_executable_location = \
            self._get_cygwin_path(local_rapid_executable_location)
    run_rapid_command = [local_rapid_executable_location,
                         "-ksp_type", self._ksp_type]
    if self._num_processors > 1:
        run_rapid_command = [self._mpiexec_command, "-n",
                             str(self._num_processors)] + run_rapid_command
    process = Popen(run_rapid_command, stdout=PIPE, stderr=PIPE, shell=False)
    out, err = process.communicate()
    if err:
        rapid_cleanup(temp_link_to_rapid, temp_rapid_namelist_file)
        raise Exception(err)
    else:
        # The log message literal was lost in extraction.
        log("RAPID output:", "INFO")
        for line in out.split(b'\n'):
            print(line)
    rapid_cleanup(temp_link_to_rapid, temp_rapid_namelist_file)
    log("Time to run RAPID: %s" % (datetime.datetime.utcnow() - time_start),
        "INFO")
Run RAPID program and generate file based on inputs This will generate your rapid_namelist file and run RAPID from wherever you call this script (your working directory). Parameters ---------- rapid_namelist_file: str, optional Path of namelist file to use in the simulation. It will be updated with any parameters added to the RAPID manager. Linux Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_executable_location='~/work/rapid/src/rapid' use_all_processors=True, ) rapid_manager.update_parameters( rapid_connect_file='../rapid-io/input/rapid_connect.csv', Vlat_file='../rapid-io/input/m3_riv.nc', riv_bas_id_file='../rapid-io/input/riv_bas_id.csv', k_file='../rapid-io/input/k.csv', x_file='../rapid-io/input/x.csv', Qout_file='../rapid-io/output/Qout.nc', ) rapid_manager.update_reach_number_data() rapid_manager.update_simulation_runtime() rapid_manager.run( rapid_namelist_file='../rapid-io/input/rapid_namelist') Linux Reservoir Forcing Flows Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_executable_location='~/work/rapid/src/rapid', num_processors=4, IS_for_tot=4, IS_for_use=4, for_tot_id_file='../rapid-io/input/dam_id.csv', for_use_id_file='../rapid-io/input/dam_id.csv', Qfor_file='../rapid-io/input/qout_dams.csv', ZS_dtF=86400, BS_opt_for=True, ) rapid_manager.run( rapid_namelist_file='../rapid-io/input/rapid_namelist_regular') Windows with Cygwin Example: .. code:: python from RAPIDpy import RAPID from os import path rapid_exe_path = 'C:/cygwin64/home/username/rapid/run/rapid', rapid_manager = RAPID( rapid_executable_location=rapid_exe_path, cygwin_bin_location='C:/cygwin64/bin', use_all_processors=True, ZS_TauR=24*3600, ZS_dtR=15*60, ZS_TauM=365*24*3600, ZS_dtM=24*3600 ) rapid_input = 'C:/cygwin64/home/username/rapid-io/input' rapid_output = 'C:/cygwin64/home/username/rapid-io/output' rapid_manager.update_parameters( rapid_connect_file=path.join(rapid_input, 'rapid_connect.csv'), Vlat_file=path.join(rapid_input, 'm3_riv.nc'), riv_bas_id_file=path.join(rapid_input, 'riv_bas_id.csv'), k_file=path.join(rapid_input, 'k.csv'), x_file=path.join(rapid_input, 'x.csv'), Qout_file=path.join(rapid_output, 'Qout.nc'), ) rapid_manager.update_reach_number_data() rapid_manager.update_simulation_runtime() rapid_manager.run()
def publish(self, event_type, events):
    assert event_type in self.events
    # The queue-name format string was lost in extraction; 'stats-{}' is
    # assumed here.
    current_queues.queues['stats-{}'.format(event_type)].publish(events)
Publish events.
def _SkipGroup(buffer, pos, end):
    while 1:
        (tag_bytes, pos) = ReadTag(buffer, pos)
        new_pos = SkipField(buffer, pos, end, tag_bytes)
        if new_pos == -1:
            return pos
        pos = new_pos
Skip sub-group. Returns the new position.
def read(afile):
    the_relative_file = os.path.join(HERE, afile)
    # The mode/encoding literals were lost in extraction; 'r'/'utf-8' are
    # assumed.
    with codecs.open(the_relative_file, 'r', 'utf-8') as opened_file:
        content = filter_out_test_code(opened_file)
        content = "".join(list(content))
    return content
Read a file into setup
def _get_host_details(self):
    # The beginning of this function was lost in extraction; the REST call
    # and system-type check below are reconstructed.
    status, headers, system = self._rest_get('/rest/v1/Systems/1')
    if status < 300:
        stype = self._get_type(system)
        if stype not in ['ComputerSystem.0', 'ComputerSystem.1']:
            msg = "%s is not a valid system type " % stype
            raise exception.IloError(msg)
    else:
        msg = self._get_extended_error(system)
        raise exception.IloError(msg)
    return system
Get the system details.
def authenticate(self):
    # Dict keys and message literals were lost in extraction; they are
    # reconstructed following the iCloud login flow described below.
    logger.info("Authenticating as %s", self.user['apple_id'])
    data = dict(self.user)
    data.update({'extended_login': False})
    try:
        req = self.session.post(
            self._base_login_url, params=self.params, data=json.dumps(data)
        )
    except PyiCloudAPIResponseError as error:
        msg = 'Invalid email/password combination.'
        raise PyiCloudFailedLoginException(msg, error)
    resp = req.json()
    self.params.update({'dsid': resp['dsInfo']['dsid']})
    if not os.path.exists(self._cookie_directory):
        os.mkdir(self._cookie_directory)
    self.session.cookies.save()
    logger.debug("Cookies saved to %s", self._get_cookiejar_path())
    self.data = resp
    self.webservices = self.data['webservices']
    logger.info("Authentication completed successfully")
    logger.debug(self.params)
Handles authentication, and persists the X-APPLE-WEB-KB cookie so that subsequent logins will not cause additional e-mails from Apple.
def to_html(self, table_width=5):
    env = self.jinja2_environment
    # The template-name key was lost in extraction; 'html' is assumed.
    template = env.get_template(self.TEMPLATE_NAMES['html'])
    return template.render(program=self, table_width=table_width)
Write the program information to HTML code, which can be saved, printed and brought to the gym. Parameters ---------- table_width The table width of the HTML code. Returns ------- string HTML code.
def open_external_editor(filename=None, sql=None):
    message = None
    # Several literals were lost in extraction; the marker text, extension
    # and error message below are reconstructed.
    filename = filename.strip().split(' ', 1)[0] if filename else None
    sql = sql or ''
    MARKER = '# Type your query above this line.\n'
    query = click.edit(u'{sql}\n\n{marker}'.format(sql=sql, marker=MARKER),
                       filename=filename, extension='.sql')
    if filename:
        try:
            with open(filename, encoding='utf-8') as f:
                query = f.read()
        except IOError:
            message = 'Error reading file: %s.' % filename
    if query is not None:
        query = query.split(MARKER, 1)[0].rstrip()
    else:
        query = sql
    return (query, message)
Open an external editor, wait for the user to type in their query, and return the query. :return: tuple of (query, message).
def load_blind(self, item):
    blind = Blind.from_config(self.pyvlx, item)
    self.add(blind)
Load blind from JSON.
def set_scalebar_for_all(self, row_column_list=None, location='lower right'):
    # The default location literal was lost in extraction; 'lower right' is
    # assumed from the docstring's list of allowed values.
    if row_column_list is None:
        for subplot in self.subplots:
            subplot.set_scalebar(location)
    else:
        for row, column in row_column_list:
            subplot = self.get_subplot_at(row, column)
            subplot.set_scalebar(location)
Show marker area scale for subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :param location: the location of the label inside the plot. May be one of 'center', 'upper right', 'lower right', 'upper left', 'lower left'.
def endStep(self, key):
    ptime = _ptime()
    if key is not None:
        # The dict keys and message literals were lost in extraction and are
        # reconstructed here.
        self.steps[key]['end'] = ptime
        self.steps[key]['elapsed'] = ptime[1] - self.steps[key]['start'][1]
    self.end = ptime
    print('==== Processing Step ', key, ' finished at ', ptime[0])
    print('')
Record the end time for the step. If key==None, simply record ptime as end time for class to represent the overall runtime since the initialization of the class.
def get_new_addresses(
    self,
    index=0,
    count=1,
    security_level=AddressGenerator.DEFAULT_SECURITY_LEVEL,
    checksum=False,
):
    return extended.GetNewAddressesCommand(self.adapter)(
        count=count,
        index=index,
        securityLevel=security_level,
        checksum=checksum,
        seed=self.seed,
    )
Generates one or more new addresses from the seed. :param index: The key index of the first new address to generate (must be >= 1). :param count: Number of addresses to generate (must be >= 1). .. tip:: This is more efficient than calling ``get_new_address`` inside a loop. If ``None``, this method will progressively generate addresses and scan the Tangle until it finds one that has no transactions referencing it. :param security_level: Number of iterations to use when generating new addresses. Larger values take longer, but the resulting signatures are more secure. This value must be between 1 and 3, inclusive. :param checksum: Specify whether to return the address with the checksum. Defaults to ``False``. :return: Dict with the following structure:: { 'addresses': List[Address], Always a list, even if only one address was generated. } References: - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getnewaddress
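A usage sketch (the `api` object is a hypothetical instance of the class exposing this method; the response shape follows the docstring above):

response = api.get_new_addresses(index=0, count=3, security_level=2)
for address in response['addresses']:
    print(address)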
def uridefrag(uristring):
    if isinstance(uristring, bytes):
        parts = uristring.partition(b'#')
    else:
        parts = uristring.partition(u'#')
    return DefragResult(parts[0], parts[2] if parts[1] else None)
Remove an existing fragment component from a URI reference string.
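Assuming DefragResult is a (uri, fragment) pair, for example:

uri, fragment = uridefrag('http://example.com/page#section')
# uri == 'http://example.com/page', fragment == 'section'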
def route(vertices_resources, nets, machine, constraints, placements, allocations={}, core_resource=Cores, radius=20): wrap_around = machine.has_wrap_around_links() route_to_endpoint = {} for constraint in constraints: if isinstance(constraint, RouteEndpointConstraint): route_to_endpoint[constraint.vertex] = constraint.route routes = {} for net in nets: root, lookup = ner_net(placements[net.source], set(placements[sink] for sink in net.sinks), machine.width, machine.height, wrap_around, radius) if route_has_dead_links(root, machine): root, lookup = avoid_dead_links(root, machine, wrap_around) for sink in net.sinks: tree_node = lookup[placements[sink]] if sink in route_to_endpoint: tree_node.children.append((route_to_endpoint[sink], sink)) else: cores = allocations.get(sink, {}).get(core_resource, None) if cores is not None: for core in range(cores.start, cores.stop): tree_node.children.append((Routes.core(core), sink)) else: tree_node.children.append((None, sink)) routes[net] = root return routes
Routing algorithm based on Neighbour Exploring Routing (NER). Algorithm reference: J. Navaridas et al. SpiNNaker: Enhanced multicast routing, Parallel Computing (2014). http://dx.doi.org/10.1016/j.parco.2015.01.002 This algorithm attempts to use NER to generate routing trees for all nets and routes around broken links using A* graph search. If the system is fully connected, this algorithm will always succeed, though no consideration of congestion or routing-table usage is attempted. Parameters ---------- radius : int Radius of area to search from each node. 20 is arbitrarily selected in the paper and shown to be acceptable in practice. If set to zero, this method becomes longest-dimension-first routing.
def facts(self, **kwargs): return self.__api.facts(query=EqualsOperator("certname", self.name), **kwargs)
Get all facts of this node. Additional arguments may also be specified that will be passed to the query function.
def system(cmd, data=None):
    import subprocess
    s = subprocess.Popen(cmd, shell=True,
                         stdout=subprocess.PIPE,
                         stdin=subprocess.PIPE)
    out, err = s.communicate(data)
    return out.decode()
pipes the output of a program
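For example, piping data through a shell command (Python 3, where `communicate` expects bytes):

>>> system('tr a-z A-Z', b'hello')
'HELLO'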
def load_pricing_adjustments(self, columns, dts, assets):
    out = [None] * len(columns)
    for i, column in enumerate(columns):
        adjs = {}
        for asset in assets:
            adjs.update(self._get_adjustments_in_range(asset, dts, column))
        out[i] = adjs
    return out
Returns ------- adjustments : list[dict[int -> Adjustment]] A list, where each element corresponds to the `columns`, of mappings from index to adjustment objects to apply at that index.
def get_region_vcf(self, case_obj, chrom=None, start=None, end=None,
                   gene_obj=None, variant_type='clinical', category='snv',
                   rank_threshold=None):
    # The string literals and dict keys in this function were lost in
    # extraction; the 'vcf_files' keys below follow scout's case model.
    rank_threshold = rank_threshold or -100
    variant_file = None
    if variant_type == 'clinical':
        if category == 'snv':
            variant_file = case_obj['vcf_files'].get('vcf_snv')
        elif category == 'sv':
            variant_file = case_obj['vcf_files'].get('vcf_sv')
        elif category == 'str':
            variant_file = case_obj['vcf_files'].get('vcf_str')
    elif variant_type == 'research':
        if category == 'snv':
            variant_file = case_obj['vcf_files'].get('vcf_snv_research')
        elif category == 'sv':
            variant_file = case_obj['vcf_files'].get('vcf_sv_research')
    if not variant_file:
        raise SyntaxError("Vcf file does not seem to exist")
    vcf_obj = VCF(variant_file)
    region = ""
    if gene_obj:
        chrom = gene_obj['chromosome']
        start = gene_obj['start']
        end = gene_obj['end']
    if chrom:
        if start and end:
            region = "{0}:{1}-{2}".format(chrom, start, end)
        else:
            region = "{0}".format(chrom)
    else:
        rank_threshold = rank_threshold or 5
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp:
        file_name = str(pathlib.Path(temp.name))
        for header_line in vcf_obj.raw_header.split('\n'):
            if len(header_line) > 3:
                temp.write(header_line + '\n')
        for variant in vcf_obj(region):
            temp.write(str(variant))
    return file_name
Produce a reduced vcf with variants from the specified coordinates This is used for the alignment viewer. Args: case_obj(dict): A case from the scout database variant_type(str): 'clinical' or 'research'. Default: 'clinical' category(str): 'snv' or 'sv'. Default: 'snv' rank_threshold(float): Only load variants above this score. Default: 5 chrom(str): Load variants from a certain chromosome start(int): Specify the start position end(int): Specify the end position gene_obj(dict): A gene object from the database Returns: file_name(str): Path to the temporary file
def request(self, endpoint):
    method = endpoint.method
    method = method.lower()
    params = None
    try:
        params = getattr(endpoint, "params")
    except AttributeError:
        params = {}
    headers = {}
    if hasattr(endpoint, "HEADERS"):
        headers = getattr(endpoint, "HEADERS")
    request_args = {}
    # The request-argument keys were lost in extraction; 'params'/'json'
    # follow the requests-style convention.
    if method == 'get':
        request_args['params'] = params
    elif hasattr(endpoint, "data") and endpoint.data:
        request_args['json'] = endpoint.data
    request_args.update(self._request_params)
    if not (hasattr(endpoint, "STREAM") and getattr(endpoint, "STREAM") is True):
        url = "{}/{}".format(
            TRADING_ENVIRONMENTS[self.environment]["api"], endpoint)
        response = self.__request(method, url, request_args, headers=headers)
        content = response.content.decode()
        content = json.loads(content)
        endpoint.response = content
        endpoint.status_code = response.status_code
        return content
    else:
        url = "{}/{}".format(
            TRADING_ENVIRONMENTS[self.environment]["stream"], endpoint)
        endpoint.response = self.__stream_request(method, url, request_args,
                                                  headers=headers)
        return endpoint.response
Perform a request for the APIRequest instance 'endpoint'. Parameters ---------- endpoint : APIRequest The endpoint parameter contains an instance of an APIRequest containing the endpoint, method and optionally other parameters or body data. Raises ------ V20Error in case of HTTP response code >= 400
def tweet(tweet_text_func):
    # The stray docstring text that had leaked into the function body is
    # removed here.
    def tweet_func():
        api = _connect_to_twitter()
        tweet = tweet_text_func()
        print("Tweeting: %s" % tweet)
        api.update_status(tweet)
        return tweet
    return tweet_func
A decorator to make a function Tweet Parameters - `tweet_text_func` is a function that takes no parameters and returns a tweetable string For example:: @tweet def total_deposits_this_week(): # ... @tweet def not_an_interesting_tweet(): return 'This tweet is not data-driven.'
def add_char(self, char):
    if char == '\t':
        self.flush()
        self._r.add_tab()
    elif char in '\r\n':
        self.flush()
        self._r.add_br()
    else:
        self._bfr.append(char)
Process the next character of input through the translation finite state machine (FSM). There are two possible states, buffer pending and not pending, but those are hidden behind the ``.flush()`` method which must be called at the end of text to ensure any pending ``<w:t>`` element is written.
def pipeline(stages, run=True, stride=1, chunksize=None):
    from pyemma.coordinates.pipelines import Pipeline
    if not isinstance(stages, list):
        stages = [stages]
    p = Pipeline(stages, param_stride=stride, chunksize=chunksize)
    if run:
        p.parametrize()
    return p
r""" Data analysis pipeline. Constructs a data analysis :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` and parametrizes it (unless prevented). If this function takes too long, consider loading data in memory. Alternatively if the data is to large to be loaded into memory make use of the stride parameter. Parameters ---------- stages : data input or list of pipeline stages If given a single pipeline stage this must be a data input constructed by :py:func:`source`. If a list of pipelining stages are given, the first stage must be a data input constructed by :py:func:`source`. run : bool, optional, default = True If True, the pipeline will be parametrized immediately with the given stages. If only an input stage is given, the run flag has no effect at this time. True also means that the pipeline will be immediately re-parametrized when further stages are added to it. *Attention* True means this function may take a long time to compute. If False, the pipeline will be passive, i.e. it will not do any computations before you call parametrize() stride : int, optional, default = 1 If set to 1, all input data will be used throughout the pipeline to parametrize its stages. Note that this could cause the parametrization step to be very slow for large data sets. Since molecular dynamics data is usually correlated at short timescales, it is often sufficient to parametrize the pipeline at a longer stride. See also stride option in the output functions of the pipeline. chunksize: int, default=None Number of data frames to process at once. Choose a higher value here, to optimize thread usage and gain processing speed. If None is passed, use the default value of the underlying reader/data source. Choose zero to disable chunking at all. Returns ------- pipe : :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` A pipeline object that is able to conduct big data analysis with limited memory in streaming mode. Examples -------- >>> import numpy as np >>> from pyemma.coordinates import source, tica, assign_to_centers, pipeline Create some random data and cluster centers: >>> data = np.random.random((1000, 3)) >>> centers = data[np.random.choice(1000, 10)] >>> reader = source(data) Define a TICA transformation with lag time 10: >>> tica_obj = tica(lag=10) Assign any input to given centers: >>> assign = assign_to_centers(centers=centers) >>> pipe = pipeline([reader, tica_obj, assign]) >>> pipe.parametrize() .. autoclass:: pyemma.coordinates.pipelines.Pipeline :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.coordinates.pipelines.Pipeline :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.coordinates.pipelines.Pipeline :attributes:
def _det_inference(self):
    # The inference-method name literals were lost in extraction; each
    # Ellipsis marks a missing string.
    if (self.n_randEffs == 2) and (~sp.isnan(self.Y).any()):
        rv = ...
    else:
        rv = ...
    return rv
Internal method for determining the inference method
def getLocalDeformationEnergy(self, bp, complexDna, freeDnaFrames=None,
                              boundDnaFrames=None, helical=False, unit='kT',
                              which='all', outFile=None):
    # Several literals in this function were lost in extraction; the energy
    # term names and output format strings are reconstructed from the
    # docstring below.
    if helical:
        energyTerms = ['full', 'diag', 'x-disp', 'y-disp', 'h-rise',
                       'inclination', 'tip', 'h-twist']
    else:
        energyTerms = ['full', 'diag', 'shift', 'slide', 'rise',
                       'tilt', 'roll', 'twist']
    if isinstance(which, str):
        if which != 'all':
            raise ValueError('Either use "all" or a list of terms from {0}'.format(energyTerms))
        else:
            which = energyTerms
    elif isinstance(which, list):
        for key in which:
            if key not in energyTerms:
                raise ValueError('{0} is not a valid list of terms. Use terms from {1}'.format(which, energyTerms))
    else:
        raise ValueError('Use "all" or a list of terms from {0}'.format(energyTerms))
    means, esMatrix = self.calculateLocalElasticity(bp, frames=freeDnaFrames,
                                                    helical=helical, unit=unit)
    time, array = self.extractLocalParameters(complexDna, bp,
                                              frames=boundDnaFrames,
                                              helical=helical)
    energyOut = OrderedDict()
    for key in which:
        energyOut[key] = []
    for i in range(array[0].shape[0]):
        vec = array[:, i]
        diff = vec - means
        for key in which:
            t_energy = self._calcLocalEnergy(diff, esMatrix, key)
            energyOut[key].append(t_energy)
    for key in which:
        energyOut[key] = np.asarray(energyOut[key])
    if outFile is not None:
        with open(outFile, 'w') as fout:
            fout.write('#Time')
            for name in which:
                fout.write(', {0}'.format(name))
            fout.write('\n')
            for t in range(len(time)):
                fout.write('{0:.3f}'.format(time[t]))
                for name in which:
                    fout.write(', {0:.5f}'.format(energyOut[name][t]))
                fout.write('\n')
    return time, energyOut
r"""Deformation energy of the input DNA using local elastic properties The deformation energy of a base-step/s for probe DNA object with reference to the same base-step/s DNA present in the current DNA object. The deformation free energy is calculated using elastic matrix as follows .. math:: G = \frac{1}{2}\mathbf{xKx^T} When ``helical='False'`` .. math:: \mathbf{K} = \mathbf{K}_{base-step} .. math:: \mathbf{x} = \begin{bmatrix} (Dx_{i}-Dx_0) & (Dy_i - Dy_0) & (Dz_i - Dz_0) & (\tau_i - \tau_0) & (\rho_i - \rho_0) & (\omega_i - \omega_0) \end{bmatrix} When ``helical='True'`` .. math:: \mathbf{K} = \mathbf{K}_{helical-base-step} .. math:: \mathbf{x} = \begin{bmatrix} (dx_{i}-dx_0) & (dy_i - dy_0) & (h_i - h_0) & (\eta_i - \eta_0) & (\theta_i - \theta_0) & (\Omega_i - \Omega_0) \end{bmatrix} .. currentmodule:: dnaMD Parameters ---------- bp : list List of two base-steps forming the DNA segment. For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered. complexDna : :class:`dnaMD.DNA` Input :class:`dnaMD.DNA` instance for which deformation energy will be calculated. freeDnaFrames : list To select a trajectory segment of current (free) DNA data. List of two trajectory frames between which parameters will be extracted. It can be used to select portions of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be considered. boundDnaFrames : list To select a trajectory segment of input (bound) DNA data. List of two trajectory frames between which parameters will be extracted. It can be used to select portions of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be considered. helical : bool If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise, by default, elastic matrix for **base-step** parameters are calculated. unit : str Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``. which : str or list For which motions (degrees of freedom), energy should be calculated. It should be either a list containing terms listed below or"all" for all energy terms. Following keywords are available: * ``'full'`` : Use entire elastic matrix -- all parameters with their coupling * ``'diag'`` : Use diagonal of elastic matrix -- all motions but no coupling * ``'shift'`` or ``'x-disp'`` * ``'slide'`` or ``'y-idsp'`` * ``'rise'`` or ``'h-rise'`` * ``'tilt'`` or ``'inclination'`` * ``'roll'`` or ``'tip'`` * ``'twist'`` or ``'h-twist'`` outFile : str Output file in csv format. Returns ------- time : numpy.ndarray 1D array containing time values. energy : dict of numpy.ndarray Dictionary of 1D array of shape (nframes) containing energy terms requested for DNA.
def prep_db_parallel(samples, parallel_fn): batch_groups, singles, out_retrieve, extras = _group_by_batches(samples, _has_variant_calls) to_process = [] has_batches = False for (name, caller), info in batch_groups.items(): fnames = [x[0] for x in info] to_process.append([fnames, (str(name), caller, True), [x[1] for x in info], extras]) has_batches = True for name, caller, data, fname in singles: to_process.append([[fname], (str(name), caller, False), [data], extras]) output = parallel_fn("prep_gemini_db", to_process) out_fetch = {} for batch_id, out_file in output: out_fetch[tuple(batch_id)] = out_file out = [] for batch_name, data in out_retrieve: out_variants = [] for vrn in data["variants"]: use_population = vrn.pop("population", True) if use_population: vrn["population"] = out_fetch[(batch_name, vrn["variantcaller"])] out_variants.append(vrn) data["variants"] = out_variants out.append([data]) for x in extras: out.append([x]) return out
Prepares gemini databases in parallel, handling jointly called populations.
def get_assessment_ids(self):
    if not self.is_assessment_based_activity():
        raise IllegalState()
    else:
        # The map key was lost in extraction; 'assessmentIds' follows the
        # osid naming convention.
        return [Id(a) for a in self._my_map['assessmentIds']]
Gets the Ids of any assessments associated with this activity. return: (osid.id.IdList) - list of assessment Ids raise: IllegalState - is_assessment_based_activity() is false compliance: mandatory - This method must be implemented.
def preview(src_path):
    previews = []
    if sketch.is_sketchfile(src_path):
        previews = sketch.preview(src_path)
    if not previews:
        previews = quicklook.preview(src_path)
    previews = [safely_decode(preview) for preview in previews]
    return previews
Generates a preview of src_path in the requested format. :returns: A list of preview paths, one for each page.
def parse_rpm_output(output, tags=None, separator=';'):
    # The tag names, separator default and marker literals were lost in
    # extraction; they are reconstructed from the rpm query-tag convention.
    if tags is None:
        tags = image_component_rpm_tags

    def field(tag):
        try:
            value = fields[tags.index(tag)]
        except ValueError:
            return None
        if value == '(none)':
            return None
        return value

    components = []
    sigmarker = 'Key ID '
    for rpm in output:
        fields = rpm.rstrip().split(separator)
        if len(fields) < len(tags):
            continue
        signature = field('SIGPGP:pgpsig') or field('SIGGPG:pgpsig')
        if signature:
            parts = signature.split(sigmarker, 1)
            if len(parts) > 1:
                signature = parts[1]
        component_rpm = {
            'type': 'rpm',
            'name': field('NAME'),
            'version': field('VERSION'),
            'release': field('RELEASE'),
            'arch': field('ARCH'),
            'sigmd5': field('SIGMD5'),
            'signature': signature,
        }
        epoch = field('EPOCH')
        if epoch is not None:
            epoch = int(epoch)
        component_rpm['epoch'] = epoch
        if component_rpm['name'] != 'gpg-pubkey':
            components.append(component_rpm)
    return components
Parse output of the rpm query. :param output: list, decoded output (str) from the rpm subprocess :param tags: list, str fields used for query output :return: list, dicts describing each rpm package
async def delete(self, device, remove=True):
    device = self._find_device(device)
    if not self.is_handleable(device) or not device.is_loop:
        # The log-message literals were lost in extraction and are
        # reconstructed in the same style as the surrounding calls.
        self._log.warn(_('not deleting {0}: unhandled device', device))
        return False
    if remove:
        await self.auto_remove(device, force=True)
    self._log.debug(_('deleting {0}', device))
    await device.delete()
    self._log.info(_('deleted {0}', device))
    return True
Detach the loop device. :param device: device object, block device path or mount path :param bool remove: whether to unmount the partition etc. :returns: whether the loop device is deleted
def add_link(self, rel, target, wrap=False, **kwargs):
    if isinstance(target, bytes):
        target = target.decode()
    if isinstance(target, str) or isinstance(target, unicode):
        new_link = dict(href=target, **kwargs)
    else:
        new_link = dict(href=target.url(), **kwargs)
    # '_links' is the standard HAL key; the literal was lost in extraction.
    self._add_rel('_links', rel, new_link, wrap)
    return self
Adds a link to the document. This method adds a link to the given ``target`` to the document with the given ``rel``. If one or more links are already present for that link relationship type, the new link will be added to the existing links for that link relationship type. Unlike ``dougrain.Document.add_link``, this method does not detect equivalence between relationship types with different representations. If ``target`` is a string, a link is added with ``target`` as its ``href`` property and other properties from the keyword arguments. If ``target`` is a ``dougrain.Document`` object, a link is added with ``target``'s URL as its ``href`` property and other properties from the keyword arguments. If ``target`` is a ``Builder`` object, a link is added with ``target``'s URL as its ``href`` property and other properties from the keyword arguments. This method returns self, allowing it to be chained with additional method calls. Arguments: - ``rel``: a string specifying the link relationship type of the link. It should be a well-known link relation name from the IANA registry (http://www.iana.org/assignments/link-relations/link-relations.xml), a full URI, or a CURIE. - ``target``: the destination of the link. - ``wrap``: Defaults to False, but if True, specifies that the link object should be initially wrapped in a JSON array even if it is the first link for the given ``rel``.
def byte_adaptor(fbuffer):
    if six.PY3:
        strings = fbuffer.read().decode()
        fbuffer = six.StringIO(strings)
        return fbuffer
    else:
        return fbuffer
provides py3 compatibility by converting byte based file stream to string based file stream Arguments: fbuffer: file like objects containing bytes Returns: string buffer
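A quick sketch of the intent (Python 3 branch):

import io

raw = io.BytesIO(b"line1\nline2\n")
text_stream = byte_adaptor(raw)  # a StringIO on Python 3
print(text_stream.read())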
def list_by_instance(self, instance_id):
    ports = port_list(self.request, device_id=instance_id)
    sg_ids = []
    for p in ports:
        sg_ids += p.security_groups
    return self._list(id=set(sg_ids)) if sg_ids else []
Gets security groups of an instance. :returns: List of SecurityGroup objects associated with the instance
def add_doc(self, doc):
    if isinstance(doc, HelloDoc) and doc.validate():
        self.docs.append(doc)
    else:
        if not doc.validate():
            raise Exception("HelloDoc Errors %s" % (doc.errors,))
        else:
            raise Exception("add_doc doc must be an instance of class HelloDoc")
Simple dict of {'name': '@filename.pdf'}
def open_acqdata(filename, user='unknown', filemode='w-'):
    # The default argument literals were lost in extraction and are
    # reconstructed here.
    if filename.lower().endswith((".hdf5", ".h5")):
        return HDF5Data(filename, user, filemode)
    elif filename.lower().endswith((".pst", ".raw")):
        return BatlabData(filename, user, filemode)
    else:
        print "File format not supported: ", filename
Opens and returns the correct AcquisitionData object according to filename extention. Supported extentions: * .hdf5, .h5 for sparkle data * .pst, .raw for batlab data. Both the .pst and .raw file must be co-located and share the same base file name, but only one should be provided to this function see :class:`AcquisitionData<sparkle.data.acqdata.AcquisitionData>` examples (if data file already exists):: data = open_acqdata('myexperiment.hdf5', filemode='r') print data.dataset_names() for batlab data:: data = open('mouse666.raw', filemode='r') print data.dataset_names()
def compute_rotsym(self, threshold=1e-3*angstrom):
    graph = MolecularGraph.from_geometry(self, scaling=1.5)
    try:
        return compute_rotsym(self, graph, threshold)
    except ValueError:
        raise ValueError("The rotational symmetry number can only be "
                         "computed when the graph is fully connected.")
Compute the rotational symmetry number. Optional argument: | ``threshold`` -- only when a rotation results in an rmsd below the given threshold, the rotation is considered to transform the molecule onto itself.
def build_global(self, global_node):
    config_block_lines = self.__build_config_block(
        global_node.config_block)
    return config.Global(config_block=config_block_lines)
parse `global` section, and return the config.Global Args: global_node (TreeNode): `global` section treenode Returns: config.Global: an object
def upload(self, filepaths, enable_matching=False, transcode_quality='320k',
           delete_on_success=False):
    # Dict keys and several literals were lost in extraction; they are
    # reconstructed from the result shapes documented below.
    filenum = 0
    total = len(filepaths)
    results = []
    errors = {}
    pad = len(str(total))
    exist_strings = ["ALREADY_EXISTS", "this song is already uploaded"]
    for result in self._upload(filepaths, enable_matching=enable_matching,
                               transcode_quality=transcode_quality):
        filepath = filepaths[filenum]
        filenum += 1
        uploaded, matched, not_uploaded, error = result
        if uploaded:
            logger.info(
                "({num:>{pad}}/{total}) Successfully uploaded -- {file} ({song_id})".format(
                    num=filenum, pad=pad, total=total, file=filepath,
                    song_id=uploaded[filepath]))
            results.append({'result': 'uploaded', 'filepath': filepath,
                            'id': uploaded[filepath]})
        elif matched:
            logger.info(
                "({num:>{pad}}/{total}) Successfully scanned and matched -- {file} ({song_id})".format(
                    num=filenum, pad=pad, total=total, file=filepath,
                    song_id=matched[filepath]))
            results.append({'result': 'matched', 'filepath': filepath,
                            'id': matched[filepath]})
        elif error:
            logger.warning("({num:>{pad}}/{total}) Error on upload -- {file}".format(
                num=filenum, pad=pad, total=total, file=filepath))
            results.append({'result': 'error', 'filepath': filepath,
                            'message': error[filepath]})
            errors.update(error)
        else:
            if any(exist_string in not_uploaded[filepath] for exist_string in exist_strings):
                response = "ALREADY EXISTS"
                song_id = GM_ID_RE.search(not_uploaded[filepath]).group(0)
                logger.info(
                    "({num:>{pad}}/{total}) Failed to upload -- {file} ({song_id}) | {response}".format(
                        num=filenum, pad=pad, total=total, file=filepath,
                        response=response, song_id=song_id))
                results.append({'result': 'not_uploaded', 'filepath': filepath,
                                'id': song_id, 'message': not_uploaded[filepath]})
            else:
                response = not_uploaded[filepath]
                logger.info(
                    "({num:>{pad}}/{total}) Failed to upload -- {file} | {response}".format(
                        num=filenum, pad=pad, total=total, file=filepath,
                        response=response))
                results.append({'result': 'not_uploaded', 'filepath': filepath,
                                'message': not_uploaded[filepath]})
        success = (uploaded or matched) or (not_uploaded and 'ALREADY_EXISTS' in not_uploaded[filepath])
        if success and delete_on_success:
            try:
                os.remove(filepath)
            except (OSError, PermissionError):
                logger.warning("Failed to remove {} after successful upload".format(filepath))
    if errors:
        logger.info("\n\nThe following errors occurred:\n")
        for filepath, e in errors.items():
            logger.info("{file} | {error}".format(file=filepath, error=e))
        logger.info("\nThese filepaths may need to be synced again.\n")
    return results
Upload local songs to Google Music. Parameters: filepaths (list or str): Filepath(s) to upload. enable_matching (bool): If ``True`` attempt to use `scan and match <http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__. This requires ffmpeg or avconv. transcode_quality (str or int): If int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame `VBR quality <http://trac.ffmpeg.org/wiki/Encode/MP3#VBREncoding>`__. If string, pass to ffmpeg/avconv ``-b:a`` for libmp3lame `CBR quality <http://trac.ffmpeg.org/wiki/Encode/MP3#CBREncoding>`__. Default: ``320k`` delete_on_success (bool): Delete successfully uploaded local files. Default: ``False`` Returns: A list of result dictionaries. :: [ {'result': 'uploaded', 'filepath': <filepath>, 'id': <song_id>}, # uploaded {'result': 'matched', 'filepath': <filepath>, 'id': <song_id>}, # matched {'result': 'error', 'filepath': <filepath>, 'message': <error_message>}, # error {'result': 'not_uploaded', 'filepath': <filepath>, 'id': <song_id>, 'message': <reason_message>}, # not_uploaded ALREADY_EXISTS {'result': 'not_uploaded', 'filepath': <filepath>, 'message': <reason_message>} # not_uploaded ]
def _switch_charset_list(characters, target=...):
    # The default target and the offsets lookup key were lost in extraction;
    # each Ellipsis marks a gap.
    characters = characters[:]
    offset = block_offset * offsets[target][...]
    for n in range(len(characters)):
        chars = list(characters[n])
        for m in range(len(chars)):
            char = chars[m]
            char_offset = ord(char) + offset
            if in_range(char_offset, target):
                chars[m] = chr(char_offset)
            else:
                chars[m] = char
        characters[n] = ''.join(chars)
    return characters
Switches the character set of a list. If a character does not have an equivalent in the target script (e.g. ヹ when converting to hiragana), the original character is kept.
def get_plot(self, subplot=False, width=None, height=None, xmin=-6., xmax=6., yscale=1, colours=None, plot_total=True, legend_on=True, num_columns=2, legend_frame_on=False, legend_cutoff=3, xlabel=, ylabel=, zero_to_efermi=True, dpi=400, fonts=None, plt=None, style=None, no_base_style=False): plot_data = self.dos_plot_data(yscale=yscale, xmin=xmin, xmax=xmax, colours=colours, plot_total=plot_total, legend_cutoff=legend_cutoff, subplot=subplot, zero_to_efermi=zero_to_efermi) if subplot: nplots = len(plot_data[]) plt = pretty_subplot(nplots, 1, width=width, height=height, dpi=dpi, plt=plt) else: plt = pretty_plot(width=width, height=height, dpi=dpi, plt=plt) mask = plot_data[] energies = plot_data[][mask] fig = plt.gcf() lines = plot_data[] spins = [Spin.up] if len(lines[0][0][]) == 1 else \ [Spin.up, Spin.down] for i, line_set in enumerate(plot_data[]): if subplot: ax = fig.axes[i] else: ax = plt.gca() for line, spin in itertools.product(line_set, spins): if spin == Spin.up: label = line[] densities = line[][spin][mask] elif spin == Spin.down: label = "" densities = -line[][spin][mask] ax.fill_between(energies, densities, lw=0, facecolor=line[], alpha=line[]) ax.plot(energies, densities, label=label, color=line[]) ax.set_ylim(plot_data[], plot_data[]) ax.set_xlim(xmin, xmax) ax.tick_params(axis=, labelleft=) ax.yaxis.set_minor_locator(AutoMinorLocator(2)) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) loc = if subplot else ncol = 1 if subplot else num_columns if legend_on: ax.legend(loc=loc, frameon=legend_frame_on, ncol=ncol) if subplot: ax.set_xlabel(xlabel) fig.subplots_adjust(hspace=0) plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False) if in matplotlib.rcParams: ylabelcolor = matplotlib.rcParams[] else: ylabelcolor = None fig.text(0.08, 0.5, ylabel, ha=, color=ylabelcolor, va=, rotation=, transform=ax.transAxes) else: ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) return plt
Get a :obj:`matplotlib.pyplot` object of the density of states. Args: subplot (:obj:`bool`, optional): Plot the density of states for each element on separate subplots. Defaults to ``False``. width (:obj:`float`, optional): The width of the plot. height (:obj:`float`, optional): The height of the plot. xmin (:obj:`float`, optional): The minimum energy on the x-axis. xmax (:obj:`float`, optional): The maximum energy on the x-axis. yscale (:obj:`float`, optional): Scaling factor for the y-axis. colours (:obj:`dict`, optional): Use custom colours for specific element and orbital combinations. Specified as a :obj:`dict` of :obj:`dict` of the colours. For example:: { 'Sn': {'s': 'r', 'p': 'b'}, 'O': {'s': '#000000'} } The colour can be a hex code, a series of rgb values, or any other format supported by matplotlib. plot_total (:obj:`bool`, optional): Plot the total density of states. Defaults to ``True``. legend_on (:obj:`bool`, optional): Plot the graph legend. Defaults to ``True``. num_columns (:obj:`int`, optional): The number of columns in the legend. legend_frame_on (:obj:`bool`, optional): Plot a frame around the graph legend. Defaults to ``False``. legend_cutoff (:obj:`float`, optional): The cut-off (in % of the maximum density of states within the plotting range) for an elemental orbital to be labelled in the legend. This prevents the legend from containing labels for orbitals that have very little contribution in the plotting range. xlabel (:obj:`str`, optional): Label/units for x-axis (i.e. energy) ylabel (:obj:`str`, optional): Label/units for y-axis (i.e. DOS) zero_to_efermi (:obj:`bool`, optional): Normalise the plot such that the Fermi level is set as 0 eV. dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for the image. fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a single font, specified as a :obj:`str`, or several fonts, specified as a :obj:`list` of :obj:`str`. plt (:obj:`matplotlib.pyplot`, optional): A :obj:`matplotlib.pyplot` object to use for plotting. style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib style specifications, to be composed on top of Sumo base style. no_base_style (:obj:`bool`, optional): Prevent use of sumo base style. This can make alternative styles behave more predictably. Returns: :obj:`matplotlib.pyplot`: The density of states plot.
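A usage sketch; the plotter class name and its constructor arguments are assumptions, since only the method is shown in this excerpt:

>>> plotter = SDOSPlotter(dos, pdos)  # hypothetical construction
>>> plt = plotter.get_plot(xmin=-8, xmax=4, num_columns=3)
>>> plt.savefig('dos.pdf')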
def strip_context_items(self, a_string): strings_to_strip = [r"\[edit.*\]"] response_list = a_string.split(self.RESPONSE_RETURN) last_line = response_list[-1] for pattern in strings_to_strip: if re.search(pattern, last_line): return self.RESPONSE_RETURN.join(response_list[:-1]) return a_string
Strip PaloAlto-specific output. PaloAlto appends a configuration context line, e.g. [edit]; this method removes those trailing lines.
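For example, with `RESPONSE_RETURN` as a newline (the device output and the `conn` object are illustrative):

>>> output = 'set deviceconfig system hostname fw01\n[edit]'
>>> conn.strip_context_items(output)
'set deviceconfig system hostname fw01'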
def find_packages(path):
    matches = []
    # The source-root path components were elided in the source; the standard
    # Maven-style layout below is an assumption.
    root = join(path, 'src', 'main', 'java')
    for folder, dirnames, filenames in os.walk(root):
        for filename in fnmatch.filter(filenames, '*Package.java'):
            with open(join(folder, filename)) as f:
                if "implements EnamlPackage" in f.read():
                    package = os.path.relpath(folder, root)
                    matches.append(os.path.join(package, filename))
    return matches
Find all java files matching the "*Package.java" pattern within the given enaml package directory relative to the java source path.
def train(sess, loss, x_train, y_train,
          init_all=False, evaluate=None, feed=None, args=None,
          rng=None, var_list=None, fprop_args=None, optimizer=None,
          devices=None, x_batch_preprocessor=None, use_ema=False,
          ema_decay=.998, run_canary=None,
          loss_threshold=1e5, dataset_train=None, dataset_size=None):
  # Check whether the hardware behaves deterministically enough to train.
  canary.run_canary()
  if run_canary is not None:
    warnings.warn("The `run_canary` argument is deprecated. The canary "
                  "is now much cheaper and thus runs all the time. The "
                  "canary now uses its own loss function so it is not "
                  "necessary to turn off the canary when training with "
                  "a stochastic loss. Simply quit passing `run_canary`. "
                  "Passing `run_canary` may become an error on or after "
                  "2019-10-16.")

  args = _ArgsWrapper(args or {})
  fprop_args = fprop_args or {}

  # Check that necessary arguments were given (see doc above)
  if args.nb_epochs is None:
    raise ValueError("`args` must specify number of epochs")
  if optimizer is None:
    if args.learning_rate is None:
      raise ValueError("Learning rate was not given in args dict")
  assert args.batch_size, "Batch size was not given in args dict"

  if rng is None:
    rng = np.random.RandomState()

  if optimizer is None:
    optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
  else:
    if not isinstance(optimizer, tf.train.Optimizer):
      raise ValueError("optimizer object must be from a child class of "
                       "tf.train.Optimizer")

  grads = []
  xs = []
  preprocessed_xs = []
  ys = []
  if dataset_train is not None:
    assert x_train is None and y_train is None and x_batch_preprocessor is None
    if dataset_size is None:
      raise ValueError("You must provide a dataset size")
    data_iterator = dataset_train.make_one_shot_iterator().get_next()
    x_train, y_train = sess.run(data_iterator)

  devices = infer_devices(devices)
  for device in devices:
    with tf.device(device):
      x = tf.placeholder(x_train.dtype, (None,) + x_train.shape[1:])
      y = tf.placeholder(y_train.dtype, (None,) + y_train.shape[1:])
      xs.append(x)
      ys.append(y)

      if x_batch_preprocessor is not None:
        x = x_batch_preprocessor(x)
      preprocessed_xs.append(x)

      loss_value = loss.fprop(x, y, **fprop_args)

      grads.append(optimizer.compute_gradients(
          loss_value, var_list=var_list))
  num_devices = len(devices)
  print("num_devices: ", num_devices)

  grad = avg_grads(grads)
  # Trigger update operations within the default graph (such as batch_norm).
  with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
    train_step = optimizer.apply_gradients(grad)

  epoch_tf = tf.placeholder(tf.int32, [])
  batch_tf = tf.placeholder(tf.int32, [])

  if use_ema:
    if callable(ema_decay):
      ema_decay = ema_decay(epoch_tf, batch_tf)
    ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
    with tf.control_dependencies([train_step]):
      train_step = ema.apply(var_list)
    # NOTE: the EMA swap ops and the epoch/batch setup below were missing
    # from this excerpt; they are reconstructed to match the references
    # (`swap`, `index_shuf`, `nb_batches`, ...) later in the function.
    # Get a pointer to the EMA's running-average variables.
    avg_params = [ema.average(param) for param in var_list]
    # Make temporary buffers used for swapping the live and running-average
    # parameters.
    tmp_params = [tf.Variable(param, trainable=False)
                  for param in var_list]
    # Define the swapping operation.
    param_to_tmp = [tf.assign(tmp, param)
                    for tmp, param in safe_zip(tmp_params, var_list)]
    with tf.control_dependencies(param_to_tmp):
      avg_to_param = [tf.assign(param, avg)
                      for param, avg in safe_zip(var_list, avg_params)]
    with tf.control_dependencies(avg_to_param):
      tmp_to_avg = [tf.assign(avg, tmp)
                    for avg, tmp in safe_zip(avg_params, tmp_params)]
    swap = tmp_to_avg

  batch_size = args.batch_size
  assert batch_size % num_devices == 0
  device_batch_size = batch_size // num_devices

  if init_all:
    sess.run(tf.global_variables_initializer())
  else:
    initialize_uninitialized_global_variables(sess)

  for epoch in xrange(args.nb_epochs):
    if dataset_train is not None:
      nb_batches = int(math.ceil(float(dataset_size) / batch_size))
    else:
      # Indices to shuffle training set.
      index_shuf = list(range(len(x_train)))
      # Randomly repeat a few training examples each epoch to avoid
      # having a too-small final batch.
      while len(index_shuf) % batch_size != 0:
        index_shuf.append(rng.randint(len(x_train)))
      nb_batches = len(index_shuf) // batch_size
      rng.shuffle(index_shuf)
      # Shuffle once per epoch, outside the batch loop.
      x_train_shuffled = x_train[index_shuf]
      y_train_shuffled = y_train[index_shuf]

    prev = time.time()
    for batch in range(nb_batches):
      if dataset_train is not None:
        x_train_shuffled, y_train_shuffled = sess.run(data_iterator)
        start, end = 0, batch_size
      else:
        start = batch * batch_size
        end = (batch + 1) * batch_size

      # Perform one training step.
      diff = end - start
      assert diff == batch_size

      feed_dict = {epoch_tf: epoch, batch_tf: batch}
      for dev_idx in xrange(num_devices):
        cur_start = start + dev_idx * device_batch_size
        cur_end = start + (dev_idx + 1) * device_batch_size
        feed_dict[xs[dev_idx]] = x_train_shuffled[cur_start:cur_end]
        feed_dict[ys[dev_idx]] = y_train_shuffled[cur_start:cur_end]
      if cur_end != end and dataset_train is None:
        msg = ("batch_size (%d) must be a multiple of num_devices "
               "(%d).\nCUDA_VISIBLE_DEVICES: %s"
               "\ndevices: %s")
        args = (batch_size, num_devices,
                os.environ['CUDA_VISIBLE_DEVICES'],
                str(devices))
        raise ValueError(msg % args)
      if feed is not None:
        feed_dict.update(feed)

      _, loss_numpy = sess.run(
          [train_step, loss_value], feed_dict=feed_dict)

      if np.abs(loss_numpy) > loss_threshold:
        raise ValueError("Extreme loss during training: ", loss_numpy)
      if np.isnan(loss_numpy) or np.isinf(loss_numpy):
        raise ValueError("NaN/Inf loss during training")
    assert (dataset_train is not None or end == len(index_shuf))
    cur = time.time()
    _logger.info("Epoch " + str(epoch) + " took " +
                 str(cur - prev) + " seconds")
    if evaluate is not None:
      if use_ema:
        # Evaluate using the running average of the parameters.
        sess.run(swap)
      evaluate()
      if use_ema:
        # Swap the live parameters back so training can continue.
        sess.run(swap)
  if use_ema:
    # When training is done, keep the running-average parameters.
    sess.run(swap)

  return True
Run (optionally multi-replica, synchronous) training to minimize `loss` :param sess: TF session to use when training the graph :param loss: tensor, the loss to minimize :param x_train: numpy array with training inputs or tf Dataset :param y_train: numpy array with training outputs or tf Dataset :param init_all: (boolean) If set to true, all TF variables in the session are (re)initialized, otherwise only previously uninitialized variables are initialized before training. :param evaluate: function that is run after each training iteration (typically to display the test/validation accuracy). :param feed: An optional dictionary that is appended to the feeding dictionary before the session runs. Can be used to feed the learning phase of a Keras model for instance. :param args: dict or argparse `Namespace` object. Should contain `nb_epochs`, `learning_rate`, `batch_size` :param rng: Instance of numpy.random.RandomState :param var_list: Optional list of parameters to train. :param fprop_args: dict, extra arguments to pass to fprop (loss and model). :param optimizer: Optimizer to be used for training :param devices: list of device names to use for training If None, defaults to: all GPUs, if GPUs are available all devices, if no GPUs are available :param x_batch_preprocessor: callable Takes a single tensor containing an x_train batch as input Returns a single tensor containing an x_train batch as output Called to preprocess the data before passing the data to the Loss :param use_ema: bool If true, uses an exponential moving average of the model parameters :param ema_decay: float or callable The decay parameter for EMA, if EMA is used If a callable rather than a float, this is a callable that takes the epoch and batch as arguments and returns the ema_decay for the current batch. :param loss_threshold: float Raise an exception if the loss exceeds this value. This is intended to rapidly detect numerical problems. Sometimes the loss may legitimately be higher than this value. In such cases, raise the value. If needed it can be np.inf. :param dataset_train: tf Dataset instance. Used as a replacement for x_train, y_train for faster performance. :param dataset_size: integer, the size of the dataset_train. :return: True if model trained
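A minimal invocation sketch (session construction, the cleverhans loss object, and the `do_eval` hook are illustrative and not shown in this excerpt):

>>> train_params = {'nb_epochs': 6, 'batch_size': 128, 'learning_rate': 1e-3}
>>> train(sess, loss, x_train, y_train, args=train_params,
...       evaluate=do_eval, use_ema=True)
True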
def export(self, path, column_names=None, byteorder="=", shuffle=False,
           selection=False, progress=None, virtual=False, sort=None, ascending=True):
    # File extensions are inferred from the exporter method names; the
    # original literals were elided. The final branch is also made an elif.
    if path.endswith('.arrow'):
        self.export_arrow(path, column_names, byteorder, shuffle, selection,
                          progress=progress, virtual=virtual, sort=sort, ascending=ascending)
    elif path.endswith('.hdf5'):
        self.export_hdf5(path, column_names, byteorder, shuffle, selection,
                         progress=progress, virtual=virtual, sort=sort, ascending=ascending)
    elif path.endswith('.fits'):
        self.export_fits(path, column_names, shuffle, selection,
                         progress=progress, virtual=virtual, sort=sort, ascending=ascending)
    elif path.endswith('.parquet'):
        self.export_parquet(path, column_names, shuffle, selection,
                            progress=progress, virtual=virtual, sort=sort, ascending=ascending)
Exports the DataFrame to a file; the format (arrow, hdf5, fits, or parquet) is chosen from the file extension. :param DataFrameLocal df: DataFrame to export :param str path: path for file :param list[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian (not supported for fits) :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param bool virtual: When True, export virtual columns :param str sort: expression used for sorting the output :param bool ascending: sort ascending (True) or descending (False) :return:
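Usage sketch; the output format follows the file extension (file names are illustrative):

>>> df.export('galaxies.hdf5', shuffle=True)
>>> df.export('galaxies.arrow', selection=True)  # export only the current selection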
def save(self): try: email = models.EmailAddress.objects.get( email=self.validated_data["email"], is_verified=True ) except models.EmailAddress.DoesNotExist: return None token = models.PasswordResetToken.objects.create(email=email) token.send() return token
Send out a password reset if the provided data is valid. If the provided email address exists and is verified, a reset email is sent to the address. Returns: The password reset token if one was created and ``None`` otherwise.
def process(self, metrics, config): LOG.debug("Process called") for metric in metrics: metric.tags["instance-id"] = config["instance-id"] return metrics
Processes metrics. This method is called by the Snap daemon during the process phase of the execution of a Snap workflow. Examples of processing metrics include applying filtering, max, min, average functions as well as adding additional context to the metrics to name just a few. In this example we are adding a tag called 'instance-id' to every metric. Args: metrics (obj:`list` of `snap_plugin.v1.Metric`): List of metrics to be processed. Returns: :obj:`list` of `snap_plugin.v1.Metric`: List of processed metrics.
def getAsKmlPngAnimation(self, session, projectFile=None, path=None, documentName=None,
                         colorRamp=None, alpha=1.0, noDataValue=0, drawOrder=0, cellSize=None,
                         resampleMethod='NearestNeighbour'):
    # The default resample method comes from the docstring; the raster field
    # names and KMZ entry names below were elided and are assumptions.
    timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters)

    converter = RasterConverter(sqlAlchemyEngineOrSession=session)

    if isinstance(colorRamp, dict):
        converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints'])
    else:
        converter.setDefaultColorRamp(colorRamp)

    if documentName is None:
        documentName = self.fileExtension

    kmlString, binaryPngStrings = converter.getAsKmlPngAnimation(
        tableName=WMSDatasetRaster.tableName,
        timeStampedRasters=timeStampedRasters,
        rasterIdFieldName='id',
        rasterFieldName='raster',
        documentName=documentName,
        alpha=alpha,
        drawOrder=drawOrder,
        cellSize=cellSize,
        noDataValue=noDataValue,
        resampleMethod=resampleMethod)

    if path:
        directory = os.path.dirname(path)
        archiveName = (os.path.split(path)[1]).split('.')[0]
        kmzPath = os.path.join(directory, (archiveName + '.kmz'))

        with ZipFile(kmzPath, 'w') as kmz:
            kmz.writestr(archiveName + '.kml', kmlString)
            for index, binaryPngString in enumerate(binaryPngStrings):
                kmz.writestr('raster{0}.png'.format(index), binaryPngString)

    return kmlString, binaryPngStrings
Retrieve the WMS dataset as a PNG time stamped KMZ Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs. path (str, optional): Path to file where KML file will be written. Defaults to None. documentName (str, optional): Name of the KML document. This will be the name that appears in the legend. Defaults to 'Stream Network'. colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the 'interpolatedPoints' must be an integer representing the number of points to interpolate between each color given in the colors list. alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100% opaque and 0.0 is 100% transparent. Defaults to 1.0. noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters. Defaults to 0.0. drawOrder (int, optional): Set the draw order of the images. Defaults to 0. cellSize (float, optional): Define the cell size in the units of the project projection at which to resample the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the original raster cell size. It is generally better to set this to a size smaller than the original cell size to obtain a higher resolution image. However, computation time increases exponentially as the cell size is decreased. resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to NearestNeighbour. Returns: (str, list): Returns a KML string and a list of binary strings that are the PNG images.
def get_usertag(email, *tags):
    # The SOAP method name follows from the function name; the element and
    # attribute names below were elided in the source and are assumptions.
    reply = _soap_client_call('get_usertag', email, *tags)
    map_el = reply('s-gensym3')
    mapping = {}
    # The reply element can be a typed map (indicated by an xsi:type
    # attribute) or a plain sequence of usertag elements.
    type_attr = map_el.attributes().get('xsi:type')
    if type_attr and type_attr.value == 'apachens:Map':
        for usertag_el in map_el.children() or []:
            tag = _uc(str(usertag_el('key')))
            buglist_el = usertag_el('value')
            mapping[tag] = [int(bug) for bug in buglist_el.children() or []]
    else:
        for usertag_el in map_el.children() or []:
            tag = _uc(usertag_el.get_name())
            mapping[tag] = [int(bug) for bug in usertag_el.children() or []]
    return mapping
Get buglists by usertags. Parameters ---------- email : str tags : tuple of strings If tags are given, the dictionary is limited to the matching tags; if no tags are given, all available tags are returned. Returns ------- mapping : dict a mapping of usertag -> buglist
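For example, limiting the result to two tags (the address, tag names, and bug numbers are illustrative):

>>> get_usertag('debian-python@lists.debian.org', 'dpmt-todo', 'missing-tests')
{'dpmt-todo': [123456, 654321], 'missing-tests': [222222]}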
def rpc_fix_code_with_yapf(self, source, directory): source = get_source(source) return fix_code_with_yapf(source, directory)
Formats Python code to conform to the PEP 8 style guide.
def stop(self):
    super(BitfinexWSS, self).stop()
    log.info("BitfinexWSS.stop(): Stopping client..")

    log.info("BitfinexWSS.stop(): Joining receiver thread..")
    try:
        self.receiver_thread.join()
        if self.receiver_thread.is_alive():
            time.sleep(1)  # give the thread a moment to wind down
    except AttributeError:
        log.debug("BitfinexWSS.stop(): Receiver thread was not running!")

    log.info("BitfinexWSS.stop(): Joining processing thread..")
    try:
        self.processing_thread.join()
        if self.processing_thread.is_alive():
            time.sleep(1)  # give the thread a moment to wind down
    except AttributeError:
        log.debug("BitfinexWSS.stop(): Processing thread was not running!")

    log.info("BitfinexWSS.stop(): Closing websocket connection..")
    try:
        self.conn.close()
    except WebSocketConnectionClosedException:
        pass
    except AttributeError:
        # conn was already None
        pass

    self.conn = None
    self.processing_thread = None
    self.receiver_thread = None

    log.info("BitfinexWSS.stop(): Done!")
Stop all threads and modules of the client. :return:
def boto_fix_security_token_in_profile(self, connect_args):
    # Option and key names reconstructed per the standard boto profile
    # workaround for boto/boto#2100.
    profile = 'profile ' + self.boto_profile
    if boto.config.has_option(profile, 'aws_security_token'):
        connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
    return connect_args
Monkey patch for boto issue boto/boto#2100.
def to_df(self, varnames=None, ranefs=False, transformed=False, chains=None): names = self._filter_names(varnames, ranefs, transformed) if chains is None: chains = list(range(self.n_chains)) chains = listify(chains) data = [self.data[:, i, :] for i in chains] data = np.concatenate(data, axis=0) df = sum([self.level_dict[x] for x in names], []) df = pd.DataFrame({x: data[:, self.levels.index(x)] for x in df}) return df
Returns the MCMC samples in a nice, neat pandas DataFrame with all MCMC chains concatenated. Args: varnames (list): List of variable names to include; if None (default), all eligible variables are included. ranefs (bool): Whether or not to include random effects in the returned DataFrame. Default is False. transformed (bool): Whether or not to include internally transformed variables in the result. Default is False. chains (int, list): Index, or list of indexes, of chains to concatenate. E.g., [1, 3] would concatenate the second and fourth chains, and ignore any others. If None (default), concatenates all available chains.
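Usage sketch, assuming `results` is the fitted MCMC results object exposing this method:

>>> df = results.to_df(ranefs=False, chains=[0, 1])
>>> df.shape  # (samples from the two chains, number of variables)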
def gen_hot_url(hot_index, page=1):
    assert hasattr(WechatSogouConst.hot_index, hot_index)
    assert isinstance(page, int) and page > 0
    index_urls = {
        WechatSogouConst.hot_index.hot: 0,
        WechatSogouConst.hot_index.gaoxiao: 1,
        WechatSogouConst.hot_index.health: 2,
        WechatSogouConst.hot_index.sifanghua: 3,
        WechatSogouConst.hot_index.gossip: 4,
        WechatSogouConst.hot_index.technology: 5,
        WechatSogouConst.hot_index.finance: 6,
        WechatSogouConst.hot_index.car: 7,
        WechatSogouConst.hot_index.life: 8,
        WechatSogouConst.hot_index.fashion: 9,
        WechatSogouConst.hot_index.mummy: 10,
        WechatSogouConst.hot_index.travel: 11,
        WechatSogouConst.hot_index.job: 12,
        WechatSogouConst.hot_index.food: 13,
        WechatSogouConst.hot_index.history: 14,
        WechatSogouConst.hot_index.study: 15,
        WechatSogouConst.hot_index.constellation: 16,
        WechatSogouConst.hot_index.sport: 17,
        WechatSogouConst.hot_index.military: 18,
        WechatSogouConst.hot_index.game: 19,
        WechatSogouConst.hot_index.pet: 20,
    }
    # The URL format string was elided in the source and cannot be recovered;
    # _HOT_URL_TEMPLATE is a hypothetical placeholder taking the category
    # index and the 0-based page number.
    return _HOT_URL_TEMPLATE.format(index_urls[hot_index], page - 1)
Build the URL for a homepage hot-article category. Parameters ---------- hot_index : WechatSogouConst.hot_index Category of homepage hot articles (a constant): WechatSogouConst.hot_index.xxx page : int Page number Returns ------- str URL for the hot-article category
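For example (the returned URL template is internal to the library):

>>> gen_hot_url(WechatSogouConst.hot_index.technology)          # first page
>>> gen_hot_url(WechatSogouConst.hot_index.technology, page=2)  # second page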
def diff_lorenz(value_array, sigma, beta, rho): diff_array = np.zeros(3) diff_array[0] = sigma * (value_array[1]-value_array[0]) diff_array[1] = value_array[0] * (rho - value_array[2]) - value_array[1] diff_array[2] = value_array[0] * value_array[1] - beta * value_array[2] return diff_array
The Lorenz attractor differential equation :param value_array: 3d array containing the x, y, and z component values. :param sigma: Constant attractor parameter :param beta: Constant attractor parameter :param rho: Constant attractor parameter :return: 3d array of the Lorenz system evaluated at `value_array`
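A short integration sketch using SciPy; the parameter values are the classic chaotic choice, which is an assumption not stated in the source:

>>> import numpy as np
>>> from scipy.integrate import odeint
>>> f = lambda y, t: diff_lorenz(y, sigma=10., beta=8. / 3., rho=28.)
>>> trajectory = odeint(f, np.array([1., 1., 1.]), np.linspace(0, 10, 1000))
>>> trajectory.shape
(1000, 3)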
def strict_parse(cls, query_str, *specs, extra_parameters=True): plain_result = cls.parse(query_str) return WStrictURIQuery(plain_result, *specs, extra_parameters=extra_parameters)
Parse query and return :class:`.WStrictURIQuery` object :param query_str: query component of URI to parse :param specs: list of parameter specifications :param extra_parameters: whether parameters that were not specified in "specs" are allowed :return: WStrictURIQuery
def get_directory_relative_to_git_root(directory: str): return os.path.relpath(os.path.realpath(directory), get_git_root_directory(directory))
Gets the path to the given directory relative to the git repository root in which it is a subdirectory. :param directory: the directory within a git repository :return: the path to the directory relative to the git repository root
def do_banner(self, arg, arguments):
    print(arguments)
    # Docopt keys are taken from the usage string in the docstring below.
    n = int(arguments['-n'])
    c = arguments['-c']
    i = int(arguments['-i'])
    color = arguments['-r'].upper()
    Console._print(color, "", i * " " + (n - i) * c)
    Console._print(color, "", i * " " + c + " " + arguments['TEXT'])
    Console._print(color, "", i * " " + (n - i) * c)
:: Usage: banner [-c CHAR] [-n WIDTH] [-i INDENT] [-r COLOR] TEXT Arguments: TEXT The text message from which to create the banner CHAR The character for the frame. WIDTH Width of the banner INDENT Indentation of the banner COLOR The color of the banner Options: -c CHAR The character for the frame. [default: #] -n WIDTH The width of the banner. [default: 70] -i INDENT The indentation of the banner. [default: 0] -r COLOR The color of the banner. [default: BLACK] Prints a banner from a one-line text message.
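For example, with ``-n 10 -c '#' -i 0`` and the text ``hello``, the rendered frame (colour aside) is:

##########
# hello
##########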
def set_grads(params, params_with_grad): for param, param_w_grad in zip(params, params_with_grad): if param.grad is None: param.grad = torch.nn.Parameter(torch.empty_like(param)) param.grad.data.copy_(param_w_grad.grad.data)
Copies gradients from params_with_grad to params :param params: dst parameters :param params_with_grad: src parameters
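A small sketch of the copy direction (the two model objects are illustrative; their parameter lists must align in shape and order):

>>> src = list(model_copy.parameters())   # .grad populated by a backward() pass
>>> dst = list(master_model.parameters())
>>> set_grads(dst, src)                   # each dst[i].grad now mirrors src[i].grad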
def bind(self, database): self._database = database for _ in xrange(self.size): session = self._new_session() session.create() self.put(session)
Associate the pool with a database. :type database: :class:`~google.cloud.spanner_v1.database.Database` :param database: database used by the pool: used to create sessions when needed.
def quantile(self, q=0.5, interpolation='linear'):
    self._check_percentile(q)

    # Dispatch to DataFrame.quantile, which handles both scalar and
    # list-like q.
    df = self.to_frame()
    result = df.quantile(q=q, interpolation=interpolation, numeric_only=False)
    if result.ndim == 2:
        result = result.iloc[:, 0]

    if is_list_like(q):
        result.name = self.name
        return self._constructor(result, index=Float64Index(q), name=self.name)
    else:
        # Scalar q: unwrap the single value.
        return result.iloc[0]
Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} .. versionadded:: 0.18.0 This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile numpy.percentile Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64