code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def _parse_description(html_chunk):
    """Parse the book description from a slice of the page.

    Args:
        html_chunk (obj): HTMLElement containing a slice of the page
            with the book details.

    Returns:
        str/None: Description as a string, or None if not found.
    """
    matched = html_chunk.match(
        ["div", {"class": "kniha_detail_text"}],
        "p"
    )
    if not matched:
        return None

    # Normalize both <br> spellings to newlines before stripping markup.
    raw = get_first_content(matched)
    raw = raw.replace("<br />", "\n").replace("<br/>", "\n")

    return dhtmlparser.removeTags(raw).strip()
Parse description of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str/None: Description as string or None if not found.
def context_value(name): def context_value(_value, context, **_params): return defer.succeed(context[name]) return context_value
Returns an effect that drops the current value, and replaces it with the value from the context with the given name.
def any_slug_field(field, **kwargs): letters = ascii_letters + digits + return xunit.any_string(letters = letters, max_length = field.max_length)
Return random value for SlugField >>> result = any_slug_field(models.SlugField()) >>> type(result) <type 'str'> >>> from django.core.validators import slug_re >>> re.match(slug_re, result) is not None True
def close(self): if not self.closable: LOGGER.warning(, self.state_description) raise ConnectionStateError(self.state_description) self.state = self.STATE_CLOSING LOGGER.info() self.connection.close()
Cleanly shutdown the connection to RabbitMQ :raises: sprockets.mixins.amqp.ConnectionStateError
def _backup_pb_tqdm(self, dirs): with ZipFile(self.zip_filename, ) as backup_zip: for path in tqdm(dirs, desc=, total=len(dirs)): backup_zip.write(path, path[len(self.source):len(path)])
Create a backup with a tqdm progress bar.
def get_default_config(self): config = super(NetstatCollector, self).get_default_config() config.update({ : , }) return config
Returns the default collector settings
def discover(scope, loglevel, capture): "Discover systems using WS-Discovery" if loglevel: level = getattr(logging, loglevel, None) if not level: print("Invalid log level " % loglevel) return logger.setLevel(level) run(scope=scope, capture=capture)
Discover systems using WS-Discovery
def _ReadStreamDataTypeDefinition(
        self, definitions_registry, definition_values, definition_name,
        is_member=False):
    """Reads a stream data type definition.

    Args:
        definitions_registry (DataTypeDefinitionsRegistry): data type
            definitions registry.
        definition_values (dict[str, object]): definition values.
        definition_name (str): name of the definition.
        is_member (Optional[bool]): True if the data type definition is a
            member data type definition.

    Returns:
        StreamDefinition: stream data type definition.

    Raises:
        DefinitionReaderError: if the definitions values are missing or if
            the format is incorrect.
    """
    # Member definitions accept a different set of supported values.
    supported_values = (
        self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE
        if is_member else
        self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE)

    return self._ReadElementSequenceDataTypeDefinition(
        definitions_registry, definition_values, data_types.StreamDefinition,
        definition_name, supported_values)
Reads a stream data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: StreamDefinition: stream data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect.
def setParent(self, parent):
    """Set the parent of this bone and register this bone as its child.

    Note that this method must be called before many other methods to
    ensure internal state has been initialized.  Registers ``self`` in
    ``parent.child_bones`` keyed by this bone's name.
    """
    self.parent = parent
    parent.child_bones[self.name] = self
Sets the parent of this bone for all entities. Note that this method must be called before many other methods to ensure internal state has been initialized. This method also registers this bone as a child of its parent.
async def open(self, wait_for_completion=True): await self.set_position( position=Position(position_percent=0), wait_for_completion=wait_for_completion)
Open window. Parameters: * wait_for_completion: If set, function will return after device has reached target position.
def add(self, cell, overwrite_duplicate=False):
    """Add one or more cells to the library.

    Parameters
    ----------
    cell : ``Cell`` or list of ``Cell``
        Cells to be included in the library.
    overwrite_duplicate : bool
        If True an existing cell with the same name in the library
        will be overwritten.

    Returns
    -------
    out : ``GdsLibrary``
        This object.
    """
    # Normalize the single-cell case to an iterable and share one loop.
    cells = [cell] if isinstance(cell, Cell) else cell
    for item in cells:
        existing = self.cell_dict.get(item.name)
        if (not overwrite_duplicate and existing is not None
                and existing is not item):
            raise ValueError("[GDSPY] cell named {0} already present in "
                             "library.".format(item.name))
        self.cell_dict[item.name] = item
    return self
Add one or more cells to the library. Parameters ---------- cell : ``Cell`` of list of ``Cell`` Cells to be included in the library. overwrite_duplicate : bool If True an existing cell with the same name in the library will be overwritten. Returns ------- out : ``GdsLibrary`` This object.
def GetEntries(self, parser_mediator, match=None, **unused_kwargs): if not in match: return for wifi in match[]: ssid = wifi.get(, ) security_type = wifi.get(, ) event_data = plist_event.PlistTimeEventData() event_data.desc = ( ).format( ssid, security_type) event_data.key = event_data.root = datetime_value = wifi.get(, None) if datetime_value: event = time_events.PythonDatetimeEvent( datetime_value, definitions.TIME_DESCRIPTION_WRITTEN) else: date_time = dfdatetime_semantic_time.SemanticTime() event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts relevant Airport entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
def in_collision_other(self, other_manager, return_names=False, return_data=False): cdata = fcl.CollisionData() if return_names or return_data: cdata = fcl.CollisionData( request=fcl.CollisionRequest( num_max_contacts=100000, enable_contact=True)) self._manager.collide(other_manager._manager, cdata, fcl.defaultCollisionCallback) result = cdata.result.is_collision objs_in_collision = set() contact_data = [] if return_names or return_data: for contact in cdata.result.contacts: reverse = False names = (self._extract_name(contact.o1), other_manager._extract_name(contact.o2)) if names[0] is None: names = (self._extract_name(contact.o2), other_manager._extract_name(contact.o1)) reverse = True if return_names: objs_in_collision.add(names) if return_data: if reverse: names = reversed(names) contact_data.append(ContactData(names, contact)) if return_names and return_data: return result, objs_in_collision, contact_data elif return_names: return result, objs_in_collision elif return_data: return result, contact_data else: return result
Check if any object from this manager collides with any object from another manager. Parameters ------------------- other_manager : CollisionManager Another collision manager object return_names : bool If true, a set is returned containing the names of all pairs of objects in collision. return_data : bool If true, a list of ContactData is returned as well Returns ------------- is_collision : bool True if a collision occurred between any pair of objects and False otherwise names : set of 2-tup The set of pairwise collisions. Each tuple contains two names (first from this manager, second from the other_manager) indicating that the two corresponding objects are in collision. contacts : list of ContactData All contacts detected
async def close_room(self, room, namespace=None): return await self.server.close_room( room, namespace=namespace or self.namespace)
Close a room. The only difference with the :func:`socketio.Server.close_room` method is that when the ``namespace`` argument is not given the namespace associated with the class is used. Note: this method is a coroutine.
def detect(self, volume_system, vstype=): try: cmd = [] if volume_system.parent.offset: cmd.extend([, str(volume_system.parent.offset // volume_system.disk.block_size)]) if vstype in (, , , , ): cmd.extend([, vstype]) cmd.append(volume_system.parent.get_raw_path()) output = _util.check_output_(cmd, stderr=subprocess.STDOUT) volume_system.volume_source = except Exception as e: if hasattr(e, ) and "(GPT or DOS at 0)" in e.output.decode() and vstype != : volume_system.vstype = try: logger.warning("Error in retrieving volume info: mmls couldnmmls-tgptmultifsdescriptionmetameta-----unallocalloc:fsdescription'])) yield volume
Finds and mounts all volumes based on mmls.
def past_active(self): forms = [] stem = self.sfg3et[:-1] forms.append(stem+"a") forms.append(self.sfg3et+"r") forms.append(self.sfg3et) forms.append(apply_u_umlaut(stem)+"um") forms.append(apply_u_umlaut(stem)+"uð") forms.append(apply_u_umlaut(stem)+"u") return forms
Weak verbs I >>> verb = WeakOldNorseVerb() >>> verb.set_canonic_forms(["kalla", "kallaði", "kallaðinn"]) >>> verb.past_active() ['kallaða', 'kallaðir', 'kallaði', 'kölluðum', 'kölluðuð', 'kölluðu'] II >>> verb = WeakOldNorseVerb() >>> verb.set_canonic_forms(["mæla", "mælti", "mæltr"]) >>> verb.past_active() ['mælta', 'mæltir', 'mælti', 'mæltum', 'mæltuð', 'mæltu'] III >>> verb = WeakOldNorseVerb() >>> verb.set_canonic_forms(["telja", "taldi", "talinn"]) >>> verb.past_active() ['talda', 'taldir', 'taldi', 'töldum', 'tölduð', 'töldu'] IV >>> verb = WeakOldNorseVerb() >>> verb.set_canonic_forms(["vaka", "vakti", "vakat"]) >>> verb.past_active() ['vakta', 'vaktir', 'vakti', 'vöktum', 'vöktuð', 'vöktu'] :return:
def connect(self): if self.r_session: self.session_logout() if self.admin_party: self._use_iam = False self.r_session = ClientSession( timeout=self._timeout ) elif self._use_basic_auth: self._use_iam = False self.r_session = BasicSession( self._user, self._auth_token, self.server_url, timeout=self._timeout ) elif self._use_iam: self.r_session = IAMSession( self._auth_token, self.server_url, auto_renew=self._auto_renew, client_id=self._iam_client_id, client_secret=self._iam_client_secret, timeout=self._timeout ) else: self.r_session = CookieSession( self._user, self._auth_token, self.server_url, auto_renew=self._auto_renew, timeout=self._timeout ) if self.adapter is not None: self.r_session.mount(self.server_url, self.adapter) if self._client_user_header is not None: self.r_session.headers.update(self._client_user_header) self.session_login() self.r_session.hooks[].append(append_response_error_content)
Starts up an authentication session for the client using cookie authentication if necessary.
def _to_dict(self): _dict = {} if hasattr(self, ) and self.limit is not None: _dict[] = self.limit if hasattr(self, ) and self.model is not None: _dict[] = self.model return _dict
Return a json dictionary representing this model.
def add_member(self, member): select_action = self._update_scope_project_team(select_action=select_action, user=member, user_type=)
Add a single member to the scope. You may only edit the list of members if the pykechain credentials allow this. :param member: single username to be added to the scope list of members :type member: basestring :raises APIError: when unable to update the scope member
def get_ways(self, way_id=None, **kwargs): return self.get_elements(Way, elem_id=way_id, **kwargs)
Alias for get_elements() but filter the result by Way :param way_id: The Id of the way :type way_id: Integer :return: List of elements
def set_mouse_handler_for_range(self, x_min, x_max, y_min, y_max, handler=None):
    """Set the mouse handler for every cell in a rectangular region.

    The region covers x in [x_min, x_max) and y in [y_min, y_max); each
    (x, y) key of ``self.mouse_handlers`` is mapped to ``handler``.
    """
    for col in range(x_min, x_max):
        for row in range(y_min, y_max):
            self.mouse_handlers[col, row] = handler
Set mouse handler for a region.
def hydrate_struct(address_mapper, address): address_family = yield Get(AddressFamily, Dir(address.spec_path)) struct = address_family.addressables.get(address) addresses = address_family.addressables if not struct or address not in addresses: _raise_did_you_mean(address_family, address.target_name) address = next(build_address for build_address in addresses if build_address == address) inline_dependencies = [] def maybe_append(outer_key, value): if isinstance(value, six.string_types): if outer_key != : inline_dependencies.append(Address.parse(value, relative_to=address.spec_path, subproject_roots=address_mapper.subproject_roots)) elif isinstance(value, Struct): collect_inline_dependencies(value) def collect_inline_dependencies(item): for key, value in sorted(item._asdict().items(), key=_key_func): if not AddressableDescriptor.is_addressable(item, key): continue if isinstance(value, MutableMapping): for _, v in sorted(value.items(), key=_key_func): maybe_append(key, v) elif isinstance(value, MutableSequence): for v in value: maybe_append(key, v) else: maybe_append(key, value) collect_inline_dependencies(struct) hydrated_inline_dependencies = yield [Get(HydratedStruct, Address, a) for a in inline_dependencies] dependencies = [d.value for d in hydrated_inline_dependencies] def maybe_consume(outer_key, value): if isinstance(value, six.string_types): if outer_key == : value = Address.parse(value, relative_to=address.spec_path, subproject_roots=address_mapper.subproject_roots) else: value = dependencies[maybe_consume.idx] maybe_consume.idx += 1 elif isinstance(value, Struct): value = consume_dependencies(value) return value maybe_consume.idx = 0 def consume_dependencies(item, args=None): hydrated_args = args or {} for key, value in sorted(item._asdict().items(), key=_key_func): if not AddressableDescriptor.is_addressable(item, key): hydrated_args[key] = value continue if isinstance(value, MutableMapping): container_type = type(value) hydrated_args[key] = 
container_type((k, maybe_consume(key, v)) for k, v in sorted(value.items(), key=_key_func)) elif isinstance(value, MutableSequence): container_type = type(value) hydrated_args[key] = container_type(maybe_consume(key, v) for v in value) else: hydrated_args[key] = maybe_consume(key, value) return _hydrate(type(item), address.spec_path, **hydrated_args) yield HydratedStruct(consume_dependencies(struct, args={: address}))
Given an AddressMapper and an Address, resolve a Struct from a BUILD file. Recursively collects any embedded addressables within the Struct, but will not walk into a dependencies field, since those should be requested explicitly by rules.
def print_value(value: Any, type_: GraphQLInputType) -> str: return print_ast(ast_from_value(value, type_))
Convenience function for printing a Python value
def shiftx_image2d_flux(image2d_orig, xoffset): if image2d_orig.ndim == 1: naxis1 = image2d_orig.size elif image2d_orig.ndim == 2: naxis2, naxis1 = image2d_orig.shape else: print(, image2d_orig.shape) raise ValueError() return resample_image2d_flux(image2d_orig, naxis1=naxis1, cdelt1=1, crval1=1, crpix1=1, coeff=[xoffset, 1])
Resample 2D image using a shift in the x direction (flux is preserved). Parameters ---------- image2d_orig : numpy array 2D image to be resampled. xoffset : float Offset to be applied. Returns ------- image2d_resampled : numpy array Resampled 2D image.
def import_lsdinst(self, struct_data): self.name = struct_data[] self.automate = struct_data[][] self.pan = struct_data[][] if self.table is not None: self.table.import_lsdinst(struct_data)
import from an lsdinst struct
def has_neigh(tag_name, params=None, content=None, left=True):
    """Generate a matcher which matches tags with a given neighbour.

    Args:
        tag_name (str): Tag has to have a neighbour with this tagname.
        params (dict): Tag has to have a neighbour with these parameters.
        content (str): Tag has to have a neighbour with this content.
        left (bool, default True): Tag has to have the neighbour on the
            left, or right (set to ``False``).

    Returns:
        fn: Closure returning True for every matching tag.  Can be used
        as a parameter for the ``.find()`` method in HTMLElement.
    """
    def has_neigh_closure(element):
        if not element.parent \
           or not (element.isTag() and not element.isEndTag()):
            return False

        # Keep only "interesting" siblings: opening tags, non-empty text
        # nodes, and the element itself.  Materialized to a list because
        # Python 3's filter() returns a lazy iterator, which would break
        # the len() and .index() calls below.
        childs = list(filter(
            lambda x: (x.isTag() and not x.isEndTag())
                      or x.getContent().strip()
                      or x is element,
            element.parent.childs
        ))

        if len(childs) <= 1:  # the element has no siblings at all
            return False

        ioe = childs.index(element)
        if left and ioe > 0:
            return is_equal_tag(childs[ioe - 1], tag_name, params, content)

        if not left and ioe + 1 < len(childs):
            return is_equal_tag(childs[ioe + 1], tag_name, params, content)

        return False

    return has_neigh_closure
This function generates functions, which matches all tags with neighbours defined by parameters. Args: tag_name (str): Tag has to have neighbour with this tagname. params (dict): Tag has to have neighbour with this parameters. params (str): Tag has to have neighbour with this content. left (bool, default True): Tag has to have neigbour on the left, or right (set to ``False``). Returns: bool: True for every matching tag. Note: This function can be used as parameter for ``.find()`` method in HTMLElement.
def groupByKey(self, numPartitions=None): if numPartitions is None: numPartitions = self._sc.defaultParallelism return self.transform(lambda rdd: rdd.groupByKey(numPartitions))
Return a new DStream by applying groupByKey on each RDD.
def update(self, columns=(), by=(), where=(), **kwds): return self._seu(, columns, by, where, kwds)
update from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.update('a*2', ... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE a b ---- 1 10 2 20 6 30
def _utc_year(self):
    """Return a fractional UTC year, for convenience when plotting.

    An experiment, probably superseded by the ``J`` attribute below.
    """
    # Offset in days — presumably the Julian-date epoch used by
    # self._utc_float(); TODO confirm against the rest of the module.
    d = self._utc_float() - 1721059.5
    # Days in a Gregorian century: 100 common years plus 24 leap days.
    C = 365 * 100 + 24
    d -= 365
    # Century and 4-century corrections (Gregorian leap-day rule).
    d += d // C - d // (4 * C)
    d += 365
    # Days in one 4-year Julian cycle: three common years + one leap year.
    K = 365 * 3 + 366
    # Remove the leap days accumulated within the 4-year cycle.
    d -= (d + K*7//8) // K
    return d / 365.0
Return a fractional UTC year, for convenience when plotting. An experiment, probably superseded by the ``J`` attribute below.
def fetch_open_orders(self, limit: int) -> List[Order]: return self._fetch_orders_limit(self._open_orders, limit)
Fetch latest open orders, must provide a limit.
def _verified_version_from_id(version_id): try: return SerializationVersion(version_id) except ValueError as error: raise NotSupportedError("Unsupported version {}".format(version_id), error)
Load a message :class:`SerializationVersion` for the specified version ID. :param int version_id: Message format version ID :return: Message format version :rtype: SerializationVersion :raises NotSupportedError: if unsupported version ID is received
def configure_stream(level=): root_logger = logging.getLogger() root_logger.setLevel(level) template = "[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s" formatter = logging.Formatter(template) console = logging.StreamHandler() console.setLevel(level) console.setFormatter(formatter) root_logger.addHandler(console) return root_logger
Configure root logger using a standard stream handler. Args: level (string, optional): lowest level to log to the console Returns: logging.RootLogger: root logger instance with attached handler
def commit_withdrawal(self, account_id, withdrawal_id, **params): response = self._post( , , account_id, , withdrawal_id, , data=params) return self._make_api_object(response, Withdrawal)
https://developers.coinbase.com/api/v2#commit-a-withdrawal
def lattice(prng, n_features, alpha, random_sign=False, low=0.3, high=0.7):
    """Return the adjacency matrix for a lattice network.

    The result is a Toeplitz matrix with zeros along the diagonal; each
    generating row is normalized so its entries sum to +/-1.  Each row has
    at most ``int(1 + round(alpha * n_features / 2))`` edges.

    Parameters
    ----------
    prng : numpy.random.RandomState
        Source of randomness.
    n_features : int
    alpha : float (0, 1)
        The complexity / sparsity factor.
    random_sign : bool (default=False)
        Randomly modulate each entry by +/-1 with probability 1/2;
        otherwise all entries are negative.
    low : float (0, 1) (default=0.3)
        Lower bound for the uniform draw before normalization.
    high : float (0, 1) > low (default=0.7)
        Upper bound for the uniform draw before normalization.

    Raises
    ------
    Exception
        If a non-zero-sum row cannot be drawn in MAX_ATTEMPTS tries.
    """
    degree = int(1 + np.round(alpha * n_features / 2.))

    if random_sign:
        # Random +/-1 modulation with probability 1/2.
        sign_row = -1.0 * np.ones(degree) + 2 * (
            prng.uniform(low=0, high=1, size=degree) > .5
        )
    else:
        sign_row = -1.0 * np.ones(degree)

    # Re-draw if the row happens to sum to zero, since the normalization
    # below would otherwise divide by zero.
    MAX_ATTEMPTS = 5
    attempt = 0
    row = np.zeros((n_features,))
    while np.sum(row) == 0 and attempt < MAX_ATTEMPTS:
        row = np.zeros((n_features,))
        row[1:1 + degree] = sign_row * prng.uniform(low=low, high=high,
                                                    size=degree)
        attempt += 1

    if np.sum(row) == 0:
        # NOTE(review): the flattened source interleaved a stray ``return``
        # around this raise; raising and then normalizing is the only
        # reading consistent with the statements that follow.
        raise Exception("InvalidLattice", "Rows sum to 0.")

    row /= np.abs(np.sum(row))
    return sp.linalg.toeplitz(c=row, r=row)
Returns the adjacency matrix for a lattice network. The resulting network is a Toeplitz matrix with random values summing between -1 and 1 and zeros along the diagonal. The range of the values can be controlled via the parameters low and high. If random_sign is false, all entries will be negative, otherwise their sign will be modulated at random with probability 1/2. Each row has maximum edges of np.ceil(alpha * n_features). Parameters ----------- n_features : int alpha : float (0, 1) The complexity / sparsity factor. random sign : bool (default=False) Randomly modulate each entry by 1 or -1 with probability of 1/2. low : float (0, 1) (default=0.3) Lower bound for np.random.RandomState.uniform before normalization. high : float (0, 1) > low (default=0.7) Upper bound for np.random.RandomState.uniform before normalization.
def unit_overlap(evaluated_model, reference_model): if not (isinstance(evaluated_model, TfModel) and isinstance(reference_model, TfModel)): raise ValueError( "Arguments has to be instances of ") terms1 = frozenset(evaluated_model.terms) terms2 = frozenset(reference_model.terms) if not terms1 and not terms2: raise ValueError( "Documents can't be empty. Please pass the valid documents.") common_terms_count = len(terms1 & terms2) return common_terms_count / (len(terms1) + len(terms2) - common_terms_count)
Computes unit overlap of two text documents. Documents has to be represented as TF models of non-empty document. :returns float: 0 <= overlap <= 1, where 0 means no match and 1 means exactly the same.
async def get_data(self, url): logger.debug(, url) with aiohttp.ClientSession() as session: async with session.get(url, headers=self.headers) as response: body = json.loads((await response.read()).decode()) if response.status == HTTPStatus.OK: if url != self.url_builder(): await self._update_config() return body elif response.status == HTTPStatus.TOO_MANY_REQUESTS: timeout = self.calculate_timeout( response.headers[], ) logger.warning( , timeout, ) await asyncio.sleep(timeout) return await self.get_data(url) logger.warning( , response.status, body.get(, ) )
Get data from the TMDb API via :py:func:`aiohttp.get`. Notes: Updates configuration (if required) on successful requests. Arguments: url (:py:class:`str`): The endpoint URL and params. Returns: :py:class:`dict`: The parsed JSON result.
def _RunCommand(self, command): arguments = shlex.split(command) process = subprocess.Popen( arguments, stderr=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) if not process: raise RuntimeError("Running: {0:s} failed.".format(command)) output, error = process.communicate() if process.returncode != 0: error = "\n".join(error.split("\n")[-5:]) raise RuntimeError("Running: {0:s} failed with error:\n{1:s}.".format( command, error)) return output
Runs the command.
def main(sample_id, assembly_file, minsize): logger.info("Starting assembly file processing") warnings = [] fails = "" logger.info("Starting assembly parsing") assembly_obj = Assembly(assembly_file, 0, 0, sample_id, minsize) if in assembly_file: assembler = "SPAdes" else: assembler = "MEGAHIT" with open(".warnings", "w") as warn_fh: t_80 = int(minsize) * 0.8 t_150 = int(minsize) * 1.5 assembly_len = assembly_obj.get_assembly_length() logger.debug("Checking assembly length: {}".format(assembly_len)) if assembly_obj.nORFs < 1: warn_msg = "No complete ORFs found." warn_fh.write(warn_msg) fails = warn_msg if assembly_len < t_80: logger.warning("Assembly size ({}) smaller than the minimum " "threshold of 80% of expected genome size. " "Applying contig filters without the k-mer " "coverage filter".format(assembly_len)) assembly_len = assembly_obj.get_assembly_length() logger.debug("Checking updated assembly length: " "{}".format(assembly_len)) if assembly_len < t_80: warn_msg = "Assembly size smaller than the minimum" \ " threshold of 80% of expected genome size: {}".format( assembly_len) logger.warning(warn_msg) warn_fh.write(warn_msg) fails = warn_msg if assembly_len > t_150: warn_msg = "Assembly size ({}) larger than the maximum" \ " threshold of 150% of expected genome size.".format( assembly_len) logger.warning(warn_msg) warn_fh.write(warn_msg) fails = warn_msg with open(".report.json", "w") as json_report: json_dic = { "tableRow": [{ "sample": sample_id, "data": [ {"header": "Contigs ({})".format(assembler), "value": len(assembly_obj.contigs), "table": "assembly", "columnBar": True}, {"header": "Assembled BP ({})".format(assembler), "value": assembly_len, "table": "assembly", "columnBar": True}, {"header": "ORFs", "value": assembly_obj.nORFs, "table": "assembly", "columnBar":False} ] }], } if warnings: json_dic["warnings"] = [{ "sample": sample_id, "table": "assembly", "value": warnings }] if fails: json_dic["fail"] = [{ "sample": sample_id, "table": "assembly", 
"value": [fails] }] json_report.write(json.dumps(json_dic, separators=(",", ":"))) with open(".status", "w") as status_fh: status_fh.write("pass")
Main executor of the process_mapping template. Parameters ---------- sample_id : str Sample Identification string. assembly: str Path to the fatsa file generated by the assembler. minsize: str Min contig size to be considered a complete ORF
def get_mutator(self, obj, obj_type): if obj_type == unicode: obj_type = str obj = str(obj) return self._get_random(obj_type)(obj)
Get a random mutator for the given type
def find_lb_by_name(self, name): log.debug("Finding load balancers matching name " % name) matching = filter(lambda l: l[] == name, self.list_lbs()) if len(matching) > 1: raise ValueError("Ambiguous; more than one load balancer matched " % name) if matching: log.info("Found existing load balancer, %s" % matching[0][]) return matching[0] return None
Look up a LBaaS instance by name (rather than id) :attr string name: The LBaaS name assigned at creation time :rtype :class:`dict`
def dnld_annotation(assc_file, prt=sys.stdout):
    """Download gaf, gpad, or gpi from http://current.geneontology.org/annotations/

    Fetches ``<basename>.gz`` into ``assc_file`` unless the file is
    already present on disk.
    """
    if os.path.isfile(assc_file):
        return
    base_url = "http://current.geneontology.org/annotations/"
    assc_base = os.path.split(assc_file)[1]
    src = os.path.join(base_url, "{ASSC}.gz".format(ASSC=assc_base))
    dnld_file(src, assc_file, prt, loading_bar=None)
Download gaf, gpad, or gpi from http://current.geneontology.org/annotations/
def delete(self,pool_or_cursor): ".. warning:: pgmock doesnt tested" vals=self.pkey_vals() whereclause=.join(%k for k in self.PKEY.split()) q=%(self.TABLE,whereclause) commit_or_execute(pool_or_cursor,q,vals)
.. warning:: pgmock doesn't support delete yet, so this isn't tested
def model(self, inputs, mode=): training = (mode == ) with tf.variable_scope() as scope: conv = tf.layers.conv2d(inputs=inputs, filters=16, kernel_size=[3, 3], padding=) bn = tf.layers.batch_normalization(inputs=conv, training=training) bn = tf.nn.relu(bn) conv = tf.layers.conv2d(inputs=bn, filters=16, kernel_size=[3, 3], padding=) bn = tf.layers.batch_normalization(inputs=conv, training=training) bn = tf.nn.relu(bn) pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding=, name=scope.name) with tf.variable_scope() as scope: conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding=) bn = tf.layers.batch_normalization(inputs=conv, training=training) bn = tf.nn.relu(bn) conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding=) bn = tf.layers.batch_normalization(inputs=conv, training=training) bn = tf.nn.relu(bn) pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding=, name=scope.name) with tf.variable_scope() as scope: conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding=) bn = tf.layers.batch_normalization(inputs=conv, training=training) bn = tf.nn.relu(bn) conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding=) bn = tf.layers.batch_normalization(inputs=conv, training=training) bn = tf.nn.relu(bn) pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding=, name=scope.name) with tf.variable_scope() as scope: flat = tf.layers.flatten(pool) fc = tf.layers.dense(inputs=flat, units=32, activation=tf.nn.relu) softmax = tf.layers.dense(inputs=fc, units=self.num_classes, activation=tf.nn.softmax) return softmax
Build a simple convnet (BN before ReLU). Args: inputs: a tensor of size [batch_size, height, width, channels] mode: string in ['train', 'test'] Returns: the last op containing the predictions Note: Best score Step: 7015 - Epoch: 18/20 - best batch acc: 0.8984 - loss: 1.5656 Worst score Step: 7523 - Epoch: 20/20 - best batch acc: 0.7734 - loss: 1.6874
def check_new_version_available(this_version): import requests pypi_url = resp = requests.get(pypi_url, timeout=1.5) top_version = resp.json()[][] return this_version != top_version
Checks if a newer version of Zappa is available. Returns True is updateable, else False.
def tomindec(origin):
    """Convert [+/-]DDD.DDDDD to a tuple (degrees, minutes).

    The degrees part carries the sign; minutes is the magnitude of the
    fractional part expressed in minutes of arc.

    Args:
        origin: numeric value or numeric string in decimal degrees.

    Returns:
        tuple: (int degrees, float minutes).
    """
    origin = float(origin)
    degrees = int(origin)
    # Use the absolute fractional remainder: the previous ``origin % 1``
    # mishandled negative inputs (e.g. -5.25 produced 45.0' not 15.0').
    minutes = abs(origin - degrees) * 60
    return degrees, minutes
Convert [+/-]DDD.DDDDD to a tuple (degrees, minutes)
def get_active_entry(user, select_for_update=False): entries = apps.get_model(, ).no_join if select_for_update: entries = entries.select_for_update() entries = entries.filter(user=user, end_time__isnull=True) if not entries.exists(): return None if entries.count() > 1: raise ActiveEntryError() return entries[0]
Returns the user's currently-active entry, or None.
def get_str(self, key, default=None):
    """Look up ``key`` in each provider, in order.

    Args:
        key (str | unicode | None): Key to lookup.
        default (str | unicode | None): Default to use if key is not
            configured.

    Returns:
        (str | None): First non-None provider value, else ``default``.
    """
    if not key:
        return default
    candidates = (provider.get_str(key) for provider in self.providers)
    return next((value for value in candidates if value is not None), default)
Args: key (str | unicode | None): Key to lookup default (str | unicode | None): Default to use if key is not configured Returns: (str | None): Value of key, if defined
def pmt_angles(self): if self._pmt_angles == []: mask = (self.pmts.du == 1) & (self.pmts.floor == 1) self._pmt_angles = self.pmts.dir[mask] return self._pmt_angles
A list of PMT directions sorted by PMT channel, on DU-1, floor-1
def get_error(self, block=False, timeout=None):
    """Remove and return an error from ``self._errors``.

    Args:
        block (bool): if True, block until an error is available (up to
            ``timeout`` seconds); otherwise return immediately.
        timeout (int | None): maximum number of seconds to block.

    Returns:
        The next error if the queue is not empty (or one arrives in
        time), else None.
    """
    import queue  # local import keeps this fix self-contained

    try:
        return self._errors.get(block=block, timeout=timeout)
    except queue.Empty:
        # Only an empty queue is an expected miss; the previous broad
        # ``except Exception`` silently swallowed real errors too.
        return None
Removes and returns an error from self._errors Args: block(bool): if True block until a RTMMessage is available, else it will return None when self._inbox is empty timeout(int): it blocks at most timeout seconds Returns: error if inbox is not empty, else None
def is_published(self): field980 = record_get_field_instances(self.record, ) field773 = record_get_field_instances(self.record, ) for f980 in field980: if in field_get_subfields(f980): for f773 in field773: if in field_get_subfields(f773): return True return False
Check fields 980 and 773 to see if the record has already been published. :return: True is published, else False
def detect_number_of_cores():
    """Detects the number of cores on a system.  Cribbed from pp.

    Tries POSIX ``sysconf`` first, then BSD/macOS ``sysctl``, then the
    Windows ``NUMBER_OF_PROCESSORS`` environment variable, and finally
    falls back to 1.
    """
    if hasattr(os, "sysconf"):
        if "SC_NPROCESSORS_ONLN" not in os.sysconf_names:
            # BSD / macOS: sysconf lacks the key, so ask sysctl instead.
            return int(subprocess.check_output(["sysctl", "-n", "hw.ncpu"]))
        count = os.sysconf("SC_NPROCESSORS_ONLN")
        if isinstance(count, int) and count > 0:
            return count
    try:
        count = int(os.environ.get("NUMBER_OF_PROCESSORS", ""))
    except ValueError:
        pass
    else:
        if count > 0:
            return count
    return 1
Detects the number of cores on a system. Cribbed from pp.
def gen_primes():
    """Generate an infinite sequence of prime numbers.

    Incremental Sieve of Eratosthenes: ``composites`` maps each upcoming
    composite number to the list of primes known to divide it.
    """
    composites = {}
    candidate = 2
    while True:
        witnesses = composites.pop(candidate, None)
        if witnesses is None:
            # candidate is prime; its square is the first composite it
            # will be responsible for marking.
            yield candidate
            composites[candidate * candidate] = [candidate]
        else:
            # Slide each witness prime forward to its next multiple.
            for prime in witnesses:
                composites.setdefault(prime + candidate, []).append(prime)
        candidate += 1
Generate an infinite sequence of prime numbers.
def p_labelled_statement(self, p):
    # Grammar rule: labelled_statement : identifier COLON statement
    # NOTE(review): in ply-style parsers the docstring carries the grammar
    # rule, so no docstring is added here — editing it would change the
    # parser.  p[1] is the label identifier, p[3] the labelled statement.
    p[0] = ast.Label(identifier=p[1], statement=p[3])
labelled_statement : identifier COLON statement
def padding(s, bs=AES.block_size): s = to_bytes(s) if len(s) % bs == 0: res = s + b.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - 1)])) + to_bytes(chr(96 - bs)) elif len(s) % bs > 0 and len(s) > bs: res = s + b.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - len(s) % bs - 1)])) + to_bytes(chr(96 + len(s) % bs - bs)) else: res = s + b.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - len(s) - 1)])) + to_bytes(chr(96 + len(s) - bs)) return res
Fills a bytes-like object with arbitrary symbols to make its length divisible by `bs`.
def describe_parameter_group(name, Filters=None, MaxRecords=None, Marker=None, region=None, key=None, keyid=None, profile=None): res = __salt__[](name, tags=None, region=region, key=key, keyid=keyid, profile=profile) if not res.get(): return {: bool(res)} try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return {: bool(conn)} kwargs = {} for key in (, ): if locals()[key] is not None: kwargs[key] = str(locals()[key]) if locals()[] is not None: kwargs[] = int(locals()[]) info = conn.describe_db_parameter_groups(DBParameterGroupName=name, **kwargs) if not info: return {: bool(info), : .format(name)} return {: bool(info), : .format(name)} except ClientError as e: return {: __utils__[](e)}
Returns a list of `DBParameterGroup` descriptions. CLI example to description of parameter group:: salt myminion boto_rds.describe_parameter_group parametergroupname\ region=us-east-1
# Builds the media upload path for an attached image: a per-user folder (the
# user's pk) joined with the generated file name plus the original extension.
# NOTE(review): stripped literals — the `else` branch of the conditional
# (fallback folder, presumably 'common') and the leading path components in
# os.path.join (presumably 'media', 'images') must be restored.
def get_upload_path(self, filename): user_folder = str(self.user.pk) if self.user else root, ext = os.path.splitext(filename) return os.path.join(, , user_folder, self.get_file_name(filename) + ext)
Override this in proxy subclass to customize upload path. Default upload path is :file:`/media/images/<user.id>/<filename>.<ext>` or :file:`/media/images/common/<filename>.<ext>` if user is not set. ``<filename>`` is returned by :meth:`~generic_images.models.AbstractAttachedImage.get_file_name` method. By default it is probable id of new image (it is predicted as it is unknown at this stage).
# Polls the batch interface for every dispatched job of *link*, optionally
# demoting pending/running jobs to failed, and tallies the states.
# Side effects: mutates each job_details.status in place, writes it back into
# link.jobs, and updates the link's own status via the protected
# link._set_status_self — kept byte-identical because the mutation order
# matters to the caller.
def _check_link_completion(self, link, fail_pending=False, fail_running=False): status_vect = JobStatusVector() for job_key, job_details in link.jobs.items(): if job_key.find(JobDetails.topkey) >= 0: continue job_details.status = self._interface.check_job(job_details) if job_details.status == JobStatus.pending: if fail_pending: job_details.status = JobStatus.failed elif job_details.status == JobStatus.running: if fail_running: job_details.status = JobStatus.failed status_vect[job_details.status] += 1 link.jobs[job_key] = job_details link._set_status_self(job_details.jobkey, job_details.status) return status_vect
Internal function to check the completion of all the dispatched jobs Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states.
# Normalizes a file path to POSIX separators by replacing each separator in
# *separators* (default NORMALIZE_PATH_SEPS) with '/', then stripping a
# leading two-character prefix when the startswith check matches.
# NOTE(review): the argument of `norm_file.startswith()` was stripped —
# given the following `norm_file[2:]`, it is presumably './' — TODO confirm
# and restore.
def normalize_file(file, separators=None): if separators is None: separators = NORMALIZE_PATH_SEPS norm_file = file for sep in separators: norm_file = norm_file.replace(sep, posixpath.sep) if norm_file.startswith(): norm_file = norm_file[2:] return norm_file
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``). *file* (:class:`str`) is the file path. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. This does not need to include the POSIX path separator (``'/'``), but including it will not affect the results. Default is :data:`None` for :data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty container (e.g., an empty tuple ``()``). Returns the normalized file path (:class:`str`).
def set_weights(self, weights_values: dict, ignore_missing=False):
    """Assign stored parameter values to the network's TF variables.

    :param weights_values: mapping of layer name -> {param name: value}
    :param ignore_missing: when True, ValueErrors from missing/mismatched
        variables are swallowed instead of re-raised.
    """
    scope_name = self.__class__.__name__.lower()
    with tf.variable_scope(scope_name):
        for layer, params in weights_values.items():
            with tf.variable_scope(layer, reuse=True):
                for name, value in params.items():
                    try:
                        variable = tf.get_variable(name)
                        self._session.run(variable.assign(value))
                    except ValueError:
                        if not ignore_missing:
                            raise
Sets the weights values of the network. :param weights_values: dictionary with weights for each layer
# Fetches the stored access policies (for Shared Access Signatures) of an
# Azure table by issuing a GET against the table's ACL endpoint and parsing
# the XML response into signed identifiers.
# NOTE(review): stripped literals — the _validate_not_none label, the HTTP
# method string (presumably 'GET'), the path prefix (presumably '/') and the
# query keys (presumably 'comp'/'timeout') must be restored.
def get_table_acl(self, table_name, timeout=None): _validate_not_none(, table_name) request = HTTPRequest() request.method = request.host_locations = self._get_host_locations(secondary=True) request.path = + _to_str(table_name) request.query = { : , : _int_to_str(timeout), } return self._perform_request(request, _convert_xml_to_signed_identifiers)
Returns details about any stored access policies specified on the table that may be used with Shared Access Signatures. :param str table_name: The name of an existing table. :param int timeout: The server timeout, expressed in seconds. :return: A dictionary of access policies associated with the table. :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
def begin(self):
    """Called once before using the session; caches the global step tensor.

    Raises:
        RuntimeError: if no global step tensor has been created in the graph.
    """
    self._global_step_tensor = tf.train.get_global_step()
    if self._global_step_tensor is None:
        # Previously raised with no message, which made the failure
        # impossible to diagnose from the traceback alone.
        raise RuntimeError(
            "Global step should be created to use this hook.")
Called once before using the session to check global step.
# Issues a PUT to <api_url><resource>/<billomat_id><command> with the JSON
# payload and funnels the response through the shared handler. An int id is
# coerced to str; *command* defaults to empty and is otherwise prefixed.
# NOTE(review): stripped literals — the empty-command default ('' ?) and the
# '/' separators in the command prefix and URL join must be restored.
def _create_put_request(self, resource, billomat_id, command=None, send_data=None): assert (isinstance(resource, str)) if isinstance(billomat_id, int): billomat_id = str(billomat_id) if not command: command = else: command = + command response = self.session.put( url=self.api_url + resource + + billomat_id + command, data=json.dumps(send_data), ) return self._handle_response(response)
Creates a PUT request and returns the response data
# Keeps only the sacct job dicts whose status falls in FAILED_CATEGORIES
# (failed=True) or NORMAL_CATEGORIES (failed=False).
# NOTE(review): the lookup key in `job[]` was stripped — presumably 'State'
# or similar sacct status field; TODO restore.
def filter_jobs(sacct_jobs, failed=True): categories = FAILED_CATEGORIES if failed else NORMAL_CATEGORIES filtered_jobs = [job for job in sacct_jobs if job[] in categories] return filtered_jobs
Filter jobs that have a FAILED etc. status.
def get_config_map(self, name):
    """Fetch a ConfigMap by name from the server.

    Raises an exception on error.

    :param name: str, name of the ConfigMap to retrieve
    :returns: ConfigMapResponse wrapping the requested ConfigMap
    """
    raw_response = self.os.get_config_map(name)
    return ConfigMapResponse(raw_response.json())
Get a ConfigMap object from the server Raises exception on error :param name: str, name of configMap to get from the server :returns: ConfigMapResponse containing the ConfigMap with the requested name
# Looks up the timezone containing (lng, lat) via precomputed shortcut
# tables in a binary file: a direct unique-id hit returns immediately;
# otherwise candidate polygons are narrowed by bounding boxes, hole checks,
# and full point-in-polygon tests. Returns None when no polygon is nearby.
# NOTE(review): the trailing `raise ValueError()` lost its message literal;
# it fires only when candidates existed but none matched — TODO restore text.
def timezone_at(self, *, lng, lat): lng, lat = rectify_coordinates(lng, lat) x = coord2int(lng) y = coord2int(lat) shortcut_id_x, shortcut_id_y = coord2shortcut(lng, lat) self.shortcuts_unique_id.seek( (180 * NR_SHORTCUTS_PER_LAT * NR_BYTES_H * shortcut_id_x + NR_BYTES_H * shortcut_id_y)) try: return timezone_names[unpack(DTYPE_FORMAT_H, self.shortcuts_unique_id.read(NR_BYTES_H))[0]] except IndexError: possible_polygons = self.polygon_ids_of_shortcut(shortcut_id_x, shortcut_id_y) nr_possible_polygons = len(possible_polygons) if nr_possible_polygons == 0: return None if nr_possible_polygons == 1: return timezone_names[self.id_of(possible_polygons[0])] ids = self.id_list(possible_polygons, nr_possible_polygons) for i in range(nr_possible_polygons): same_element = all_the_same(pointer=i, length=nr_possible_polygons, id_list=ids) if same_element != -1: return timezone_names[same_element] polygon_nr = possible_polygons[i] self.poly_max_values.seek(4 * NR_BYTES_I * polygon_nr) boundaries = self.fromfile(self.poly_max_values, dtype=DTYPE_FORMAT_SIGNED_I_NUMPY, count=4) if not (x > boundaries[0] or x < boundaries[1] or y > boundaries[2] or y < boundaries[3]): outside_all_holes = True for hole_coordinates in self._holes_of_line(polygon_nr): if inside_polygon(x, y, hole_coordinates): outside_all_holes = False break if outside_all_holes: if inside_polygon(x, y, self.coords_of(line=polygon_nr)): return timezone_names[ids[i]] raise ValueError()
this function looks up in which polygons the point could be included in to speed things up there are shortcuts being used (stored in a binary file) especially for large polygons it is expensive to check if a point is really included, so certain simplifications are made and even when you get a hit the point might actually not be inside the polygon (for example when there is only one timezone nearby) if you want to make sure a point is really inside a timezone use 'certain_timezone_at' :param lng: longitude of the point in degree (-180 to 180) :param lat: latitude in degree (90 to -90) :return: the timezone name of a matching polygon or None
def set_filters(self, filters):
    """Replace the current filter set with *filters*, validating each entry.

    Each key/value pair is routed through ``add_filter`` so that any
    per-filter validation still runs.

    :param filters: dict mapping filter name -> filter value
    :raises TypeError: if *filters* is not a dict
    """
    if not isinstance(filters, dict):
        # TypeError is the precise exception for a wrong argument type and
        # remains backward-compatible with callers catching Exception.
        raise TypeError("filters must be a dict")
    self.filters = {}
    for key, value in filters.items():
        self.add_filter(key, value)
set and validate filters dict
def set_value(self, value):
    """Set the value associated with the keyword.

    :raises TypeError: if *value* is not a string.
    """
    if isinstance(value, str):
        self.__value = value
    else:
        raise TypeError("A value must be a string, got %s." % value)
Set the value associated with the keyword
def get_stats(self, stat_name):
    """Collect one named statistic across all tracked objects.

    :param stat_name: requested statistic name.
    :returns: list of the requested statistic's value for every object,
        in the iteration order of ``self.statistics``.
    """
    return [self.get_stat(key, stat_name) for key in self.statistics]
:param stat_name: requested statistics name. :returns: all values of the requested statistic for all objects.
# Prints the slpkg configuration file, colorizing (cyan) the lines that hold
# real settings and echoing comment lines plainly.
# NOTE(review): the condition `line.startswith(" ` is truncated — it was
# presumably `line.startswith("#"):` (comment detection); the code as shown
# is syntactically broken and must be restored before use.
def view(self): print("") conf_args = [ "RELEASE", "SLACKWARE_VERSION", "COMP_ARCH", "BUILD_PATH", "PACKAGES", "PATCHES", "CHECKMD5", "DEL_ALL", "DEL_BUILD", "SBO_BUILD_LOG", "MAKEFLAGS", "DEFAULT_ANSWER", "REMOVE_DEPS_ANSWER", "SKIP_UNST", "RSL_DEPS", "DEL_DEPS", "USE_COLORS", "DOWNDER", "DOWNDER_OPTIONS", "SLACKPKG_LOG", "ONLY_INSTALLED", "PRG_BAR", "EDITOR", "NOT_DOWNGRADE" ] read_conf = Utils().read_file(self.config_file) for line in read_conf.splitlines(): if not line.startswith(" print("{0}".format(line)) else: print("{0}{1}{2}".format(self.meta.color["CYAN"], line, self.meta.color["ENDC"])) print("")
View slpkg config file
def clean_promoted_guids(raw_promoted_guids):
    """Validate the promoted-GUID list format.

    Each entry must be a two-item row ``[guid, weight]`` where the guid is
    a string and the weight is numeric. The first malformed row rejects
    the whole list.

    :param raw_promoted_guids: list of [guid, weight] rows.
    :returns: the input list unchanged when every row is well formed,
        otherwise an empty list.
    """
    for row in raw_promoted_guids:
        if len(row) != 2:
            return []
        guid, weight = row
        # The original check referenced the Python 2 `unicode` builtin,
        # which raises NameError on Python 3 whenever the guid is not a
        # str; plain `str` covers the needed cases here.
        if not isinstance(guid, str) or not isinstance(weight, (int, float)):
            return []
    return raw_promoted_guids
Verify that the promoted GUIDs are formatted correctly, otherwise strip it down into an empty list.
# Deletes an activity (run) by id via an HTTP DELETE, raising on any
# non-2xx response, and returns the raw response object.
# NOTE(review): the two leading arguments of self._build_url were stripped —
# presumably the API path segments (e.g. 'activities', <version>); restore
# before use.
def delete_activity(self, id_num): url = self._build_url(, , id_num) r = self.session.delete(url) r.raise_for_status() return r
Delete an activity (run). :param id_num: The activity ID to delete
# Fills the proposal queue up to queue_size: bound-based proposals when the
# sampler is beyond the unit-cube bound, otherwise uniform draws from the
# unit cube, then evolves all proposals (via the pool map self.M when
# use_pool_evolve is set, else serially).
# Kept byte-identical: the RNG draw order and the parallel/serial dispatch
# are behavior the rest of the sampler depends on.
def _fill_queue(self, loglstar): point_queue = [] axes_queue = [] while self.nqueue < self.queue_size: if self._beyond_unit_bound(loglstar): point, axes = self.propose_point() evolve_point = self.evolve_point else: point = self.rstate.rand(self.npdim) axes = np.identity(self.npdim) evolve_point = sample_unif point_queue.append(point) axes_queue.append(axes) self.nqueue += 1 loglstars = [loglstar for i in range(self.queue_size)] scales = [self.scale for i in range(self.queue_size)] ptforms = [self.prior_transform for i in range(self.queue_size)] logls = [self.loglikelihood for i in range(self.queue_size)] kwargs = [self.kwargs for i in range(self.queue_size)] args = zip(point_queue, loglstars, axes_queue, scales, ptforms, logls, kwargs) if self.use_pool_evolve: self.queue = list(self.M(evolve_point, args)) else: self.queue = list(map(evolve_point, args))
Sequentially add new live point proposals to the queue.
def new(self, array):
    """Wrap *array* in a fresh instance sharing this object's metadata.

    The replacement array must have the same length as the current one.

    >>> d = DictArray({'PGA': [0.01, 0.02, 0.04], 'PGV': [0.1, 0.2]})
    >>> d.new(numpy.arange(0, 5, 1))  # array of length 5 = 3 + 2
    <DictArray PGA: [0 1 2] PGV: [3 4]>
    """
    assert len(array) == len(self.array)
    # bypass __init__ and copy the metadata over directly
    clone = object.__new__(self.__class__)
    clone.dt = self.dt
    clone.slicedic = self.slicedic
    clone.array = array
    return clone
Convert an array of compatible length into a DictArray: >>> d = DictArray({'PGA': [0.01, 0.02, 0.04], 'PGV': [0.1, 0.2]}) >>> d.new(numpy.arange(0, 5, 1)) # array of length 5 = 3 + 2 <DictArray PGA: [0 1 2] PGV: [3 4]>
def to_csv(self, filename, stimuli=None, inhibitors=None, prepend=""):
    """Write the list of clampings to a CSV file via a DataFrame round-trip.

    Parameters
    ----------
    filename : str
        Absolute path of the CSV file to write.
    stimuli : Optional[list[str]]
        Stimuli names; when given, stimuli are encoded as {0,1} instead
        of {-1,1}.
    inhibitors : Optional[list[str]]
        Inhibitor names; when given, inhibitors are renamed and encoded
        as {0,1} instead of {-1,1}.
    prepend : str
        Prefix added to each column name.
    """
    frame = self.to_dataframe(stimuli, inhibitors, prepend)
    frame.to_csv(filename, index=False)
Writes the list of clampings to a CSV file Parameters ---------- filename : str Absolute path where to write the CSV file stimuli : Optional[list[str]] List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}. inhibitors : Optional[list[str]] List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}. prepend : str Columns are renamed using the given string at the beginning
def extract_full(rec, sites, flank, fw):
    """Write the full flanking sequence around each site to *fw* as FASTA.

    For every site a window of ``flank`` bases on each side is cut out
    (clamped to the sequence ends), stripped of leading/trailing N/n, and
    emitted under the id ``<rec.name>:<site>``.
    """
    for site in sites:
        start = max(site - flank, 0)
        end = min(site + flank, len(rec))
        fragment = rec.seq[start:end].strip("Nn")
        record = SeqRecord(fragment,
                           id="{0}:{1}".format(rec.name, site),
                           description="")
        SeqIO.write([record], fw, "fasta")
Full extraction of seq flanking the sites.
# Raises PermissionDenied when the service's user lacks *perm_name* on
# *obj*; a perm_name of False skips the check entirely. Logs a warning on
# denial and a debug entry on success.
# NOTE(review): the log/exception format strings were stripped (bare `u`
# prefixes remain) — the original unicode literals must be restored.
def has_perm(self, service, perm_name, obj, call_name): user = service.user if not (perm_name is False): if not user.has_perm(perm_name, obj=obj): LOG_PERM.warn( u, user, perm_name, call_name, obj) raise PermissionDenied(u % (service.user, perm_name, obj)) LOG_PERM.debug( u, user, call_name, perm_name, obj)
Raise PermissionDenied if user has no permission in object
def mask(self):
    """The array of indices to be masked: the union of outlier, bad
    (flagged), transit, and NaN cadence indices.

    NOTE(review): the union goes through a Python ``set``, so the order
    of the returned indices is unspecified.
    """
    all_masks = [self.outmask, self.badmask, self.transitmask, self.nanmask]
    combined = set(np.concatenate(all_masks))
    return np.array(list(combined), dtype=int)
The array of indices to be masked. This is the union of the sets of outliers, bad (flagged) cadences, transit cadences, and :py:obj:`NaN` cadences.
def irregular_sampling(T, N, rseed=None):
    """Generate an irregularly sampled, sorted time vector.

    A dense linearly spaced grid over [0, T] is jittered with Gaussian
    noise (both endpoints kept fixed) and a random subset of N samples
    is retained, yielding irregular sampling.

    Parameters
    ----------
    T: float
        Time span of the vector, i.e. how long it is in time.
    N: positive integer
        Number of samples in the resulting time vector.
    rseed:
        Seed for numpy's random number generator.

    Returns
    -------
    t_irr: ndarray
        Sorted, irregularly sampled time vector of length N.
    """
    mean_dt = (T / float(N))
    N = int(N)
    np.random.seed(rseed)
    grid = np.linspace(0, T, num=5 * N)
    # jitter interior points only, leaving both endpoints in place
    grid[1:-1] += mean_dt * 0.5 * np.random.randn(5 * N - 2)
    keep = np.random.permutation(5 * N)[:N]
    return np.sort(grid[keep])
Generates an irregularly sampled time vector by perturbing a linearly spaced vector and later deleting a certain number of points Parameters ---------- T: float Time span of the vector, i.e. how long it is in time N: positive integer Number of samples of the resulting time vector rseed: Random seed to feed the random number generator Returns ------- t_irr: ndarray An irregularly sampled time vector
# Builds the OAuth2 authorization URL: validates/sets redirect_uri (the
# parameter form is deprecated in favor of the constructor), assembles the
# standard query parameters plus optional state, login_hint and PKCE code
# challenge, then merges them into auth_uri.
# NOTE(review): many dict keys and string literals were stripped
# ('client_id', 'redirect_uri', 'scope', 'state', etc.) and the ValueError
# and deprecation-warning messages are empty — restore before use.
def step1_get_authorize_url(self, redirect_uri=None, state=None): if redirect_uri is not None: logger.warning(( )) self.redirect_uri = redirect_uri if self.redirect_uri is None: raise ValueError() query_params = { : self.client_id, : self.redirect_uri, : self.scope, } if state is not None: query_params[] = state if self.login_hint is not None: query_params[] = self.login_hint if self._pkce: if not self.code_verifier: self.code_verifier = _pkce.code_verifier() challenge = _pkce.code_challenge(self.code_verifier) query_params[] = challenge query_params[] = query_params.update(self.params) return _helpers.update_query_params(self.auth_uri, query_params)
Returns a URI to redirect to the provider. Args: redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for a non-web-based application, or a URI that handles the callback from the authorization server. This parameter is deprecated, please move to passing the redirect_uri in via the constructor. state: string, Opaque state string which is passed through the OAuth2 flow and returned to the client as a query parameter in the callback. Returns: A URI as a string to redirect the user to begin the authorization flow.
def check_bam(bam, samtype="bam"): ut.check_existance(bam) samfile = pysam.AlignmentFile(bam, "rb") if not samfile.has_index(): pysam.index(bam) samfile = pysam.AlignmentFile(bam, "rb") logging.info("Nanoget: No index for bam file could be found, created index.") if not samfile.header[][] == : logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam)) sys.exit("Please use a bam file sorted by coordinate.") if samtype == "bam": logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format( bam, samfile.mapped, samfile.unmapped)) if samfile.mapped == 0: logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam)) sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam)) return samfile
Check if a bam file is valid. The bam file should: - exist - have an index (created if necessary) - be sorted by coordinate - have at least one mapped read
# Fetches a user's login history, defaulting start_date to 30 days ago,
# by passing a date filter to the SoftLayer getLoginAttempts call.
# NOTE(review): the nested filter dict keys/operators were stripped
# (presumably 'loginAttempts'/'createDate'/'operation' with a greaterThanDate
# style filter) — restore from the SoftLayer object-filter conventions.
def get_logins(self, user_id, start_date=None): if start_date is None: date_object = datetime.datetime.today() - datetime.timedelta(days=30) start_date = date_object.strftime("%m/%d/%Y 0:0:0") date_filter = { : { : { : , : [{: , : [start_date]}] } } } login_log = self.user_service.getLoginAttempts(id=user_id, filter=date_filter) return login_log
Gets the login history for a user, default start_date is 30 days ago :param int id: User id to get :param string start_date: "%m/%d/%Y %H:%M:%s" formatted string. :returns: list https://softlayer.github.io/reference/datatypes/SoftLayer_User_Customer_Access_Authentication/ Example:: get_logins(123, '04/08/2018 0:0:0')
# Renders an interactive HTML/JS animation of the orbit (for Jupyter): picks
# up to three (d1, d2) quantity pairs based on the phase-space dimension,
# resolves axis labels (physical vs natural units), serializes the sampled
# coordinates to JSON (inline or to json_filename), and assembles a Plotly
# layout plus per-trace JS into an IPython HTML object.
# NOTE(review): virtually all string/dict-key literals in this block were
# stripped (label dictionaries, kwargs keys such as d1/d2/width/height,
# the JS/HTML template strings) — this code cannot run as shown and must be
# restored from the original galpy source before any further change.
def animate(self,*args,**kwargs): try: from IPython.display import HTML except ImportError: raise ImportError("Orbit.animate requires ipython/jupyter to be installed") if (kwargs.get(,False) \ and kwargs.get(,self._roSet)) or \ (not in kwargs \ and kwargs.get(,self._roSet)): labeldict= {:, :, :, :, :, :, :, :, :, :, :, :, :, :, :, :, :, :, :, :} else: labeldict= {:,:,:,:, :,:,:r, :, :,:,:,:, :,:, :,:, :r,:r, :r, :r} labeldict.update({:, :, :, :, :, :, :, :, :, :, :, :, :, :, :, :}) kwargs[]= False if not in kwargs and not in kwargs: if len(self.vxvv) == 3: d1= d2= elif len(self.vxvv) == 4: d1= d2= elif len(self.vxvv) == 2: d1= d2= elif len(self.vxvv) == 5 or len(self.vxvv) == 6: d1= d2= elif not in kwargs: d2= kwargs.pop() d1= elif not in kwargs: d1= kwargs.pop() d2= else: d1= kwargs.pop() d2= kwargs.pop() xs= [] ys= [] xlabels= [] ylabels= [] if isinstance(d1,str) or callable(d1): d1s= [d1] d2s= [d2] else: d1s= d1 d2s= d2 if len(d1s) > 3: raise ValueError() all_xlabel= kwargs.get(,[None for d in d1]) all_ylabel= kwargs.get(,[None for d in d2]) for d1,d2, xlabel, ylabel in zip(d1s,d2s,all_xlabel,all_ylabel): x= self._parse_plot_quantity(d1,**kwargs) y= self._parse_plot_quantity(d2,**kwargs) xs.append(x) ys.append(y) if xlabel is None: xlabels.append(labeldict.get(d1,)) else: xlabels.append(xlabel) if ylabel is None: ylabels.append(labeldict.get(d2,)) else: ylabels.append(ylabel) kwargs.pop(,None) kwargs.pop(,None) kwargs.pop(,None) kwargs.pop(,None) kwargs.pop(,None) kwargs.pop(,None) kwargs.pop(,None) width= kwargs.pop(,600) height= kwargs.pop(,400) load_jslibs= kwargs.pop(,True) if load_jslibs: load_jslibs_code= else: load_jslibs_code= "" nplots= len(xs) jsonDict= {} jsonDict[]= xs[0].tolist() jsonDict[]= ys[0].tolist() for ii in range(1,nplots): jsonDict[ % (ii+1)]= xs[ii].tolist() jsonDict[ % (ii+1)]= ys[ii].tolist() json_filename= kwargs.pop(,None) if json_filename is None: jd= json.dumps(jsonDict) json_code= .format(jd=jd) close_json_code= "" else: with 
open(json_filename,) as jfile: json.dump(jsonDict,jfile) json_code= .format(jfilename=json_filename) close_json_code= "});" self.divid= \ +.join(choice(ascii_lowercase) for i in range(24)) button_width= 419.51+4.*10. button_margin_left= int(nu.round((width-button_width)/2.)) if button_margin_left < 0: button_margin_left= 0 if len(d1s) == 1: xmin= [0,0,0] xmax= [1,1,1] elif len(d1s) == 2: xmin= [0,0.55,0] xmax= [0.45,1,1] elif len(d1s) == 3: xmin= [0,0.365,0.73] xmax= [0.27,0.635,1] layout= .format(xlabel=xlabels[0],ylabel=ylabels[0],xmin=xmin[0],xmax=xmax[0]) for ii in range(1,nplots): layout+= .format(idx=ii+1,xlabel=xlabels[ii],ylabel=ylabels[ii], xmin=xmin[ii],xmax=xmax[ii]) layout+= if len(d1s) > 1: setup_trace2= .format(divid=self.divid) delete_trace4= .format(divid=self.divid) delete_trace3= .format(divid=self.divid) update_trace34= .format(divid=self.divid) else: setup_trace2= delete_trace4= "" delete_trace3= "" update_trace34= "" if len(d1s) > 2: setup_trace3= .format(divid=self.divid) delete_trace6= .format(divid=self.divid) delete_trace5= .format(divid=self.divid) update_trace56= .format(divid=self.divid) elif len(d1s) > 1: setup_trace3= delete_trace5= "" delete_trace6= "" update_trace56= "" else: setup_trace3= "" delete_trace5= "" delete_trace6= "" update_trace56= "" return HTML(.format(json_code=json_code,close_json_code=close_json_code, divid=self.divid,width=width,height=height, button_margin_left=button_margin_left, layout=layout,load_jslibs_code=load_jslibs_code, setup_trace2=setup_trace2,setup_trace3=setup_trace3, delete_trace4=delete_trace4,delete_trace6=delete_trace6, delete_trace3=delete_trace3,delete_trace5=delete_trace5, update_trace34=update_trace34, update_trace56=update_trace56))
NAME: animate PURPOSE: animate an Orbit INPUT: d1= first dimension to plot ('x', 'y', 'R', 'vR', 'vT', 'z', 'vz', ...); can be list with up to three entries for three subplots d2= second dimension to plot; can be list with up to three entries for three subplots width= (600) width of output div in px height= (400) height of output div in px json_filename= (None) if set, save the data necessary for the figure in this filename (e.g., json_filename= 'orbit_data/orbit.json'); this path is also used in the output HTML, so needs to be accessible load_jslibs= (True) if True, load the require and jQuery Javascript libraries (necessary in Jupyterlab, not necessary but harmless in notebooks; if embedding on a webpage one typically wants to load these libraries in the header) ro= (Object-wide default) physical scale for distances to use to convert vo= (Object-wide default) physical scale for velocities to use to convert use_physical= use to override Object-wide default for using a physical scale for output +kwargs for ra,dec,ll,bb, etc. functions OUTPUT: IPython.display.HTML object with code to animate the orbit; can be directly shown in jupyter notebook or embedded in HTML pages; get a text version of the HTML using the _repr_html_() function HISTORY: 2017-09-17-24 - Written - Bovy (UofT) 2017-11-28 - Allow arbitrary functions of time to be plotted - Bovy (UofT)
# CLI command: reads an arbitrary PIV object by id and echoes it. On
# SECURITY_CONDITION_NOT_SATISFIED it verifies the PIN once and retries;
# NOT_FOUND aborts the command; other APDU errors propagate.
# NOTE(review): the ctx.obj[] key (presumably 'controller') and the
# ctx.fail() message were stripped — restore before use. The `retry`
# parameter is set but never consulted inside do_read_object.
def read_object(ctx, pin, object_id): controller = ctx.obj[] def do_read_object(retry=True): try: click.echo(controller.get_data(object_id)) except APDUError as e: if e.sw == SW.NOT_FOUND: ctx.fail() elif e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED: _verify_pin(ctx, controller, pin) do_read_object(retry=False) else: raise do_read_object()
Read arbitrary PIV object. Read PIV object by providing the object id. \b OBJECT-ID Id of PIV object in HEX.
def sub_dfs_by_size(df, size):
    """Yield consecutive sub-dataframes of *df*, each with up to *size* rows.

    Arguments
    ---------
    df : pandas.DataFrame
        The dataframe to split.
    size : int
        Number of rows per chunk; the final chunk may be smaller.

    Returns
    -------
    generator
        Yields the chunks in order.
    """
    start = 0
    while start < len(df):
        yield df.iloc[start:start + size]
        start += size
Get a generator yielding consecutive sub-dataframes of the given size. Arguments --------- df : pandas.DataFrame The dataframe for which to get sub-dataframes. size : int The size of each sub-dataframe. Returns ------- generator A generator yielding consecutive sub-dataframe of the given size. Example ------- >>> import pandas as pd; import pdutil; >>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]] >>> df = pd.DataFrame(data, columns=['age', 'name']) >>> for subdf in pdutil.iter.sub_dfs_by_size(df, 2): print(subdf) age name 0 23 Jen 1 42 Ray age name 2 15 Fin
# Resolves a HANGUL SYLLABLE TYPE property value (with optional negation
# prefix) through the unicode alias table and returns the matching entry
# from the ascii or unicode data table.
# NOTE(review): stripped literals — the startswith() negation marker
# (presumably '^'), the re-prefixed '^' on the negated value, and the alias
# table key (presumably 'hangulsyllabletype') must be restored.
def get_hangul_syllable_type_property(value, is_bytes=False): obj = unidata.ascii_hangul_syllable_type if is_bytes else unidata.unicode_hangul_syllable_type if value.startswith(): negated = value[1:] value = + unidata.unicode_alias[].get(negated, negated) else: value = unidata.unicode_alias[].get(value, value) return obj[value]
Get `HANGUL SYLLABLE TYPE` property.
def post_request(profile, resource, payload):
    """Do a POST request to Github's API.

    Args:
        profile
            A profile generated from ``simplygithub.authentication.profile``,
            telling this module the repo to connect to and the token to use.
        resource
            The part of a Github API URL after ``.../:repo/git`` (e.g.
            ``/commits``).
        payload
            Dict sent JSON-encoded as the body of the POST request.

    Returns:
        The response body decoded from JSON into a Python dict.
    """
    response = requests.post(get_url(profile, resource),
                             json=payload,
                             headers=get_headers(profile))
    return response.json()
Do a POST request to Github's API. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. resource The part of a Github API URL that comes after ``.../:repo/git``. For instance, for ``.../:repo/git/commits``, it's ``/commits``. payload A dict of values to send as the payload of the POST request. The data will be JSON-encoded. Returns: The body of the response, converted from JSON into a Python dict.
# Observer for container events on the items list: maps each list operation
# (append/insert/remove/update/extend/etc.) onto the corresponding
# RecyclerView adapter notification so the UI updates incrementally.
# NOTE(review): the change-dict keys and operation-name literals were
# stripped (`change[] != :`, `op == :`) — presumably atom's 'type'/
# 'operation' keys and names like 'append', 'insert', 'remove'; restore
# before use.
def _on_items_changed(self, change): if change[] != : return op = change[] if op == : i = len(change[])-1 self.adapter.notifyItemInserted(i) elif op == : self.adapter.notifyItemInserted(change[]) elif op in (, ): self.adapter.notifyItemRemoved(change[]) elif op == : self.adapter.notifyItemChanged(change[]) elif op == : n = len(change[]) i = len(change[])-n self.adapter.notifyItemRangeInserted(i, n) elif op in (, , ): self.adapter.notifyDataSetChanged()
Observe container events on the items list and update the adapter appropriately.
def update_multi_precision(self, index, weight, grad, state):
    """Update a parameter from its gradient and state, mixed-precision aware.

    When multi-precision is enabled and the weight is float16, the update
    runs on the float32 master copy and the result is cast back into the
    float16 weight in place; otherwise the plain update is applied.

    Parameters
    ----------
    index : int
        Unique index of the parameter (selects its lr/wd multipliers).
    weight : NDArray
        The parameter to be updated.
    grad : NDArray
        Gradient of the objective with respect to this parameter.
    state : any obj
        State returned by `create_state()`.
    """
    use_master = self.multi_precision and weight.dtype == numpy.float16
    if not use_master:
        self.update(index, weight, grad, state)
        return
    master = state[0]
    real_state = state[1]
    self.update(index, master, grad.astype(numpy.float32), real_state)
    # write the fp32 result back into the fp16 weight in place
    cast(master, dtype=weight.dtype, out=weight)
Updates the given parameter using the corresponding gradient and state. Mixed precision version. Parameters ---------- index : int The unique index of the parameter into the individual learning rates and weight decays. Learning rates and weight decay may be set via `set_lr_mult()` and `set_wd_mult()`, respectively. weight : NDArray The parameter to be updated. grad : NDArray The gradient of the objective with respect to this parameter. state : any obj The state returned by `create_state()`.
# Strips the keys listed in self.ignore from every part of the pending
# request: URL query string, headers (including individual cookies parsed
# via SimpleCookie), and the POST body (form dict or JSON), then returns
# the cleaned self.new_request.
# NOTE(review): all ignore-category and request-dict keys were stripped
# (`self.ignore[]`, `self.new_request[]`, the method comparison and the
# cookie-join separator) — presumably 'url'/'headers'/'cookies'/'data'/
# 'json' style keys; restore before use.
def reset_new_request(self): raw_url = self.new_request[] parsed_url = urlparse(raw_url) qsl = parse_qsl(parsed_url.query) new_url = self._join_url( parsed_url, [i for i in qsl if i not in self.ignore[]]) self.new_request[] = new_url self.logger_function( % self.ignore) for key in self.ignore[]: self.new_request[].pop(key) if not self.new_request.get(): self.new_request.pop(, None) if self.ignore[] and not in self.ignore[]: headers = self.new_request[] headers = {key.title(): headers[key] for key in headers} if in headers: cookies = SimpleCookie(headers[]) new_cookie = .join([ i[1].OutputString() for i in cookies.items() if i[0] not in self.ignore[] ]) self.new_request[][] = new_cookie if self.new_request[] == : data = self.new_request.get() if data: if isinstance(data, dict): for key in self.ignore[]: data.pop(key) if (not data) or self.ignore[]: self.new_request.pop(, None) if self.has_json_data and in self.new_request: json_data = json.loads(data.decode(self.encoding)) for key in self.ignore[]: json_data.pop(key) self.new_request[] = json.dumps(json_data).encode( self.encoding) return self.new_request
Remove the non-sense args from the self.ignore, return self.new_request
# Parses the installer's command line with optparse: a boolean flag, a
# download-URL option defaulting to DEFAULT_URL, and an insecure-downloader
# switch (const stores the insecure fetcher, default picks the best one).
# Returns only the options object; positional args are discarded.
# NOTE(review): every option name, dest, action and help string was
# stripped (`parser.add_option( , dest=, ...)`) — restore before use.
def _parse_args(): parser = optparse.OptionParser() parser.add_option( , dest=, action=, default=False, help=) parser.add_option( , dest=, metavar="URL", default=DEFAULT_URL, help=) parser.add_option( , dest=, action=, const=lambda: download_file_insecure, default=get_best_downloader, help= ) options, args = parser.parse_args() return options
Parse the command line for options
# Builds an actions pipeline: reduce threads a seed through every action,
# accumulating each action's value; non-None values are kept and handed to
# *done* together with the final seed.
# NOTE(review): the accumulator dict keys were stripped (`acc[]`,
# `{: values, : result[]}`) — presumably 'values'/'seed' style keys shared
# with each action's result dict; restore before use.
def actions(acts, done): def _intermediate(acc, action): result = action(acc[]) values = concatv(acc[], [result[]]) return {: values, : result[]} def _actions(seed): init = {: [], : seed} result = reduce(_intermediate, acts, init) keep = remove(lambda x: x is None, result[]) return done(keep, result[]) return _actions
Prepare actions pipeline. :param tuple acts: called functions :param function done: get result from actions :returns function: function that starts execution
def hashkey(*args, **kwargs):
    """Return a cache key for the specified hashable arguments.

    Keyword arguments are flattened into the key in sorted order,
    separated from the positional part by the ``_kwmark`` sentinel so
    that positional and keyword parts cannot collide.
    """
    if not kwargs:
        return _HashedTuple(args)
    flattened = sum(sorted(kwargs.items()), _kwmark)
    return _HashedTuple(args + flattened)
Return a cache key for the specified hashable arguments.
def get_taints(arg, taint=None):
    """Helper to enumerate an object's taints.

    :param arg: a value or Expression
    :param taint: a regular expression matching a taint value
        (eg. 'IMPORTANT.*'). If None, every taint value is yielded.
    """
    if not issymbolic(arg):
        # concrete values carry no taint
        return
    for candidate in arg.taint:
        if taint is None:
            yield candidate
        elif re.match(taint, candidate, re.DOTALL | re.IGNORECASE):
            yield candidate
Helper to list an object taints. :param arg: a value or Expression :param taint: a regular expression matching a taint value (eg. 'IMPORTANT.*'). If None, this function checks for any taint value.
def _readlines(fname, fpointer1=open, fpointer2=open): try: with fpointer1(fname, "r") as fobj: return fobj.readlines() except UnicodeDecodeError: with fpointer2(fname, "r", encoding="utf-8") as fobj: return fobj.readlines()
Read all lines from file.
def triple_reference_of(label: ShExJ.tripleExprLabel, cntxt: Context) -> Optional[ShExJ.tripleExpr]:
    """Search the schema for the triple expression carrying *label*.

    The start shape is examined first; if it yields nothing, every shape
    in the schema is scanned, stopping at the first match.
    """
    found: Optional[ShExJ.tripleExpr] = None
    if cntxt.schema.start is not None:
        found = triple_in_shape(cntxt.schema.start, label, cntxt)
    if found is None:
        for shape_expr in cntxt.schema.shapes:
            found = triple_in_shape(shape_expr, label, cntxt)
            if found:
                break
    return found
Search for the label in a Schema
# Salt state helper: resolves *domain* as an fnmatch wildcard against the
# running domains, invokes the given virt module function on each match,
# and aggregates changed vs. (libvirt-)errored domains into a state-style
# ret dict; the result is False when nothing matched or only errors
# occurred.
# NOTE(review): stripped literals throughout — the ret dict keys
# (name/changes/result/comment), the __salt__ function names and the
# per-domain dict keys must be restored before this can run.
def _virt_call(domain, function, section, comment, connection=None, username=None, password=None, **kwargs): ret = {: domain, : {}, : True, : } targeted_domains = fnmatch.filter(__salt__[](), domain) changed_domains = list() ignored_domains = list() for targeted_domain in targeted_domains: try: response = __salt__[.format(function)](targeted_domain, connection=connection, username=username, password=password, **kwargs) if isinstance(response, dict): response = response[] changed_domains.append({: targeted_domain, function: response}) except libvirt.libvirtError as err: ignored_domains.append({: targeted_domain, : six.text_type(err)}) if not changed_domains: ret[] = False ret[] = if ignored_domains: ret[] = {: ignored_domains} else: ret[] = {section: changed_domains} ret[] = comment return ret
Helper to call the virt functions. Wildcards supported. :param domain: :param function: :param section: :param comment: :return:
def create_rrset(self, zone_name, rtype, owner_name, ttl, rdata):
    """Create a new RRSet in the specified zone.

    Arguments:
        zone_name -- The zone that will contain the new RRSet. The
                     trailing dot is optional.
        rtype -- The type of the RRSet, numeric (1) or a well-known
                 name (A).
        owner_name -- The owner name for the RRSet. With no trailing dot
                      it is relative (foo); with one it is absolute
                      (foo.zonename.com.).
        ttl -- The TTL value for the RRSet.
        rdata -- The BIND data as a string, or a list of strings when
                 the RRSet holds multiple resource records.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # list subclasses without double-wrapping them.
    if not isinstance(rdata, list):
        rdata = [rdata]
    body = {"ttl": ttl, "rdata": rdata}
    url = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name
    return self.rest_api_connection.post(url, json.dumps(body))
Creates a new RRSet in the specified zone. Arguments: zone_name -- The zone that will contain the new RRSet. The trailing dot is optional. rtype -- The type of the RRSet. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) ttl -- The TTL value for the RRSet. rdata -- The BIND data for the RRSet as a string. If there is a single resource record in the RRSet, you can pass in the single string. If there are multiple resource records in this RRSet, pass in a list of strings.
# Validates the custom-dimension column name (allowed characters, length
# 3..20), submits the batch in chunks via _submit_batch, raises on an
# error field in the JSON response, and returns the batch GUID field.
# NOTE(review): the ValueError/RuntimeError format strings, the URL format
# string and the response-dict keys (error/GUID) were stripped — restore
# before use.
def submit_populator_batch(self, column_name, batch): if not set(column_name).issubset(_allowedCustomDimensionChars): raise ValueError( % column_name) if len(column_name) < 3 or len(column_name) > 20: raise ValueError( % column_name) url = % (self.base_url, column_name) resp_json_dict = self._submit_batch(url, batch) if resp_json_dict.get() is not None: raise RuntimeError( % resp_json_dict[]) return resp_json_dict[]
Submit a populator batch Submit a populator batch as a series of HTTP requests in small chunks, returning the batch GUID, or raising exception on error.
# Prepares data for upload as a texture: tiles *default* for every item
# when *arr* is None, adds a leading singleton dimension to get shape
# (1, n_items, n_cols), and linearly rescales values from from_bounds
# into [0, 1], asserting the bounds hold.
# NOTE(review): the attribute name in `hasattr(default, )` was stripped —
# presumably '__len__' (scalar-vs-sequence detection); TODO confirm and
# restore.
def _get_texture(arr, default, n_items, from_bounds): if not hasattr(default, ): default = [default] n_cols = len(default) if arr is None: arr = np.tile(default, (n_items, 1)) assert arr.shape == (n_items, n_cols) arr = arr[np.newaxis, ...].astype(np.float64) assert arr.shape == (1, n_items, n_cols) assert len(from_bounds) == 2 m, M = map(float, from_bounds) assert np.all(arr >= m) assert np.all(arr <= M) arr = (arr - m) / (M - m) assert np.all(arr >= 0) assert np.all(arr <= 1.) return arr
Prepare data to be uploaded as a texture. The from_bounds must be specified.
def _ordered_node_addrs(self, function_address):
    """Return all node addresses of a function in an optimal traversal
    (quasi-topological) order, memoized per function.

    If the function does not exist, return an empty list.

    :param int function_address: Address of the function.
    :return: An ordered list of the node addresses.
    :rtype: list
    """
    try:
        func = self.kb.functions[function_address]
    except KeyError:
        return []

    if function_address not in self._function_node_addrs:
        ordered_nodes = CFGUtils.quasi_topological_sort_nodes(func.graph)
        self._function_node_addrs[function_address] = [
            node.addr for node in ordered_nodes
        ]
    return self._function_node_addrs[function_address]
For a given function, return all nodes in an optimal traversal order. If the function does not exist, return an empty list. :param int function_address: Address of the function. :return: A ordered list of the nodes. :rtype: list