code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def load_table(self, table): region = table.database if table.database else self.default_region resource_name, collection_name = table.table.split(, 1) boto_region_name = region.replace(, ) resource = self.boto3_session.resource(resource_name, region_name=boto_region_name) if not hasattr(resource, collection_name): raise QueryError( .format(collection_name, resource_name)) self.attach_region(region) self.refresh_table(region, table.table, resource, getattr(resource, collection_name))
Load resources as specified by given table into our db.
def _get_invalid_info(granule_data): if issubclass(granule_data.dtype.type, np.integer): msg = ("na:" + str((granule_data == 65535).sum()) + " miss:" + str((granule_data == 65534).sum()) + " obpt:" + str((granule_data == 65533).sum()) + " ogpt:" + str((granule_data == 65532).sum()) + " err:" + str((granule_data == 65531).sum()) + " elint:" + str((granule_data == 65530).sum()) + " vdne:" + str((granule_data == 65529).sum()) + " soub:" + str((granule_data == 65528).sum())) elif issubclass(granule_data.dtype.type, np.floating): msg = ("na:" + str((granule_data == -999.9).sum()) + " miss:" + str((granule_data == -999.8).sum()) + " obpt:" + str((granule_data == -999.7).sum()) + " ogpt:" + str((granule_data == -999.6).sum()) + " err:" + str((granule_data == -999.5).sum()) + " elint:" + str((granule_data == -999.4).sum()) + " vdne:" + str((granule_data == -999.3).sum()) + " soub:" + str((granule_data == -999.2).sum())) return msg
Get a detailed report of the missing data. N/A: not applicable MISS: required value missing at time of processing OBPT: onboard pixel trim (overlapping/bow-tie pixel removed during SDR processing) OGPT: on-ground pixel trim (overlapping/bow-tie pixel removed during EDR processing) ERR: error occurred during processing / non-convergence ELINT: ellipsoid intersect failed / instrument line-of-sight does not intersect the Earth’s surface VDNE: value does not exist / processing algorithm did not execute SOUB: scaled out-of-bounds / solution not within allowed range
def encode (self): return self.attrs.encode() + self.delay.encode() + self.cmd.encode()
Encodes this SeqCmd to binary and returns a bytearray.
def utctimetuple(self): "Return UTC time tuple compatible with time.gmtime()." y, m, d = self.year, self.month, self.day hh, mm, ss = self.hour, self.minute, self.second offset = self._utcoffset() if offset: mm -= offset y, m, d, hh, mm, ss, _ = _normalize_datetime( y, m, d, hh, mm, ss, 0, ignore_overflow=True) return _build_struct_time(y, m, d, hh, mm, ss, 0)
Return UTC time tuple compatible with time.gmtime().
def as_search_document_update(self, *, index, update_fields): if UPDATE_STRATEGY == UPDATE_STRATEGY_FULL: return self.as_search_document(index=index) if UPDATE_STRATEGY == UPDATE_STRATEGY_PARTIAL: return { k: getattr(self, k) for k in self.clean_update_fields( index=index, update_fields=update_fields ) }
Return a partial update document based on which fields have been updated. If an object is saved with the `update_fields` argument passed through, then it is assumed that this is a 'partial update'. In this scenario we need a {property: value} dictionary containing just the fields we want to update. This method handles two possible update strategies - 'full' or 'partial'. The default 'full' strategy simply returns the value of `as_search_document` - thereby replacing the entire document each time. The 'partial' strategy is more intelligent - it will determine whether the fields passed are in the search document mapping, and return a partial update document that contains only those that are. In addition, if any field that _is_ included cannot be automatically serialized (e.g. a RelatedField object), then this method will raise a ValueError. In this scenario, you should override this method in your subclass. >>> def as_search_document_update(self, index, update_fields): ... if 'user' in update_fields: ... update_fields.remove('user') ... doc = super().as_search_document_update(index, update_fields) ... doc['user'] = self.user.get_full_name() ... return doc ... return super().as_search_document_update(index, update_fields) You may also wish to subclass this method to perform field-specific logic - in this example if only the timestamp is being saved, then ignore the update if the timestamp is later than a certain time. >>> def as_search_document_update(self, index, update_fields): ... if update_fields == ['timestamp']: ... if self.timestamp > today(): ... return {} ... return super().as_search_document_update(index, update_fields)
async def request(self, request): assert request.transaction_id not in self.transactions if self.integrity_key: self.__add_authentication(request) transaction = stun.Transaction(request, self.server, self) self.transactions[request.transaction_id] = transaction try: return await transaction.run() finally: del self.transactions[request.transaction_id]
Execute a STUN transaction and return the response.
def list_projects_search(self, searchstring): log.debug( % searchstring) return self.collection( % quote_plus(searchstring))
List projects with searchstring.
def gravitational_force(position_a, mass_a, position_b, mass_b): distance = distance_between(position_a, position_b) angle = math.atan2(position_a[1] - position_b[1], position_a[0] - position_b[0]) magnitude = G * mass_a * mass_b / (distance**2) sign = -1 if mass_b > mass_a else 1 x_force = sign * magnitude * math.cos(angle) y_force = sign * magnitude * math.sin(angle) return x_force, y_force
Returns the gravitational force between the two bodies a and b.
def is_frameshift_len(mut_df): if in mut_df.columns: indel_len = mut_df[] else: indel_len = compute_indel_length(mut_df) is_fs = (indel_len%3)>0 is_indel = (mut_df[]==) | (mut_df[]==) is_fs[~is_indel] = False return is_fs
Simply returns a series indicating whether each corresponding mutation is a frameshift. This is based on the length of the indel. Thus may be fooled by frameshifts at exon-intron boundaries or other odd cases. Parameters ---------- mut_df : pd.DataFrame mutation input file as a dataframe in standard format Returns ------- is_fs : pd.Series pandas series indicating if mutaitons are frameshifts
def is_published(self): citeable = in self.record and \ is_citeable(self.record[]) submitted = in self.record and any( in el for el in force_list(self.record.get()) ) return citeable or submitted
Return True if a record is published. We say that a record is published if it is citeable, which means that it has enough information in a ``publication_info``, or if we know its DOI and a ``journal_title``, which means it is in press. Returns: bool: whether the record is published. Examples: >>> record = { ... 'dois': [ ... {'value': '10.1016/0029-5582(61)90469-2'}, ... ], ... 'publication_info': [ ... {'journal_title': 'Nucl.Phys.'}, ... ], ... } >>> LiteratureReader(record).is_published True
def _pyuncompress_sqlitecurve(sqlitecurve, force=False): outfile = sqlitecurve.replace(,) try: if os.path.exists(outfile) and not force: return outfile else: with gzip.open(sqlitecurve,) as infd: with open(outfile,) as outfd: shutil.copyfileobj(infd, outfd) if os.path.exists(outfile): return outfile except Exception as e: return None
This just uncompresses the sqlitecurve. Should be independent of OS.
def handle_splits(self, splits): total_leftover_cash = 0 for asset, ratio in splits: if asset in self.positions: self._dirty_stats = True position = self.positions[asset] leftover_cash = position.handle_split(asset, ratio) total_leftover_cash += leftover_cash return total_leftover_cash
Processes a list of splits by modifying any positions as needed. Parameters ---------- splits: list A list of splits. Each split is a tuple of (asset, ratio). Returns ------- int: The leftover cash from fractional shares after modifying each position.
def get_certificate_issuer(self, certificate_issuer_id, **kwargs): kwargs[] = True if kwargs.get(): return self.get_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs) else: (data) = self.get_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs) return data
Get certificate issuer by ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_certificate_issuer(certificate_issuer_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str certificate_issuer_id: Certificate issuer ID. The ID of the certificate issuer. (required) :return: CertificateIssuerInfo If the method is called asynchronously, returns the request thread.
def delete_stack(self, stack_name): get_stack(stack_name) CLIENT.delete_stack( StackName=stack_name ) DELETE_WAITER.wait(StackName=stack_name)
Teardown a stack.
def collect(self): data_structure = { : ( , , , ), : ( , , , ), : ( , , , , , , ), : ( , , , ), : ( , , , ), : ( , , ), : ( , , , , , , ), : ( , , , , ), : ( , , , , , , , , , ), : ( , ), : ( , ), : ( , , , ), : ( , , ), : ( , , , , , , , ), : ( , , , , , , , , ), : ( , , , , , , , , , , , , , , ), : ( , , , , , , , , , , , , , , ), : ( , , , , , , , , , , , , , , ), : ( , , , , , , , , , , , , , , ), : ( , , , , , , , , , , , , , , ), : ( , ), : ( , , ), : ( , ) } f = open(self.PROC) new_stats = f.readlines() f.close() stats = {} for line in new_stats: items = line.rstrip().split() stats[items[0]] = [int(a) for a in items[1:]] for key in stats.keys(): for item in enumerate(data_structure[key]): metric_name = .join([key, item[1]]) value = stats[key][item[0]] self.publish_counter(metric_name, value)
Collect xfs stats. For an explanation of the following metrics visit http://xfs.org/index.php/Runtime_Stats https://github.com/torvalds/linux/blob/master/fs/xfs/xfs_stats.h
def print_user(self, user): status = "active" token = user.token if token in [, ]: status = token if token is None: token = subid = "%s\t%s[%s]" %(user.id, token, status) print(subid) return subid
print a relational database user
def list_nodes(full=False, call=None): if call == : raise SaltCloudSystemExit( ) ret = {} if POLL_ALL_LOCATIONS: for location in JOYENT_LOCATIONS: result = query(command=, location=location, method=) if result[0] in VALID_RESPONSE_CODES: nodes = result[1] for node in nodes: if in node: node[] = location ret[node[]] = reformat_node(item=node, full=full) else: log.error(, result[1]) else: location = get_location() result = query(command=, location=location, method=) nodes = result[1] for node in nodes: if in node: node[] = location ret[node[]] = reformat_node(item=node, full=full) return ret
list of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q
def send_audio(chat_id, audio, caption=None, duration=None, performer=None, title=None, reply_to_message_id=None, reply_markup=None, disable_notification=False, parse_mode=None, **kwargs): files = None if isinstance(audio, InputFile): files = [audio] audio = None elif not isinstance(audio, str): raise Exception() params = dict( chat_id=chat_id, audio=audio ) params.update( _clean_params( caption=caption, duration=duration, performer=performer, title=title, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, disable_notification=disable_notification, parse_mode=parse_mode, ) ) return TelegramBotRPCRequest(, params=params, files=files, on_result=Message.from_result, **kwargs)
Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .mp3 format. On success, the sent Message is returned. Bots can currently send audio files of up to 50 MB in size, this limit may be changed in the future. For backward compatibility, when the fields title and performer are both empty and the mime-type of the file to be sent is not audio/mpeg, the file will be sent as a playable voice message. For this to work, the audio must be in an .ogg file encoded with OPUS. This behavior will be phased out in the future. For sending voice messages, use the sendVoice method instead. :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param audio: Audio file to send. Pass a file_id as String to send an audio file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an audio file from the Internet, or upload a new one using multipart/form-data. :param caption: Audio caption, 0-200 characters :param duration: Duration of the audio in seconds :param performer: Performer :param title: Track name :param reply_to_message_id: If the message is a reply, ID of the original message :param reply_markup: Additional interface options. A JSON-serialized object for a custom reply keyboard, :param disable_notification: Sends the message silently. iOS users will not receive a notification, Android users will receive a notification with no sound. Other apps coming soon. :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. 
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :type chat_id: int or str :type audio: InputFile or str :type caption: str :type duration: int :type performer: str :type title: str :type reply_to_message_id: int :type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply :type parse_mode: str :returns: On success, the sent Message is returned. :rtype: TelegramBotRPCRequest
def add_reader(self, fd, callback): " Add read file descriptor to the event loop. " fd = fd_to_int(fd) self._read_fds[fd] = callback self.selector.register(fd)
Add read file descriptor to the event loop.
def _join_disease(query, disease_definition, disease_id, disease_name): if disease_definition or disease_id or disease_name: query = query.join(models.Disease) if disease_definition: query = query.filter(models.Disease.definition.like(disease_definition)) if disease_id: query = query.filter(models.Disease.disease_id == disease_id) if disease_name: query = query.filter(models.Disease.disease_name.like(disease_name)) return query
helper function to add a query join to Disease model :param sqlalchemy.orm.query.Query query: SQL Alchemy query :param disease_definition: :param str disease_id: see :attr:`models.Disease.disease_id` :param disease_name: :rtype: sqlalchemy.orm.query.Query
def receive_data(socket): answer = b"" while True: packet = socket.recv(4096) if not packet: break answer += packet response = pickle.loads(answer) socket.close() return response
Receive an answer from the daemon and return the response. Args: socket (socket.socket): A socket that is connected to the daemon. Returns: dir or string: The unpickled answer.
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta): delta_m = (mmax - mag_value) a_1 = self._get_a1_value(bbar, dbar, slip / 10., beta, mmax) return a_1 * np.exp(bbar * delta_m) * (delta_m > 0.0)
Returns the rate of events with M > mag_value :param float slip: Slip rate in mm/yr :param float mmax: Maximum magnitude :param float mag_value: Magnitude value :param float bbar: \bar{b} parameter (effectively = b * log(10.)) :param float dbar: \bar{d} parameter :param float beta: Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
def edit(self): input_params = { "name": self.name, "public_key": self.public_key, } data = self.get_data( "account/keys/%s" % self.id, type=PUT, params=input_params ) if data: self.id = data[][]
Edit the SSH Key
def open(self, *args, **kwargs): Telnet.open(self, *args, **kwargs) if self.force_ssl: self._start_tls()
Works exactly like the Telnet.open() call from the telnetlib module, except SSL/TLS may be transparently negotiated.
def geometric_progression_for_stepsize(x, update, dist, decision_function, current_iteration): epsilon = dist / np.sqrt(current_iteration) while True: updated = x + epsilon * update success = decision_function(updated[None])[0] if success: break else: epsilon = epsilon / 2.0 return epsilon
Geometric progression to search for stepsize. Keep decreasing stepsize by half until reaching the desired side of the boundary.
def cublasDsbmv(handle, uplo, n, k, alpha, A, lda, x, incx, beta, y, incy): status = _libcublas.cublasDsbmv_v2(handle, _CUBLAS_FILL_MODE[uplo], n, k, ctypes.byref(ctypes.c_double(alpha)), int(A), lda, int(x), incx, ctypes.byref(ctypes.c_double(beta)), int(y), incy) cublasCheckStatus(status)
Matrix-vector product for real symmetric-banded matrix.
def create(): if not all(map(os.path.isdir, ARGS.directory)): exit() with sqlite3.connect(ARGS.database) as connection: connection.text_factory = str cursor = connection.cursor() cursor.execute() cursor.execute() for dir in ARGS.directory: cursor.executemany(, local_data(dir))
Create a new database with information about the films in the specified directory or directories.
def send(self, msg): debug(, msg) self.loop.send(self._packer.pack(msg))
Queue `msg` for sending to Nvim.
def protect(self, password=None, read_protect=False, protect_from=0): return super(Type1Tag, self).protect( password, read_protect, protect_from)
The implementation of :meth:`nfc.tag.Tag.protect` for a generic type 1 tag is limited to setting the NDEF data read-only for tags that are already NDEF formatted.
def getElementsByName(self, name): try: return filter(lambda x: x.name == name, self._lattice_eleobjlist) except: return []
get element with given name, return list of element objects regarding to 'name' :param name: element name, case sensitive, if elements are auto-generated from LteParser, the name should be lower cased.
def isPe64(self): if self.ntHeaders.optionalHeader.magic.value == consts.PE64: return True return False
Determines if the current L{PE} instance is a PE64 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE64 file. Otherwise, returns C{False}.
def permission_required(perm, login_url=None, raise_exception=False): def check_perms(user): if not getattr(settings, , app_settings.REQUIRE_LOGIN): return True if user.has_perm(perm): return True if raise_exception: raise PermissionDenied return False return user_passes_test(check_perms, login_url=login_url)
Re-implementation of the permission_required decorator, honors settings. If ``DASHBOARD_REQUIRE_LOGIN`` is False, this decorator will always return ``True``, otherwise it will check for the permission as usual.
def call_modules(auto_discover=()): for app in settings.INSTALLED_APPS: modules = set(auto_discover) if app in INSTALLED_APPS_REGISTER: modules.update(INSTALLED_APPS_REGISTER[app]) for module in modules: mod = import_module(app) try: import_module( % (app, module)) inst = getattr(mod, , lambda: None) inst() except: if module_has_submodule(mod, module): raise app_modules_loaded.send(sender=None)
this is called in project urls.py for registering desired modules (eg.: admin.py)
def prepare_synteny(tourfile, lastfile, odir, p, opts): qbedfile, sbedfile = get_bed_filenames(lastfile, p, opts) qbedfile = op.abspath(qbedfile) sbedfile = op.abspath(sbedfile) qbed = Bed(qbedfile, sorted=False) contig_to_beds = dict(qbed.sub_beds()) mkdir(odir, overwrite=True) os.chdir(odir) logging.debug("Change into subdir `{}`".format(odir)) anchorsfile = ".".join(op.basename(lastfile).split(".", 2)[:2]) \ + ".anchors" fw = open(anchorsfile, "w") for b in Blast(lastfile): print("\t".join((gene_name(b.query), gene_name(b.subject), str(int(b.score)))), file=fw) fw.close() symlink(sbedfile, op.basename(sbedfile)) return anchorsfile, qbedfile, contig_to_beds
Prepare synteny plots for movie().
def hpx_to_coords(h, shape): x, z = hpx_to_axes(h, shape) x = np.sqrt(x[0:-1] * x[1:]) z = z[:-1] + 0.5 x = np.ravel(np.ones(shape) * x[:, np.newaxis]) z = np.ravel(np.ones(shape) * z[np.newaxis, :]) return np.vstack((x, z))
Generate an N x D list of pixel center coordinates where N is the number of pixels and D is the dimensionality of the map.
def getloansurl(idcred, *args, **kwargs): getparams = [] if kwargs: try: if kwargs["fullDetails"] == True: getparams.append("fullDetails=true") else: getparams.append("fullDetails=false") except Exception as ex: pass try: getparams.append("accountState=%s" % kwargs["accountState"]) except Exception as ex: pass try: getparams.append("branchId=%s" % kwargs["branchId"]) except Exception as ex: pass try: getparams.append("centreId=%s" % kwargs["centreId"]) except Exception as ex: pass try: getparams.append("creditOfficerUsername=%s" % kwargs["creditOfficerUsername"]) except Exception as ex: pass try: getparams.append("offset=%s" % kwargs["offset"]) except Exception as ex: pass try: getparams.append("limit=%s" % kwargs["limit"]) except Exception as ex: pass idcredparam = "" if idcred == "" else "/"+idcred url = getmambuurl(*args,**kwargs) + "loans" + idcredparam + ("" if len(getparams) == 0 else "?" + "&".join(getparams) ) return url
Request Loans URL. If idcred is set, you'll get a response adequate for a MambuLoan object. If not set, you'll get a response adequate for a MambuLoans object. See mambuloan module and pydoc for further information. Currently implemented filter parameters: * fullDetails * accountState * branchId * centreId * creditOfficerUsername * limit * offset See Mambu official developer documentation for further details, and info on parameters that may be implemented here in the future.
def requires_permission(permission): def inner(f): def is_available_here(): if not hasattr(current_auth, ): return False elif is_collection(permission): return bool(current_auth.permissions.intersection(permission)) else: return permission in current_auth.permissions def is_available(context=None): result = is_available_here() if result and hasattr(f, ): if not is_available_here(): abort(403) return f(*args, **kwargs) wrapper.requires_permission = permission wrapper.is_available = is_available return wrapper return inner
View decorator that requires a certain permission to be present in ``current_auth.permissions`` before the view is allowed to proceed. Aborts with ``403 Forbidden`` if the permission is not present. The decorated view will have an ``is_available`` method that can be called to perform the same test. :param permission: Permission that is required. If a collection type is provided, any one permission must be available
def emit(self, span_datas): envelopes = [self.span_data_to_envelope(sd) for sd in span_datas] result = self._transmit(envelopes) if result > 0: self.storage.put(envelopes, result)
:type span_datas: list of :class: `~opencensus.trace.span_data.SpanData` :param list of opencensus.trace.span_data.SpanData span_datas: SpanData tuples to emit
def __substitute_objects(self, value, context_dict): if type(value) == dict: return dict([(k, self.__substitute_objects(v, context_dict)) for k, v in value.items()]) elif type(value) == str: try: return value % context_dict except KeyError: e = sys.exc_info()[1] logger.warn("Could not specialize %s! Error: %s" % (value, e)) return value else: return value
recursively substitute value with the context_dict
def visitAnnotation(self, ctx: ShExDocParser.AnnotationContext): annot = Annotation(self.context.predicate_to_IRI(ctx.predicate())) if ctx.iri(): annot.object = self.context.iri_to_iriref(ctx.iri()) else: annot.object = self.context.literal_to_ObjectLiteral(ctx.literal()) self.annotations.append(annot)
annotation: '//' predicate (iri | literal)
def filter_url(pkg_type, url): bad_stuff = ["?modtime", " for junk in bad_stuff: if junk in url: url = url.split(junk)[0] break if url.endswith("-dev"): url = url.split(" if pkg_type == "all": return url elif pkg_type == "source": valid_source_types = [".tgz", ".tar.gz", ".zip", ".tbz2", ".tar.bz2"] for extension in valid_source_types: if url.lower().endswith(extension): return url elif pkg_type == "egg": if url.lower().endswith(".egg"): return url
Returns URL of specified file type 'source', 'egg', or 'all'
def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None): return self.anim.make_frame(frame, birthframe, startframe, stopframe, deathframe, noiseframe)
:param frame: current frame :param birthframe: frame where this animation starts returning something other than None :param startframe: frame where animation starts to evolve :param stopframe: frame where animation is completed :param deathframe: frame where animation starts to return None :return:
def update(self, identifier, new_instance): old_instance = self.retrieve(identifier) old_encrypted_identifier = old_instance.encrypted_identifier if ( new_instance.encryption_context_key and old_instance.encryption_context_key != new_instance.encryption_context_key ): raise ValueError("Cannot change encryption context key") if new_instance.plaintext is None and new_instance.encrypted_relationship is None: result = super().update(identifier, new_instance) self.expunge(result) return result self.expunge(result) result.plaintext = expected_new_plaintext return result
Update an encryptable field, make sure that: * We won't change the encryption context key * The new value is going to be encrypted * The return instance.plaintext is the updated one Note: Will expunge the returned instance
def merge(self): temp_dir = self.temp_dir app_dir = self.application.directory for root, dirs, files in os.walk(temp_dir): for directory in dirs: directory = os.path.join(root, directory) directory = directory.replace(temp_dir, app_dir, 1) try: os.mkdir(directory) except OSError: pass for file in files: source = os.path.join(root, file) target = source.replace(temp_dir, app_dir, 1) relative_target = target.replace(app_dir, ) action = if ( os.path.exists(target) and not filecmp.cmp(source, target, shallow=False) and os.stat(target).st_size > 0 ): if target.endswith(): action = elif target.endswith(): action = else: default = action = click.prompt( style.prompt( % ( relative_target ), ), default=style.default(default) ) if self.interactive else default action = click.unstyle(action).lower() if action not in {, , }: action = default if action == : self.stdout.write( % style.white(relative_target), fg= ) continue if action == : with open(source, ) as source_file: with open(target, ) as target_file: target_file.write(source_file.read()) self.stdout.write( style.green( % style.white(relative_target) ) ) if action == : with open(target, ) as target_file: with open(source, ) as source_file: merged = merge( target_file.read(), source_file.read() ) with open(target, ) as target_file: target_file.write(merged) self.stdout.write( style.yellow( % style.white(relative_target)) )
Merges the rendered blueprint into the application.
def sum(self, property): self.__prepare() total = 0 for i in self._json_data: total += i.get(property) return total
Getting the sum according to the given property :@param property :@type property: string :@return int/float
def get_url_name(self, action_url_name="list"): url_name = "{}-{}".format(self.basename, action_url_name) namespace = self.request.resolver_match.namespace if namespace: url_name = "{}:{}".format(namespace, url_name) return url_name
Get full namespaced url name to use for reverse()
def add_logger(self, name, address, conn_type, log_dir_path=None, **kwargs): s name. address: A tuple containing address data for the capturer. Check the :class:`SocketStreamCapturer` documentation for what is required. conn_type: A string defining the connection type. Check the :class:`SocketStreamCapturer` documentation for a list of valid options. log_dir_path: An optional path defining the directory where the capturer should write its files. If this isn capture_handler_conf = kwargs if not log_dir_path: log_dir_path = self._mngr_conf[] log_dir_path = os.path.normpath(os.path.expanduser(log_dir_path)) capture_handler_conf[] = log_dir_path capture_handler_conf[] = name if not in capture_handler_conf: capture_handler_conf[] = True transforms = [] if in capture_handler_conf: for transform in capture_handler_conf[]: if isinstance(transform, str): if globals().has_key(transform): transforms.append(globals().get(transform)) else: msg = ( ).format( transform, capture_handler_conf[] ) log.warn(msg) elif hasattr(transform, ): transforms.append(transform) else: msg = ( ).format(transform) log.warn(msg) capture_handler_conf[] = transforms address_key = str(address) if address_key in self._stream_capturers: capturer = self._stream_capturers[address_key][0] capturer.add_handler(capture_handler_conf) return socket_logger = SocketStreamCapturer(capture_handler_conf, address, conn_type) greenlet = gevent.spawn(socket_logger.socket_monitor_loop) self._stream_capturers[address_key] = ( socket_logger, greenlet ) self._pool.add(greenlet)
Add a new stream capturer to the manager. Add a new stream capturer to the manager with the provided configuration details. If an existing capturer is monitoring the same address the new handler will be added to it. Args: name: A string defining the new capturer's name. address: A tuple containing address data for the capturer. Check the :class:`SocketStreamCapturer` documentation for what is required. conn_type: A string defining the connection type. Check the :class:`SocketStreamCapturer` documentation for a list of valid options. log_dir_path: An optional path defining the directory where the capturer should write its files. If this isn't provided the root log directory from the manager configuration is used.
def enter(self, container_alias): title = % self.__class__.__name__ input_fields = { : container_alias } for key, value in input_fields.items(): object_title = % (title, key, str(value)) self.fields.validate(value, % key, object_title) from os import system sys_cmd = % container_alias if self.localhost.os.sysname in (): sys_cmd = % sys_cmd system(sys_cmd)
a method to open up a terminal inside a running container :param container_alias: string with name or id of container :return: None
def format_pixels( top, bottom, reset=True, repeat=1 ): top_src = None if isinstance( top, int ): top_src = top else: top_rgba = colour.normalise_rgba( top ) if top_rgba[3] != 0: top_src = top_rgba bottom_src = None if isinstance( bottom, int ): bottom_src = bottom else: bottom_rgba = colour.normalise_rgba( bottom ) if bottom_rgba[3] != 0: bottom_src = bottom_rgba if (top_src is None) and (bottom_src is None): return *repeat string = *repeat; colour_format = [] if top_src == bottom_src: string = *repeat elif (top_src is None) and (bottom_src is not None): string = *repeat if (top_src is None) and (bottom_src is not None): if isinstance( bottom_src, int ): colour_format.append( ANSI_FORMAT_FOREGROUND_XTERM_CMD.format( bottom_src ) ) else: colour_format.append( ANSI_FORMAT_FOREGROUND_CMD.format( *bottom_src[:3] ) ) else: if isinstance( top_src, int ): colour_format.append( ANSI_FORMAT_FOREGROUND_XTERM_CMD.format( top_src ) ) else: colour_format.append( ANSI_FORMAT_FOREGROUND_CMD.format( *top_src[:3] ) ) if top_src is not None and bottom_src is not None and top_src != bottom_src: if isinstance( top_src, int ): colour_format.append( ANSI_FORMAT_BACKGROUND_XTERM_CMD.format( bottom_src ) ) else: colour_format.append( ANSI_FORMAT_BACKGROUND_CMD.format( *bottom_src[:3] ) ) colour_format = ANSI_FORMAT_BASE.format( .join( colour_format ) ) reset_format = if not reset else ANSI_FORMAT_RESET return .format( colour_format, string, reset_format )
Return the ANSI escape sequence to render two vertically-stacked pixels as a single monospace character. top Top colour to use. Accepted types: None, int (xterm palette ID), tuple (RGB, RGBA), Colour bottom Bottom colour to use. Accepted types: None, int (xterm palette ID), tuple (RGB, RGBA), Colour reset Reset the formatting at the end (default: True) repeat Number of horizontal pixels to render (default: 1)
def callback(self): self._callback(*self._args, **self._kwargs) self._last_checked = time.time()
Run the callback
def propose_unif(self):
    """Propose a live point sampled *uniformly* within the ellipsoid union.

    Rejection-samples until the draw lies inside the unit cube and is
    accepted with probability 1/q, where q is the number of ellipsoids
    containing the point (this corrects for ellipsoid overlap).

    Returns
    -------
    tuple
        ``(u, axes)``: the accepted unit-cube point and the axes of the
        ellipsoid it was drawn from.
    """
    accepted = False
    while not accepted:
        u, idx, q = self.mell.sample(rstate=self.rstate, return_q=True)
        # Reject points outside the unit cube (periodic dims excluded).
        if not unitcheck(u, self.nonperiodic):
            continue
        # Accept with probability 1/q to un-bias overlapping regions.
        accepted = (q == 1) or (self.rstate.rand() < 1.0 / q)
    return u, self.mell.ells[idx].axes
Propose a new live point by sampling *uniformly* within the union of ellipsoids.
# Windows-only: grant the Everyone group FILE_ALL_ACCESS on `filename`
# (falling back to the current user's account when the "Everyone" lookup
# fails), so files created under an Admin account stay fully usable by
# other accounts. No-op on non-Windows platforms.
# NOTE(review): the os.name comparison literal (normally 'nt') was lost
# in transit — restore it from version control.
def set_windows_permissions(filename): if os.name == : try: everyone, domain, type = win32security.LookupAccountName( "", "Everyone") except Exception: everyone, domain, type = win32security.LookupAccountName ("", win32api.GetUserName()) sd = win32security.GetFileSecurity( filename, win32security.DACL_SECURITY_INFORMATION) dacl = sd.GetSecurityDescriptorDacl() dacl.AddAccessAllowedAce( win32security.ACL_REVISION, con.FILE_ALL_ACCESS, everyone) sd.SetSecurityDescriptorDacl(1, dacl, 0) win32security.SetFileSecurity( filename, win32security.DACL_SECURITY_INFORMATION, sd)
At least on Windows 7, if a file is created under an Admin account, other users are not granted execute or full control — although a file created by the user himself works fine. So always change permissions after creating a file on Windows: grant the Everyone group full access for all users of the application. See http://timgolden.me.uk/python/win32_how_do_i/add-security-to-a-file.html
# Collect the names of variables identifying longitude in an open netCDF
# dataset, matching in priority order: standard_name == "longitude", the
# longitude axis attribute, or units contained in VALID_LON_UNITS
# (compared case-insensitively). Duplicates are not added twice.
# NOTE(review): the axis attribute value string literal was lost in
# transit (conventionally "X" for longitude — confirm against history).
def get_longitude_variables(nc): longitude_variables = [] for variable in nc.get_variables_by_attributes(standard_name="longitude"): longitude_variables.append(variable.name) for variable in nc.get_variables_by_attributes(axis=): if variable.name not in longitude_variables: longitude_variables.append(variable.name) check_fn = partial(attr_membership, value_set=VALID_LON_UNITS, modifier_fn=lambda s: s.lower()) for variable in nc.get_variables_by_attributes(units=check_fn): if variable.name not in longitude_variables: longitude_variables.append(variable.name) return longitude_variables
Returns a list of all variables matching definitions for longitude :param netcdf4.dataset nc: an open netcdf dataset object
def ping(self):
    """Ping the LDBD server over the existing socket file.

    Writes a NUL-terminated PING message and reads the server response.

    Returns:
        str: the (possibly empty) reply received from the server.

    Raises:
        LDBDClientException: if the server reports a non-zero status.
    """
    msg = "PING\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
        msg = "Error pinging server %d:%s" % (ret, reply)
        # Fixed: was Python-2-only `raise LDBDClientException, msg`,
        # a syntax error under Python 3.
        raise LDBDClientException(msg)
    return reply
Ping the LDBD Server and return any message received back as a string. @return: message received (may be empty) from LDBD Server as a string
# Return the default Target.ResetType for this target, derived from the
# pack's first debug entry's resetSequence attribute; falls back to a
# plain software reset when the attribute or debug entry is missing.
# @todo Support multiple cores.
# NOTE(review): the attribute-name key and the resetSequence comparison
# string literals were lost in transit — restore from version control.
def default_reset_type(self): try: resetSequence = self._info.debugs[0].attrib[] if resetSequence == : return Target.ResetType.HW elif resetSequence == : return Target.ResetType.SW_SYSRESETREQ elif resetSequence == : return Target.ResetType.SW_VECTRESET else: return Target.ResetType.SW except (KeyError, IndexError): return Target.ResetType.SW
! @brief One of the Target.ResetType enums. @todo Support multiple cores.
# Begin an orderly SSL shutdown and close of the transport: mark the
# protocol as closing, queue a shutdown marker on the write backlog, and
# flush it. Idempotent once closing or when the handle is already closed.
# NOTE(review): the bare `b` below was presumably an empty bytes literal
# b'' whose quotes were lost in transit — confirm against history.
def close(self): if self._closing or self._handle.closed: return self._closing = True self._write_backlog.append([b, False]) self._process_write_backlog()
Cleanly shut down the SSL protocol and close the transport.
def write_all(self):
    """Write out every registered config file (one per template key)."""
    # Fixed: the original built a throwaway list comprehension purely for
    # side effects via six.iterkeys(); iterating the mapping directly is
    # equivalent on both Python 2 and 3 and drops the six dependency here.
    for key in self.templates:
        self.write(key)
Write out all registered config files.
def set_lan_port(self, port_id, mac=None):
    """Record LAN port information in the configuration.

    :param port_id: physical port identifier string.
    :param mac: optional virtual MAC address (used when virtualizing).
    """
    handler = _parse_physical_port_id(port_id)
    existing = self._find_port(handler)
    if not existing:
        # Port not present yet: create and register a new LAN port entry.
        self._add_port(handler, handler.create_lan_port(mac))
    else:
        handler.set_lan_port(existing, mac)
Set LAN port information to configuration. :param port_id: Physical port ID. :param mac: virtual MAC address if virtualization is necessary.
# Salt state: manage libvirt TLS keys/certs under `basepath` (documented
# default /etc/pki). Renders the libvirt.keys pillar with the given
# kwargs, then for each known key path writes the pillar content when the
# file is missing or differs; honours Salt test mode (reports pending
# changes without writing). Returns the standard state return dict.
# NOTE(review): many string literals (pillar key names, path components,
# change labels, state comments) were lost in transit — restore them
# from version control before relying on this state.
def keys(name, basepath=, **kwargs): ret = {: name, : {}, : True, : } pillar_kwargs = {} for key, value in six.iteritems(kwargs): pillar_kwargs[.format(key)] = value pillar = __salt__[]({: }, pillar_kwargs) paths = { : os.path.join(basepath, , , ), : os.path.join(basepath, , ), : os.path.join(basepath, , , ), : os.path.join(basepath, , ), : os.path.join(basepath, , ) } for key in paths: p_key = .format(key) if p_key not in pillar: continue if not os.path.exists(os.path.dirname(paths[key])): os.makedirs(os.path.dirname(paths[key])) if os.path.isfile(paths[key]): with salt.utils.files.fopen(paths[key], ) as fp_: if salt.utils.stringutils.to_unicode(fp_.read()) != pillar[p_key]: ret[][key] = else: ret[][key] = if not ret[]: ret[] = elif __opts__[]: ret[] = None ret[] = ret[] = {} else: for key in ret[]: with salt.utils.files.fopen(paths[key], ) as fp_: fp_.write( salt.utils.stringutils.to_str( pillar[.format(key)] ) ) ret[] = return ret
Manage libvirt keys. name The name variable used to track the execution basepath Defaults to ``/etc/pki``, this is the root location used for libvirt keys on the hypervisor The following parameters are optional: country The country that the certificate should use. Defaults to US. .. versionadded:: 2018.3.0 state The state that the certificate should use. Defaults to Utah. .. versionadded:: 2018.3.0 locality The locality that the certificate should use. Defaults to Salt Lake City. .. versionadded:: 2018.3.0 organization The organization that the certificate should use. Defaults to Salted. .. versionadded:: 2018.3.0 expiration_days The number of days that the certificate should be valid for. Defaults to 365 days (1 year) .. versionadded:: 2018.3.0
# Get the episode title, preferring the Japanese-language title and
# falling back to the first listed title (for/else in the original
# layout). Return annotation corrected from `int` to `str`: the
# function returns a title string, never an int.
# NOTE(review): the language-code string literal was lost in transit
# (`title.lang == :`) — restore it (likely 'ja') from version control.
def get_episode_title(episode: Episode) -> str: for title in episode.titles: if title.lang == : return title.title else: return episode.titles[0].title
Get the episode title. Japanese title is prioritized.
# Generate destination table schemas for the bundle. Resolves processable
# sources (optionally restricted by `sources`/`tables`), optionally
# deletes existing tables/partitions first (clean=True), then per
# destination table either (a) runs the build pipeline and copies the
# collected headers/row types into columns (use_pipeline=True), or
# (b) copies columns straight from each source/partition table,
# preserving existing column descriptions. Returns True on success.
# NOTE(review): several string literals (log messages, the attrgetter
# key, the datatype fallback name) were lost in transit — restore them
# from version control.
def schema(self, sources=None, tables=None, clean=False, force=False, use_pipeline=False): from itertools import groupby from operator import attrgetter from ambry.etl import Collect, Head from ambry.orm.exc import NotFoundError self.dstate = self.STATES.BUILDING self.commit() self.log() resolved_sources = self._resolve_sources(sources, tables, predicate=lambda s: s.is_processable) if clean: self.dataset.delete_tables_partitions() self.commit() keyfunc = attrgetter() for t, table_sources in groupby(sorted(resolved_sources, key=keyfunc), keyfunc): if use_pipeline: for source in table_sources: pl = self.pipeline(source) pl.cast = [ambry.etl.CastSourceColumns] pl.select_partition = [] pl.write = [Head, Collect] pl.final = [] self.log_pipeline(pl) pl.run() pl.phase = self.log_pipeline(pl) for h, c in zip(pl.write[Collect].headers, pl.write[Collect].rows[1]): c = t.add_column(name=h, datatype=type(c).__name__ if c is not None else , update_existing=True) self.log("Populated destination table from pipeline " .format(t.name, pl.name)) else: self.commit() def source_cols(source): if source.is_partition and not source.source_table_exists: return enumerate(source.partition.table.columns) else: return enumerate(source.source_table.columns) columns = sorted(set([(i, col.dest_header, col.datatype, col.description, col.has_codes) for source in table_sources for i, col in source_cols(source)])) initial_count = len(t.columns) for pos, name, datatype, desc, has_codes in columns: kwds = dict( name=name, datatype=datatype, description=desc, update_existing=True ) try: extant = t.column(name) except NotFoundError: extant = None if extant is None or not extant.description: kwds[] = desc c = t.add_column(**kwds) final_count = len(t.columns) if final_count > initial_count: diff = final_count - initial_count self.log("Populated destination table from source table with {} columns" .format(t.name, source.source_table.name, diff)) self.commit() return True
Generate destination schemas. :param sources: If specified, build only destination tables for these sources :param tables: If specified, build only these tables :param clean: Delete tables and partitions first :param force: Population tables even if the table isn't empty :param use_pipeline: If True, use the build pipeline to determine columns. If False, :return: True on success.
# Update the entry with the given attributes, injecting the entry's
# content-type id from self.sys before delegating to the superclass.
# NOTE(review): the dict-key string literals (`attributes[]` and
# `self.sys[]`) were lost in transit — likely 'content_type'/'sys'
# style keys; restore from version control.
def update(self, attributes=None): if attributes is None: attributes = {} attributes[] = self.sys[].id return super(Entry, self).update(attributes)
Updates the entry with attributes.
def time(value):
    """Return a time literal coerced from *value*.

    Parameters
    ----------
    value : str or time
        A time value, or a string parseable as one.

    Returns
    -------
    TimeScalar
    """
    # Strings are parsed first; other values are passed through as-is.
    coerced = to_time(value) if isinstance(value, str) else value
    return literal(coerced, type=dt.time)
Returns a time literal if value is likely coercible to a time Parameters ---------- value : time value as string Returns -------- result : TimeScalar
# Format a dict as 'time=... pid=... k1=v1 k2=v2': the UTC timestamp and
# process id always come first (to aid human scanning), remaining pairs
# are lexically sorted.
# NOTE(review): the '=' join separator for key/value pairs and the ' '
# join separator for the final string were lost in transit.
def _fmt_structured(d): timeEntry = datetime.datetime.utcnow().strftime( "time=%Y-%m-%dT%H:%M:%S.%f-00") pidEntry = "pid=" + str(os.getpid()) rest = sorted(.join([str(k), str(v)]) for (k, v) in list(d.items())) return .join([timeEntry, pidEntry] + rest)
Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2' Output is lexically sorted, *except* the time and pid always come first, to assist with human scanning of the data.
# List primary datasets, optionally filtered by name and/or type; a '%'
# in either filter switches the comparison from '=' to SQL 'like'.
# Builds the WHERE clause and bind dict accordingly, then streams rows
# from the returned cursors in batches of 100.
# NOTE(review): the empty-string literals inside the `in (, None, )`
# membership tests were lost in transit — restore from history.
def execute(self, conn, primary_ds_name="", primary_ds_type="", transaction=False): sql = self.sql binds = {} if primary_ds_name and primary_ds_type in (, None, ): op = ("=", "like")["%" in primary_ds_name] sql += "WHERE P.PRIMARY_DS_NAME %s :primary_ds_name" % op binds.update(primary_ds_name=primary_ds_name) elif primary_ds_type and primary_ds_name in (, None, ): op = ("=", "like")["%" in primary_ds_type] sql += "WHERE PT.PRIMARY_DS_TYPE %s :primary_ds_type" % op binds.update(primary_ds_type=primary_ds_type) elif primary_ds_name and primary_ds_type: op = ("=", "like")["%" in primary_ds_name] op1 = ("=", "like")["%" in primary_ds_type] sql += "WHERE P.PRIMARY_DS_NAME %s :primary_ds_name and PT.PRIMARY_DS_TYPE %s :primary_ds_type"\
%(op, op1) binds.update(primary_ds_name=primary_ds_name) binds.update(primary_ds_type=primary_ds_type) else: pass cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True) result = [] for c in cursors: result.extend(self.formatCursor(c, size=100)) return result
Lists all primary datasets if pattern is not provided.
def batch_parameters(self, saa, sza, p, x, y, g, s, z):
    """Save batches of model parameters as ``*_list`` attributes.

    :param saa: <list> Sun Azimuth Angle (deg)
    :param sza: <list> Sun Zenith Angle (deg)
    :param p: <list> Phytoplankton linear scaling factor
    :param x: <list> Scattering scaling factor
    :param y: <list> Scattering slope factor
    :param g: <list> CDOM absorption scaling factor
    :param s: <list> CDOM absorption slope factor
    :param z: <list> depth (m)
    """
    assignments = dict(saa_list=saa, sza_list=sza, p_list=p, x_list=x,
                       y_list=y, g_list=g, s_list=s, z_list=z)
    for attr, values in assignments.items():
        setattr(self, attr, values)
Takes lists for parameters and saves them as class properties :param saa: <list> Sun Azimuth Angle (deg) :param sza: <list> Sun Zenith Angle (deg) :param p: <list> Phytoplankton linear scalling factor :param x: <list> Scattering scaling factor :param y: <list> Scattering slope factor :param g: <list> CDOM absorption scaling factor :param s: <list> CDOM absorption slope factor :param z: <list> depth (m)
def set_random_starting_grid(lfe):
    """Seed a Game of Life grid with a set of known patterns.

    :param lfe: grid-like object exposing ``set_tile(row, col, value)``.
    """
    cls_patterns = mod_grid.GameOfLifePatterns(25)
    # Fixed: removed leftover debug `print(cls_patterns)` / `exit(0)`
    # which terminated the process and made the rest unreachable.
    for pattern in cls_patterns.get_patterns():
        lfe.set_tile(pattern[0], pattern[1], 1)
generate a random grid for game of life using a set of patterns (just to make it interesting)
def hist(self, dimension=None, num_bins=20, bin_range=None, adjoin=True, **kwargs):
    """Compute and (optionally) adjoin histograms along dimension(s).

    Args:
        dimension: Dimension(s) to compute the histogram on (single
            value or list). None falls back to the operation's default.
        num_bins (int, optional): Number of bins.
        bin_range (tuple, optional): Lower and upper bounds of bins.
        adjoin (bool, optional): Whether to adjoin the histogram(s).

    Returns:
        AdjointLayout of element and histogram(s), or just the
        histogram(s) when ``adjoin`` is False.
    """
    from ..operation import histogram
    dims = dimension if isinstance(dimension, list) else [dimension]
    # Histograms are computed in reverse dimension order.
    hists = [histogram(self, num_bins=num_bins, bin_range=bin_range,
                       dimension=d, **kwargs)
             for d in reversed(dims)]
    if adjoin:
        layout = self
        for h in hists:
            layout = layout << h
        return layout
    if len(dims) > 1:
        return Layout(hists)
    return hists[0]
Computes and adjoins histogram along specified dimension(s). Defaults to first value dimension if present otherwise falls back to first key dimension. Args: dimension: Dimension(s) to compute histogram on num_bins (int, optional): Number of bins bin_range (tuple optional): Lower and upper bounds of bins adjoin (bool, optional): Whether to adjoin histogram Returns: AdjointLayout of element and histogram or just the histogram
# Read and decode power-monitor packets until a set of current samples
# can be returned; call StartDataCollection() first. Returns None when
# no packet is available.
# Packet types: 0 = measurement data (returns a list of samples, using
# the coarse channel when the main value's LSB flag is set, else the
# fine channel), 1 = zero calibration, 2 = reference calibration.
# Calibration packets update the coarse/fine scale factors; data packets
# are dropped (with a warning) until both scales are known. Sequence
# numbers are tracked modulo 16 to warn about lost packets.
def CollectData(self): while 1: _bytes = self._ReadPacket() if not _bytes: return None if len(_bytes) < 4 + 8 + 1 or _bytes[0] < 0x20 or _bytes[0] > 0x2F: logging.warning("Wanted data, dropped type=0x%02x, len=%d", _bytes[0], len(_bytes)) continue seq, _type, x, y = struct.unpack("BBBB", _bytes[:4]) data = [ struct.unpack(">hhhh", _bytes[x:x + 8]) for x in range(4, len(_bytes) - 8, 8) ] if self._last_seq and seq & 0xF != (self._last_seq + 1) & 0xF: logging.warning("Data sequence skipped, lost packet?") self._last_seq = seq if _type == 0: if not self._coarse_scale or not self._fine_scale: logging.warning( "Waiting for calibration, dropped data packet.") continue out = [] for main, usb, aux, voltage in data: if main & 1: coarse = ((main & ~1) - self._coarse_zero) out.append(coarse * self._coarse_scale) else: out.append((main - self._fine_zero) * self._fine_scale) return out elif _type == 1: self._fine_zero = data[0][0] self._coarse_zero = data[1][0] elif _type == 2: self._fine_ref = data[0][0] self._coarse_ref = data[1][0] else: logging.warning("Discarding data packet type=0x%02x", _type) continue if self._coarse_ref != self._coarse_zero: self._coarse_scale = 2.88 / ( self._coarse_ref - self._coarse_zero) if self._fine_ref != self._fine_zero: self._fine_scale = 0.0332 / (self._fine_ref - self._fine_zero)
Return some current samples. Call StartDataCollection() first.
def __retrieve(self, key):
    """Retrieve the cached value for *key* from the cache DB.

    :param key: cache key; None selects the NULL-key row.
    :return: the stored value, or None when absent or on error.
    """
    with self.get_conn() as conn:
        try:
            c = conn.cursor()
            if key is None:
                c.execute("SELECT value FROM cache_entries WHERE key IS NULL")
            else:
                c.execute("SELECT value FROM cache_entries WHERE key = ?", (key,))
            result = c.fetchone()
            if result is None or len(result) != 1:
                getLogger().info("There's no entry with key={key}".format(key=key))
                return None
            else:
                return result[0]
        except Exception:
            # Fixed: narrowed from a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt. Still best-effort: log the
            # failure and report a cache miss.
            getLogger().exception("Cannot retrieve")
            return None
Retrieve file location from cache DB
# Configure the root logger for script use: a single StreamHandler sends
# every record from DEBUG up to stderr; regular output uses print().
# NOTE(review): the log format string assigned to `fmt` was lost in
# transit — restore it from version control.
def setup_logging(): fmt = handler_stderr = logging.StreamHandler(sys.stderr) handler_stderr.setFormatter(logging.Formatter(fmt)) root_logger = logging.getLogger() root_logger.setLevel(logging.DEBUG) root_logger.addHandler(handler_stderr)
Called when __name__ == '__main__' below. Sets up logging library. All logging messages go to stderr, from DEBUG to CRITICAL. This script uses print() for regular messages.
def run(self, world: int) -> IO:
    """Run this IO action.

    Reads the wrapped file, feeds its contents to the stored function,
    and runs the resulting action with an incremented world counter.

    :param world: "world" counter threaded through the IO chain.
    """
    filename, continuation = self._get_value()
    handle = self.open_func(filename)
    contents = handle.read()
    next_action = continuation(contents)
    return next_action(world=world + 1)
Run IO Action
def generic_visit(self, node: ast.AST) -> None:
    """Fail loudly for AST node types that lack a dedicated handler."""
    message = "Unhandled recomputation of the node: {} {}".format(type(node), node)
    raise NotImplementedError(message)
Raise an exception that this node has not been handled.
# Create an LXD profile via pylxd: normalizes the config/devices inputs,
# creates the profile on the (possibly remote) server, optionally sets a
# description, saves, and returns the profile serialized as a dict.
# Raises CommandExecutionError on an LXD API failure.
# NOTE(review): the stray leading tokens on the `def` line are mangled
# docstring residue, and several string literals were lost in transit —
# restore this block from version control.
def profile_create(name, config=None, devices=None, description=None, remote_addr=None, cert=None, key=None, verify_cert=True): keyboot.autostartvaluekeysecurity.privilegedvalue1**disk/home/shared/home/shared client = pylxd_client_get(remote_addr, cert, key, verify_cert) config, devices = normalize_input_values( config, devices ) try: profile = client.profiles.create(name, config, devices) except pylxd.exceptions.LXDAPIException as e: raise CommandExecutionError(six.text_type(e)) if description is not None: profile.description = description pylxd_save_object(profile) return _pylxd_model_to_dict(profile)
Creates a profile. name : The name of the profile to get. config : A config dict or None (None = unset). Can also be a list: [{'key': 'boot.autostart', 'value': 1}, {'key': 'security.privileged', 'value': '1'}] devices : A device dict or None (None = unset). description : A description string or None (None = unset). remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. CLI Examples: .. code-block:: bash $ salt '*' lxd.profile_create autostart config="{boot.autostart: 1, boot.autostart.delay: 2, boot.autostart.priority: 1}" $ salt '*' lxd.profile_create shared_mounts devices="{shared_mount: {type: 'disk', source: '/home/shared', path: '/home/shared'}}" See the `lxd-docs`_ for the details about the config and devices dicts. .. _lxd-docs: https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-10
# Detect peaks in raw spectrum data: sorts the spectrum by mass, hands
# the m/z and intensity buffers to the native ims library, and wraps the
# result as a CentroidedSpectrum containing the isotope-pattern
# centroids. `window_size` is the m/z averaging window.
# NOTE(review): the dtype/format string arguments to _cffi_buffer were
# lost in transit — restore from version control.
def centroids(self, window_size=5): self.sortByMass() mzs = _cffi_buffer(self.masses, ) intensities = _cffi_buffer(self.intensities, ) n = self.size p = ims.spectrum_new_from_raw(n, mzs.ptr, intensities.ptr, int(window_size)) return _new_spectrum(CentroidedSpectrum, p)
Detects peaks in raw data. :param mzs: sorted array of m/z values :param intensities: array of corresponding intensities :param window_size: size of m/z averaging window :returns: isotope pattern containing the centroids :rtype: CentroidedSpectrum
# Write a single byte into the simulated device's input buffer. Raises
# TypeError for non-bytes input and ValueError when more than one byte
# is given. Once the input ends with the query end-of-message marker,
# splits the buffered message on the delimiter, matches each query to a
# response (or the error response), and appends responses plus the
# response end-of-message marker to the output buffer; the input buffer
# is always cleared afterwards.
# NOTE(review): the debug format string and the TypeError/ValueError
# message strings were lost in transit — restore from version control.
def write(self, data): logger.debug( % data) if not isinstance(data, bytes): raise TypeError() if len(data) != 1: msg = raise ValueError(msg % len(data)) self._input_buffer.extend(data) l = len(self._query_eom) if not self._input_buffer.endswith(self._query_eom): return try: message = bytes(self._input_buffer[:-l]) queries = (message.split(self.delimiter) if self.delimiter else [message]) for query in queries: response = self._match(query) eom = self._response_eom if response is None: response = self.error_response() if response is not NoResponse: self._output_buffer.extend(response) self._output_buffer.extend(eom) finally: self._input_buffer = bytearray()
Write data into the device input buffer. :param data: single element byte :type data: bytes
def is_time_included(self, time):
    """Check whether *time* falls inside this analysis period.

    Args:
        time: A DateTime whose minute-of-year (``moy``) is tested.

    Returns:
        A boolean: True if the time is included, otherwise False.
    """
    # Lazily build the timestamp set on first use.
    data = self._timestamps_data
    if data is None:
        self._calculate_timestamps()
        data = self._timestamps_data
    return time.moy in data
Check if time is included in analysis period. Return True if time is inside this analysis period, otherwise return False Args: time: A DateTime to be tested Returns: A boolean. True if time is included in analysis period
def get_all_locations(self, timeout: int=None):
    """Get a list of all locations.

    Parameters
    ----------
    timeout: Optional[int] = None
        Custom timeout that overwrites the client default.
    """
    return self._get_model(self.api.LOCATIONS, timeout=timeout)
Get a list of all locations Parameters ---------- timeout: Optional[int] = None Custom timeout that overwrites Client.timeout
def read_csv(text, sep="\t"):
    """Create a pandas DataFrame from delimited text.

    :param text: the delimited text content.
    :param sep: field separator (default: tab).
    :return: pandas.DataFrame
    """
    import pandas as pd
    # Fixed: the `sep` argument was previously ignored and "\t" was
    # always passed to pandas.
    return pd.read_csv(StringIO(text), sep=sep)
Create a DataFrame from CSV text
# Run dependency-ordered checks concurrently in a process pool. Roots
# (children of None in child_map) are submitted first; a check's
# children are submitted only after it passes, carrying its state
# forward. Children of failed checks are marked skipped recursively.
# Returns the CheckResult values keyed by the declaration order of the
# checks in the imported module.
def run(self, files, working_area): results = {name: None for name in self.check_names} checks_root = working_area.parent with futures.ProcessPoolExecutor() as executor: not_done = set(executor.submit(run_check(name, self.checks_spec, checks_root)) for name, _ in self.child_map[None]) not_passed = [] while not_done: done, not_done = futures.wait(not_done, return_when=futures.FIRST_COMPLETED) for future in done: result, state = future.result() results[result.name] = result if result.passed: for child_name, _ in self.child_map[result.name]: not_done.add(executor.submit( run_check(child_name, self.checks_spec, checks_root, state))) else: not_passed.append(result.name) for name in not_passed: self._skip_children(name, results) return results.values()
Run checks concurrently. Returns a list of CheckResults ordered by declaration order of the checks in the imported module
# Yield rows from a CSV file object as unicode, skipping `skiprows`
# leading rows. Handles the Python 2/3 csv reader split: on Py3 the
# reader consumes text directly (fileobj should be a StringIO); on Py2
# the dialect parameters must be byte-encoded and each cell is decoded
# afterwards (fileobj should be a BytesIO).
# NOTE(review): the default-argument string literals (delimiter,
# quotechar, lineterminator, encoding) were lost in transit — restore
# them from version control.
def read_unicode_csv_fileobj(fileobj, delimiter=, quotechar=, quoting=csv.QUOTE_MINIMAL, lineterminator=, encoding=, skiprows=0): if sys.version_info[0] >= 3: csv_reader = csv.reader(fileobj, delimiter=delimiter, quotechar=quotechar, quoting=quoting, lineterminator=lineterminator) for skip_ix in range(skiprows): next(csv_reader) for row in csv_reader: yield row else: csv_reader = csv.reader(fileobj, delimiter=delimiter.encode(encoding), quotechar=quotechar.encode(encoding), quoting=quoting, lineterminator=lineterminator) for skip_ix in range(skiprows): next(csv_reader) for row in csv_reader: yield [cell.decode(encoding) for cell in row]
fileobj can be a StringIO in Py3, but should be a BytesIO in Py2.
# Diagonally weighted p-norm: takes |x| element-wise, then returns
# max(w * |x|) for p == inf, else (sum(w * |x|**p)) ** (1/p). The ravel
# order is chosen to match the arrays' contiguity and avoid copies.
# NOTE(review): the 'F'/'C' ravel-order string literals were lost in
# transit (`order = if all(...) else`) — restore from version control.
def _pnorm_diagweight(x, p, w): order = if all(a.flags.f_contiguous for a in (x.data, w)) else xp = np.abs(x.data.ravel(order)) if p == float(): xp *= w.ravel(order) return np.max(xp) else: xp = np.power(xp, p, out=xp) xp *= w.ravel(order) return np.sum(xp) ** (1 / p)
Diagonally weighted p-norm implementation.
# Upload a jpg/png image to a subreddit, optionally as the subreddit
# header (name and header are mutually exclusive). The image type is
# taken from `upload_as` or sniffed from the file; the name defaults to
# the filename without extension. Returns the uploaded image link, or
# raises errors.APIException when reddit reports errors.
# NOTE(review): several string literals (upload_as choices, request
# data keys, open mode, config key, response keys) were lost in
# transit — restore them from version control.
def upload_image(self, subreddit, image_path, name=None, header=False, upload_as=None): if name and header: raise TypeError() if upload_as not in (None, , ): raise TypeError("upload_as must be , , or None.") with open(image_path, ) as image: image_type = upload_as or _image_type(image) data = {: six.text_type(subreddit), : image_type} if header: data[] = 1 else: if not name: name = os.path.splitext(os.path.basename(image.name))[0] data[] = name response = json.loads(self._request( self.config[], data=data, files={: image}, method=to_native_string(), retry_on_error=False)) if response[]: raise errors.APIException(response[], None) return response[]
Upload an image to the subreddit. :param image_path: A path to the jpg or png image you want to upload. :param name: The name to provide the image. When None the name will be filename less any extension. :param header: When True, upload the image as the subreddit header. :param upload_as: Must be `'jpg'`, `'png'` or `None`. When None, this will match the format of the image itself. In all cases where both this value and the image format is not png, reddit will also convert the image mode to RGBA. reddit optimizes the image according to this value. :returns: A link to the uploaded image. Raises an exception otherwise.
# Query the Transport NSW departures API for a stop, optionally
# filtering events by destination (takes precedence) or route number,
# and store the first matching departure in self.info. On network
# errors, non-200 responses, or missing stop events the previous
# self.info is returned unchanged. maxresults is fixed at 1.
# NOTE(review): the request URL fragments, auth prefix, header keys and
# JSON key string literals were lost in transit — restore them from
# version control before use.
def get_departures(self, stop_id, route, destination, api_key): self.stop_id = stop_id self.route = route self.destination = destination self.api_key = api_key url = \
\
\
\
+ self.stop_id \
+ auth = + self.api_key header = {: , : auth} try: response = requests.get(url, headers=header, timeout=10) except: logger.warning("Network or Timeout error") return self.info if response.status_code != 200: logger.warning("Error with the request sent; check api key") return self.info result = response.json() try: result[] except KeyError: logger.warning("No stop events for this query") return self.info maxresults = 1 monitor = [] if self.destination != : for i in range(len(result[])): destination = result[][i][][][] if destination == self.destination: event = self.parseEvent(result, i) if event != None: monitor.append(event) if len(monitor) >= maxresults: break elif self.route != : for i in range(len(result[])): number = result[][i][][] if number == self.route: event = self.parseEvent(result, i) if event != None: monitor.append(event) if len(monitor) >= maxresults: break else: for i in range(0, maxresults): event = self.parseEvent(result, i) if event != None: monitor.append(event) if monitor: self.info = { ATTR_STOP_ID: self.stop_id, ATTR_ROUTE: monitor[0][0], ATTR_DUE_IN: monitor[0][1], ATTR_DELAY: monitor[0][2], ATTR_REALTIME: monitor[0][5], ATTR_DESTINATION: monitor[0][6], ATTR_MODE: monitor[0][7] } return self.info
Get the latest data from Transport NSW.
# Link the given modules to this satellite link (delegates to
# linkify_s_by_module after a debug trace).
# :param modules: alignak Modules collection to link.
# :return: None
def linkify(self, modules): logger.debug("Linkify %s with %s", self, modules) self.linkify_s_by_module(modules)
Link modules and Satellite links :param modules: Module object list :type modules: alignak.objects.module.Modules :return: None
def get_recent_state(self, current_observation):
    """Return the window of most recent observations.

    Walks backwards through the stored observations, stopping at an
    episode boundary (unless boundaries are ignored), and left-pads
    with zeroed observations up to ``window_length``.

    # Argument
        current_observation (object): the latest observation.

    # Returns
        A list of the last ``window_length`` observations.
    """
    state = [current_observation]
    last = len(self.recent_observations) - 1
    for offset in range(self.window_length - 1):
        pos = last - offset
        if pos < 0:
            break
        # The terminal flag of the *previous* step marks an episode boundary.
        prev_terminal = self.recent_terminals[pos - 1] if pos >= 1 else False
        if prev_terminal and not self.ignore_episode_boundaries:
            break
        state.insert(0, self.recent_observations[pos])
    # Left-pad with zeroed copies of the earliest observation in the window.
    while len(state) < self.window_length:
        state.insert(0, zeroed_observation(state[0]))
    return state
Return list of last observations # Argument current_observation (object): Last observation # Returns A list of the last observations
def _count(dicts): counts = defaultdict(int) for d in dicts: for k, v in d.items(): counts[k] += v return counts
Merge a list of dicts, summing their values.
# Plot/analyse hysteresis data (adapted from pmagplotlib.iplot_hys for
# Jupyter use). Detrends the measured loop, fits the high-field slopes
# of both branches to estimate high-field susceptibility and saturation
# magnetization, splines the upper/lower branches to compute the
# deltaM curve, and extrapolates the branch zero-crossings for the
# coercivity estimate. Returns (hpars, deltaM, Bdm, B, Mnorm, MadjN).
#   fignum: matplotlib figure number (0 = no figure cleared)
#   B, M:   lists of flux density / magnetization values
#   s:      specimen name
# NOTE(review): the dict-key string literals in the `hpars[...]`
# assignments (and several format strings) were lost in transit —
# restore them from version control.
def iplot_hys(fignum, B, M, s): if fignum != 0: plt.figure(num=fignum) plt.clf() hpars = {} Npts = len(M) B70 = 0.7 * B[0] for b in B: if b < B70: break Nint = B.index(b) - 1 if Nint > 30: Nint = 30 if Nint < 10: Nint = 10 Bzero, Mzero, Mfix, Mnorm, Madj, MadjN = "", "", [], [], [], [] Mazero = "" m_init = 0.5 * (M[0] + M[1]) m_fin = 0.5 * (M[-1] + M[-2]) diff = m_fin - m_init Bmin = 0. for k in range(Npts): frac = old_div(float(k), float(Npts - 1)) Mfix.append((M[k] - diff * frac)) if Bzero == "" and B[k] < 0: Bzero = k if B[k] < Bmin: Bmin = B[k] kmin = k Bslop = B[2:Nint + 2] Mslop = Mfix[2:Nint + 2] polyU = polyfit(Bslop, Mslop, 1) Bslop = B[kmin:kmin + (Nint + 1)] Mslop = Mfix[kmin:kmin + (Nint + 1)] polyL = polyfit(Bslop, Mslop, 1) xhf = 0.5 * (polyU[0] + polyL[0]) hpars[] = % (xhf * 4 * np.pi * 1e-7) meanint = 0.5 * (polyU[1] + polyL[1]) Msat = 0.5 * (polyU[1] - polyL[1]) Moff = [] for k in range(Npts): Moff.append((Mfix[k] - xhf * B[k] - meanint)) if Mzero == "" and Moff[k] < 0: Mzero = k if Mzero != "" and Mazero == "" and Moff[k] > 0: Mazero = k hpars[] = % (Msat) Mupper, Bupper, Mlower, Blower = [], [], [], [] deltaM, Bdm = [], [] for k in range(kmin - 2, 0, -2): Mupper.append(old_div(Moff[k], Msat)) Bupper.append(B[k]) for k in range(kmin + 2, len(B)-1): Mlower.append(Moff[k] / Msat) Blower.append(B[k]) Iupper = spline.Spline(Bupper, Mupper) Ilower = spline.Spline(Blower, Mlower) for b in np.arange(B[0]): Mpos = ((Iupper(b) - Ilower(b))) Mneg = ((Iupper(-b) - Ilower(-b))) Bdm.append(b) deltaM.append(0.5 * (Mpos + Mneg)) print() for k in range(Npts): MadjN.append(old_div(Moff[k], Msat)) Mnorm.append(old_div(M[k], Msat)) Mr = Msat * 0.5 * (Iupper(0.) 
- Ilower(0.)) hpars[] = % (Mr) Bz = B[Mzero - 1:Mzero + 1] Mz = Moff[Mzero - 1:Mzero + 1] Baz = B[Mazero - 1:Mazero + 1] Maz = Moff[Mazero - 1:Mazero + 1] try: poly = polyfit(Bz, Mz, 1) Bc = old_div(-poly[1], poly[0]) poly = polyfit(Baz, Maz, 1) Bac = old_div(-poly[1], poly[0]) hpars[] = % (0.5 * (abs(Bc) + abs(Bac))) except: hpars[] = return hpars, deltaM, Bdm, B, Mnorm, MadjN
function to plot hysteresis data This function has been adapted from pmagplotlib.iplot_hys for specific use within a Jupyter notebook. Parameters ----------- fignum : reference number for matplotlib figure being created B : list of B (flux density) values of hysteresis experiment M : list of M (magnetization) values of hysteresis experiment s : specimen name
# Notebook pipeline helper: load the stored text for `label`, seed it
# with `default` when absent (saving the seed), and display an
# interactive Text widget whose edits are saved back via self.save.
# NOTE(review): the default-argument string literals (default,
# description, format) were lost in transit; also `obj == None` should
# be `obj is None`.
def setText(self, label, default=, description=, format=): obj = self.load(label) if obj == None: obj=default self.save(obj, label) textw = Text(value=obj, description=description) hndl = interact(self.save, obj=textw, label=fixed(label), format=fixed(format))
Set text in a notebook pipeline (via interaction or with nbconvert)
# Return the Scopus IDs of previous author profiles merged into this
# profile (the suffix after the last ':' of each historical entry), or
# None when the list is empty.
# NOTE(review): the JSON key string literals (the coredata sub-key and
# the per-entry id key) were lost in transit — restore from history.
def historical_identifier(self): hist = chained_get(self._json, ["coredata", ], []) return [d[].split(":")[-1] for d in hist] or None
Scopus IDs of previous profiles now compromising this profile.
# TCP "knock": try to open a socket to host:port within `timeout`
# seconds to check for a listening service; returns True on a
# successful connect, False on any socket error. The hostname is
# resolved first for friendlier log output, falling back to the raw
# name if resolution fails.
# NOTE(review): the log-message format strings were lost in transit —
# restore them from version control.
def port_knock_tcp(self, host="localhost", port=22, timeout=15): try: connect_host = socket.gethostbyname(host) host_human = "{} ({})".format(connect_host, host) except socket.error as e: self.log.warn( .format(host, e)) connect_host = host host_human = connect_host try: knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) knock.settimeout(timeout) knock.connect((connect_host, port)) knock.close() self.log.debug( .format(host_human, port)) return True except socket.error as e: self.log.debug( .format(host_human, port, e)) return False
Open a TCP socket to check for a listening sevice on a host. :param host: host name or IP address, default to localhost :param port: TCP port number, default to 22 :param timeout: Connect timeout, default to 15 seconds :returns: True if successful, False if connect failed
# Build a raw.githubusercontent.com URL for a file. Accepts the repo
# slug either as an 'owner/name' string or a RepoSlug object, a git ref
# (branch, tag, or commit), and a POSIX file path whose leading '/' is
# stripped before substitution into the URL template.
# NOTE(review): the URL template string and the startswith()/lstrip()
# argument literals (both '/') were lost in transit — restore from
# version control.
def make_raw_content_url(repo_slug, git_ref, file_path): if isinstance(repo_slug, RepoSlug): slug_str = repo_slug.full else: slug_str = repo_slug if file_path.startswith(): file_path = file_path.lstrip() template = return template.format( slug=slug_str, git_ref=git_ref, path=file_path)
Make a raw content (raw.githubusercontent.com) URL to a file. Parameters ---------- repo_slug : `str` or `RepoSlug` The repository slug, formatted as either a `str` (``'owner/name'``) or a `RepoSlug` object (created by `parse_repo_slug_from_url`). git_ref : `str` The git ref: a branch name, commit hash, or tag name. file_path : `str` The POSIX path of the file in the repository tree.
# Asynchronous counterpart of run(): fills in host/port defaults when no
# socket is given, picks the protocol (WebSocket when enabled), emits a
# DeprecationWarning for the deprecated stop_event argument, optionally
# enables access logging, builds the server settings via _helper, fires
# the "before_start" listeners, and finally delegates to serve() —
# returning the asyncio server when return_asyncio_server is True.
# Note: this path does not support multiprocessing; prefer run() for
# normal use.
async def create_server( self, host: Optional[str] = None, port: Optional[int] = None, debug: bool = False, ssl: Union[dict, SSLContext, None] = None, sock: Optional[socket] = None, protocol: Type[Protocol] = None, backlog: int = 100, stop_event: Any = None, access_log: Optional[bool] = None, return_asyncio_server=False, asyncio_server_kwargs=None, ) -> None: if sock is None: host, port = host or "127.0.0.1", port or 8000 if protocol is None: protocol = ( WebSocketProtocol if self.websocket_enabled else HttpProtocol ) if stop_event is not None: if debug: warnings.simplefilter("default") warnings.warn( "stop_event will be removed from future versions.", DeprecationWarning, ) if access_log is not None: self.config.ACCESS_LOG = access_log server_settings = self._helper( host=host, port=port, debug=debug, ssl=ssl, sock=sock, loop=get_event_loop(), protocol=protocol, backlog=backlog, run_async=return_asyncio_server, ) await self.trigger_events( server_settings.get("before_start", []), server_settings.get("loop"), ) return await serve( asyncio_server_kwargs=asyncio_server_kwargs, **server_settings )
Asynchronous version of :func:`run`. This method will take care of the operations necessary to invoke the *before_start* events via :func:`trigger_events` method invocation before starting the *sanic* app in Async mode. .. note:: This does not support multiprocessing and is not the preferred way to run a :class:`Sanic` application. :param host: Address to host on :type host: str :param port: Port to host on :type port: int :param debug: Enables debug output (slows server) :type debug: bool :param ssl: SSLContext, or location of certificate and key for SSL encryption of worker(s) :type ssl:SSLContext or dict :param sock: Socket for the server to accept connections from :type sock: socket :param protocol: Subclass of asyncio Protocol class :type protocol: type[Protocol] :param backlog: a number of unaccepted connections that the system will allow before refusing new connections :type backlog: int :param stop_event: event to be triggered before stopping the app - deprecated :type stop_event: None :param access_log: Enables writing access logs (slows server) :type access_log: bool :param return_asyncio_server: flag that defines whether there's a need to return asyncio.Server or start it serving right away :type return_asyncio_server: bool :param asyncio_server_kwargs: key-value arguments for asyncio/uvloop create_server method :type asyncio_server_kwargs: dict :return: Nothing
def del_hparam(self, name):
    """Removes the hyperparameter with key 'name'.

    Does nothing if it isn't present.

    Args:
        name: Name of the hyperparameter to remove.
    """
    # Guard clause: absent hyperparameters are a silent no-op.
    if not hasattr(self, name):
        return
    delattr(self, name)
    del self._hparam_types[name]
Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter.
# Return (stdout_encoding, stderr_encoding), substituting a default
# when a stream has no encoding attribute set.
# NOTE(review): the fallback encoding string literals (per the original
# docstring, 'utf-8') were lost in transit — restore from history.
def _get_encodings(): stdout_encoding = sys.stdout.encoding if sys.stdout.encoding else stderr_encoding = sys.stderr.encoding if sys.stderr.encoding else return stdout_encoding, stderr_encoding
Just a simple function to return the system encoding (defaults to utf-8)
def init_from_files( vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1e6, reserved_tokens=None):
    """Create a subtoken vocabulary from text files and save it to disk.

    If ``vocab_file`` already exists it is reused as-is; otherwise the
    files are sampled, a subtoken vocabulary of roughly
    ``target_vocab_size`` entries is generated, and the result is
    written to ``vocab_file``.

    Args:
      vocab_file: String name of vocab file to read or create.
      files: List of file paths used to generate the vocabulary.
      target_vocab_size: Target vocabulary size to generate.
      threshold: Int threshold of vocabulary size to accept.
      min_count: Minimum subtoken count for inclusion in the vocabulary;
        found via binary search when None.
      file_byte_limit: Maximum number of bytes of sample text drawn from
        the files (default 1e6).
      reserved_tokens: Tokens guaranteed to lead the vocabulary list.

    Returns:
      Subtokenizer object backed by ``vocab_file``.
    """
    if reserved_tokens is None:
        reserved_tokens = RESERVED_TOKENS

    # Reuse an existing vocab file instead of regenerating it.
    if tf.gfile.Exists(vocab_file):
        tf.logging.info("Vocab file already exists (%s)" % vocab_file)
        return Subtokenizer(vocab_file)

    tf.logging.info("Begin steps to create subtoken vocabulary...")
    token_counts = _count_tokens(files, file_byte_limit)
    alphabet = _generate_alphabet_dict(token_counts)
    subtoken_list = _generate_subtokens_with_target_vocab_size(
        token_counts, alphabet, target_vocab_size, threshold, min_count,
        reserved_tokens)
    tf.logging.info(
        "Generated vocabulary with %d subtokens." % len(subtoken_list))
    mlperf_log.transformer_print(
        key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(subtoken_list))
    _save_vocab_file(vocab_file, subtoken_list)
    return Subtokenizer(vocab_file)
Create subtoken vocabulary based on files, and save vocab to file. Args: vocab_file: String name of vocab file to store subtoken vocabulary. files: List of file paths that will be used to generate vocabulary. target_vocab_size: target vocabulary size to generate. threshold: int threshold of vocabulary size to accept. min_count: int minimum count to use for generating the vocabulary. The min count is the minimum number of times a subtoken should appear in the files before it is added to the vocabulary. If set to none, this value is found using binary search. file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that will be drawn from the files. reserved_tokens: List of string tokens that are guaranteed to be at the beginning of the subtoken vocabulary list. Returns: Subtokenizer object
def _merge_lib_dict(d1, d2): for required, requirings in d2.items(): if required in d1: d1[required].update(requirings) else: d1[required] = requirings return None
Merges lib_dict `d2` into lib_dict `d1`
def _get_param_iterator(self):
    """Build a ParameterSampler over the configured distributions.

    Returns:
        A ``model_selection.ParameterSampler`` drawing ``n_iter``
        candidates from ``param_distributions`` with this estimator's
        ``random_state``.
    """
    sampler = model_selection.ParameterSampler(
        self.param_distributions,
        self.n_iter,
        random_state=self.random_state,
    )
    return sampler
Return ParameterSampler instance for the given distributions
def reset_terminal():
    """Reset the terminal/console screen.  (Also aliased to cls.)

    Greater than a fullscreen terminal clear: also clears the
    scrollback buffer.  May expose bugs in dumb terminals.

    Returns the escape sequence written on POSIX; returns None on
    Windows, where the console API is used instead.
    """
    # The 'nt' literal was missing in the previous revision, leaving an
    # empty comparison; os.name == 'nt' is the standard Windows check.
    if os.name == 'nt':
        from .windows import cls
        cls()
    else:
        text = sc.reset  # full terminal-reset escape sequence
        _write(text)
        return text
Reset the terminal/console screen. (Also aliased to cls.) Greater than a fullscreen terminal clear, also clears the scrollback buffer. May expose bugs in dumb terminals.
def create_shortcuts(self):
    """Create local shortcuts for this widget and return them as a list.

    Each entry is built with config_shortcut(callback, context, name,
    parent) so the key sequence is user-configurable.

    NOTE(review): in this copy of the file the ``context=`` / ``name=``
    keyword values of the first group of shortcuts appear to have been
    stripped (empty keyword arguments are not valid Python) -- restore
    them from upstream before relying on this block.
    """
    # --- shortcuts whose context/name literals are missing in this copy ---
    inspect = config_shortcut(self.inspect_current_object, context=, name=, parent=self)
    set_breakpoint = config_shortcut(self.set_or_clear_breakpoint, context=, name=, parent=self)
    set_cond_breakpoint = config_shortcut( self.set_or_edit_conditional_breakpoint, context=, name=, parent=self)
    gotoline = config_shortcut(self.go_to_line, context=, name=, parent=self)
    tab = config_shortcut(lambda: self.tab_navigation_mru(forward=False), context=, name=, parent=self)
    tabshift = config_shortcut(self.tab_navigation_mru, context=, name=, parent=self)
    prevtab = config_shortcut(lambda: self.tabs.tab_navigate(-1), context=, name=, parent=self)
    nexttab = config_shortcut(lambda: self.tabs.tab_navigate(1), context=, name=, parent=self)
    run_selection = config_shortcut(self.run_selection, context=, name=, parent=self)
    new_file = config_shortcut(lambda : self.sig_new_file[()].emit(), context=, name=, parent=self)
    open_file = config_shortcut(lambda : self.plugin_load[()].emit(), context=, name=, parent=self)
    save_file = config_shortcut(self.save, context=, name=, parent=self)
    save_all = config_shortcut(self.save_all, context=, name=, parent=self)
    save_as = config_shortcut(lambda : self.sig_save_as.emit(), context=, name=, parent=self)
    close_all = config_shortcut(self.close_all_files, context=, name=, parent=self)
    # --- cursor/edit-position navigation ---
    prev_edit_pos = config_shortcut(lambda : self.sig_prev_edit_pos.emit(), context="Editor", name="Last edit location", parent=self)
    prev_cursor = config_shortcut(lambda : self.sig_prev_cursor.emit(), context="Editor", name="Previous cursor position", parent=self)
    next_cursor = config_shortcut(lambda : self.sig_next_cursor.emit(), context="Editor", name="Next cursor position", parent=self)
    # --- zoom (two bindings map to the same zoom-in action) ---
    zoom_in_1 = config_shortcut(lambda : self.zoom_in.emit(), context="Editor", name="zoom in 1", parent=self)
    zoom_in_2 = config_shortcut(lambda : self.zoom_in.emit(), context="Editor", name="zoom in 2", parent=self)
    zoom_out = config_shortcut(lambda : self.zoom_out.emit(), context="Editor", name="zoom out", parent=self)
    zoom_reset = config_shortcut(lambda: self.zoom_reset.emit(), context="Editor", name="zoom reset", parent=self)
    # --- file close (two bindings map to the same close action) ---
    close_file_1 = config_shortcut(self.close_file, context="Editor", name="close file 1", parent=self)
    close_file_2 = config_shortcut(self.close_file, context="Editor", name="close file 2", parent=self)
    # --- cell execution and navigation ---
    run_cell = config_shortcut(self.run_cell, context="Editor", name="run cell", parent=self)
    run_cell_and_advance = config_shortcut(self.run_cell_and_advance, context="Editor", name="run cell and advance", parent=self)
    go_to_next_cell = config_shortcut(self.advance_cell, context="Editor", name="go to next cell", parent=self)
    go_to_previous_cell = config_shortcut(lambda: self.advance_cell(reverse=True), context="Editor", name="go to previous cell", parent=self)
    re_run_last_cell = config_shortcut(self.re_run_last_cell, context="Editor", name="re-run last cell", parent=self)
    # --- warning navigation ---
    prev_warning = config_shortcut(lambda: self.sig_prev_warning.emit(), context="Editor", name="Previous warning", parent=self)
    next_warning = config_shortcut(lambda: self.sig_next_warning.emit(), context="Editor", name="Next warning", parent=self)
    # --- split-panel management ---
    split_vertically = config_shortcut(lambda: self.sig_split_vertically.emit(), context="Editor", name="split vertically", parent=self)
    split_horizontally = config_shortcut(lambda: self.sig_split_horizontally.emit(), context="Editor", name="split horizontally", parent=self)
    close_split = config_shortcut(self.close_split, context="Editor", name="close split panel", parent=self)
    return [inspect, set_breakpoint, set_cond_breakpoint, gotoline, tab, tabshift, run_selection, new_file, open_file, save_file, save_all, save_as, close_all, prev_edit_pos, prev_cursor, next_cursor, zoom_in_1, zoom_in_2, zoom_out, zoom_reset, close_file_1, close_file_2, run_cell, run_cell_and_advance, go_to_next_cell, go_to_previous_cell, re_run_last_cell, prev_warning, next_warning, split_vertically, split_horizontally, close_split, prevtab, nexttab]
Create local shortcuts