Dataset columns:
repository_name: string, length 7 to 107
function_path: string, length 4 to 190
function_identifier: string, length 1 to 236
language: string, 1 distinct value
function: string, length 9 to 647k
docstring: string, length 5 to 488k
function_url: string, length 71 to 285
context: string, length 0 to 2.51M
license: string, 5 distinct values
quantopian/qdb
qdb/comm.py
RemoteCommandManager.command_list
python
def command_list(self, tracer, payload):
    if not self.payload_check(payload, 'list'):
        return self.next_command.tailcall(tracer)
    filename = payload.get('file') or tracer.default_file
    try:
        if tracer.skip_fn(filename):
            raise KeyError
        if not (payload.get('start') or payload.get('end')):
            msg = fmt_msg(
                'list',
                tracer.get_file(payload['file']),
                serial=json.dumps
            )
        else:
            msg = fmt_msg(
                'list',
                '\n'.join(
                    tracer.get_file_lines(tracer.canonic(filename))[
                        int(payload.get('start')):int(payload.get('end'))
                    ]
                ),
                serial=json.dumps
            )
    except KeyError:
        msg = fmt_err_msg(
            'list',
            'File %s does not exist' % payload['file'],
            serial=json.dumps
        )
    except TypeError:
        msg = fmt_err_msg(
            'list',
            'List slice arguments must be convertable to type int',
            serial=json.dumps
        )
    return self.next_command.tailcall(msg)
List the contents of a file and defer to user control.
https://github.com/quantopian/qdb/blob/c25018d2f0979589a38a07667478cb6022d57ed9/qdb/comm.py#L556-L599
from __future__ import print_function from abc import ABCMeta, abstractmethod import atexit from bdb import Breakpoint import errno from functools import partial import json import os from pprint import pprint import signal import socket from struct import pack, unpack from textwrap import dedent from logbook import Logger from qdb.compat import ( Connection, PY3, gevent, input, items, print_, range, with_metaclass, ) from qdb.errors import ( QdbAuthenticationError, QdbBreakpointReadError, QdbCommunicationError, QdbFailedToConnect, QdbReceivedInvalidData, QdbUnreachableBreakpoint, ) from qdb.utils import Timeout, tco log = Logger('Qdb') def fmt_msg(event, payload=None, serial=None): frame = { 'e': event, 'p': payload, } return serial(frame) if serial else frame def fmt_err_msg(error_type, data, serial=None): return fmt_msg( 'error', { 'type': error_type, 'data': data, }, serial=serial, ) def fmt_breakpoint(breakpoint): return { 'file': breakpoint.file, 'line': breakpoint.line, 'temp': breakpoint.temporary, 'cond': breakpoint.cond, 'func': breakpoint.funcname, } class CommandManager(with_metaclass(ABCMeta, object)): def _fmt_stackframe(self, tracer, stackframe, line): filename = stackframe.f_code.co_filename func = stackframe.f_code.co_name code = tracer.get_line(filename, line) return { 'file': tracer.canonic(filename), 'line': line, 'func': func, 'code': code, } def send_disabled(self): try: self.send_event('disabled') except socket.error: pass def send_breakpoints(self): self.send_event( 'breakpoints', [fmt_breakpoint(breakpoint) for breakpoint in Breakpoint.bpbynumber if breakpoint] ) def send_watchlist(self, tracer): self.send_event( 'watchlist', [{'expr': k, 'exc': exc, 'value': val} for k, (exc, val) in items(tracer.watchlist)], ) def send_print(self, input_, exc, output): self.send(fmt_msg( 'print', { 'input': input_, 'exc': exc, 'output': output }, serial=json.dumps) ) def send_stack(self, tracer): stack = [] index = tracer.curindex skip_fn = tracer.skip_fn for n, (frame, line) in enumerate(tracer.stack): if skip_fn(frame.f_code.co_filename): if n < tracer.curindex: index -= 1 continue stack.append(self._fmt_stackframe(tracer, frame, line)) self.send_event( 'stack', { 'index': index, 'stack': stack, } ) def send_error(self, error_type, error_data): self.send(fmt_err_msg(error_type, error_data, serial=json.dumps)) def send_event(self, event, payload=None): self.send(fmt_msg(event, payload, serial=json.dumps)) @tco def next_command(self, tracer, msg=None): if msg: self.send(msg) return self.user_next_command(tracer) @abstractmethod def send(self, msg): raise NotImplementedError @abstractmethod def user_next_command(self, tracer): raise NotImplementedError @abstractmethod def start(self, tracer, auth_msg=''): raise NotImplementedError def stop(self): self.send_disabled() self.user_stop() @abstractmethod def user_stop(self): raise NotImplementedError class NopCommandManager(CommandManager): def user_next_command(self, tracer): pass def send(self, msg): pass def start(self, tracer, msg): pass def user_stop(self): pass class RemoteCommandManager(CommandManager): def __init__(self): super(RemoteCommandManager, self).__init__() if gevent is not None: import gipc self._pipe = gipc.pipe self._start_process = gipc.start_process else: import multiprocessing def _pipe(*args, **kwargs): a, b = multiprocessing.Pipe(*args, **kwargs) return Connection(a), Connection(b) self._pipe = _pipe def _start_process(*args, **kwargs): proc = multiprocessing.Process(*args, **kwargs) proc.start() return proc 
self._start_process = _start_process self.pipe = None self.socket = None self.reader = None def _socket_connect(self, tracer): log.info('Connecting to (%s, %d)' % tracer.address) for n in range(tracer.retry_attempts): try: self.socket = socket.create_connection(tracer.address) break except socket.error: log.warn( 'Client %s failed to connect to (%s, %d) on attempt %d...' % (tracer.uuid, tracer.address[0], tracer.address[1], n + 1) ) if self.socket is None: log.warn( 'Failed to connect to (%s, %d), no longer retying.' % tracer.address ) raise QdbFailedToConnect( tracer.address, tracer.retry_attempts ) log.info('Client %s connected to (%s, %d)' % (tracer.uuid, tracer.address[0], tracer.address[1])) def start(self, tracer, auth_msg=''): self.pipe, child_end = self._pipe() self._socket_connect(tracer) self.reader = self._start_process( target=ServerReader, args=(child_end, os.getpid(), self.socket.fileno(), tracer.pause_signal), ) with Timeout(5, QdbFailedToConnect(tracer.address, tracer.retry_attempts)): while True: try: self.pipe.get() break except IOError as e: if e.errno != errno.EAGAIN: raise self.send( fmt_msg( 'start', { 'uuid': tracer.uuid, 'auth': auth_msg, 'local': (0, 0), }, serial=json.dumps, ) ) signal.signal( tracer.pause_signal, partial(self._pause_handler, tracer) ) atexit.register(self.stop) def user_stop(self): if self.reader and self.reader.is_alive(): self.reader.terminate() self.socket.close() def fmt_breakpoint_dict(self, tracer, breakpoint): if 'file' not in breakpoint and tracer.default_file: breakpoint['file'] = tracer.default_file if 'file' in breakpoint and 'line' in breakpoint: breakpoint['filename'] = breakpoint.pop('file') breakpoint['lineno'] = breakpoint.pop('line') breakpoint['temporary'] = breakpoint.pop('temp', None) breakpoint['funcname'] = breakpoint.pop('func', None) breakpoint.setdefault('cond', None) return breakpoint raise QdbBreakpointReadError(breakpoint) def send(self, msg): self.socket.sendall(pack('>i', len(msg))) self.socket.sendall(msg.encode('utf-8')) def payload_check(self, payload, command): if payload is None: self.send_error('payload', '%s: expected payload' % command) return False return True def _pause_handler(self, tracer, signum, stackframe): if signum == tracer.pause_signal: tracer.set_step() def get_events(self): while self.reader.is_alive(): try: event = self.pipe.get() except IOError as i: if i.errno == errno.EAGAIN: continue raise yield event def get_commands(self, tracer): for event in self.get_events(): if event['e'] == 'error': self.handle_error(event.get('p')) else: command = getattr(self, 'command_' + event['e'], None) if not command: self.send_error('event', 'Command %s does not exist' % event['e']) else: yield lambda: command(tracer, event.get('p')) def handle_error(self, payload): if payload['type'] == 'auth': raise QdbAuthenticationError(payload['data']) else: raise QdbCommunicationError(payload) def user_next_command(self, tracer, msg=None): try: return next(self.get_commands(tracer))() except StopIteration: raise QdbCommunicationError('No more commands from server') def command_step(self, tracer, payload): tracer.set_step() def command_return(self, tracer, payload): tracer.set_return(tracer.curframe) def command_next(self, tracer, payload): tracer.set_next(tracer.curframe) def command_until(self, tracer, payload): tracer.set_until(tracer.curframe) def command_continue(self, tracer, payload): tracer.set_continue() def command_pprint(self, tracer, payload): return self.command_eval(tracer, payload, pprint=True) def 
command_eval(self, tracer, payload, pprint=False): if not self.payload_check(payload, 'eval'): return self.next_command.tailcall(tracer) tracer.eval_(payload, pprint) self.send_watchlist(tracer) return self.next_command.tailcall(tracer) def command_set_watch(self, tracer, payload): if not self.payload_check(payload, 'set_watch'): return self.next_command.tailcall(tracer) tracer.extend_watchlist(*payload) self.send_watchlist(tracer) return self.next_command.tailcall(tracer) def command_clear_watch(self, tracer, payload): if not self.payload_check(payload, 'clear_watch'): return self.next_command.tailcall(tracer) for w in payload: tracer.watchlist.pop(w, None) self.send_watchlist(tracer) return self.next_command.tailcall(tracer) def command_set_break(self, tracer, payload): if not self.payload_check(payload, 'set_break'): return self.next_command.tailcall(tracer) try: breakpoint = self.fmt_breakpoint_dict(tracer, payload) except QdbBreakpointReadError as b: err_msg = fmt_err_msg('set_break', str(b), serial=json.dumps) return self.next_command.tailcall(tracer, err_msg) err_msg = None try: tracer.set_break(**breakpoint) except QdbUnreachableBreakpoint as u: err_msg = fmt_err_msg( 'set_breakpoint', str(u), serial=json.dumps ) return self.next_command.tailcall(tracer, err_msg) def command_clear_break(self, tracer, payload): if not self.payload_check(payload, 'clear_break'): return self.next_command.tailcall(tracer) try: breakpoint = self.fmt_breakpoint_dict(tracer, payload) except QdbBreakpointReadError as b: err_msg = fmt_err_msg('clear_break', str(b), serial=json.dumps) return self.next_command.tailcall(tracer, err_msg) tracer.clear_break(**breakpoint) return self.next_command.tailcall(tracer)
Apache License 2.0
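A minimal sketch of the wire-message framing that command_list relies on, based on the fmt_msg and fmt_err_msg helpers shown in the context above. It runs standalone; the payload values are made up for illustration.

import json

def fmt_msg(event, payload=None, serial=None):
    frame = {'e': event, 'p': payload}
    return serial(frame) if serial else frame

def fmt_err_msg(error_type, data, serial=None):
    return fmt_msg('error', {'type': error_type, 'data': data}, serial=serial)

# A successful 'list' reply carries the file contents as the payload ...
print(fmt_msg('list', 'line 1\nline 2', serial=json.dumps))
# ... while a failure is wrapped in an 'error' frame, as in the KeyError branch.
print(fmt_err_msg('list', 'File foo.py does not exist', serial=json.dumps))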
dojoteef/dvae
dvae/models/factory.py
ModelFactoryFunction.define_model
python
def define_model(self, graph, reuse=None, **kwargs): pass
Return a new model.
https://github.com/dojoteef/dvae/blob/93665f17b346a3f42dea7c607e4c5f8365b5895d/dvae/models/factory.py#L91-L93
from __future__ import absolute_import from __future__ import division from __future__ import print_function from abc import abstractmethod from abc import abstractproperty from abc import ABCMeta as AbstractBaseClass from six import iteritems from six.moves import xrange import tensorflow as tf from dvae.datasets.dataloader import Data import dvae.utils.graph as graph_utils import dvae.utils.stats as stats_utils BATCH_NORM = 'BATCH_NORM' BATCH_NORM_COLLECTION = {'moving_mean': [BATCH_NORM], 'moving_variance': [BATCH_NORM]} class ModelFactoryFunction(object): __metaclass__ = AbstractBaseClass def __init__(self, dataset, dtype): self.dtype = dtype self.dataset = dataset def batch_norm(self, layer, activation_fn=None, reuse=None): batch_norm_layer = tf.contrib.layers.batch_norm( layer, decay=0.999, trainable=False, is_training=graph_utils.is_training(layer), scale=True, center=True, activation_fn=activation_fn, updates_collections=tf.GraphKeys.UPDATE_OPS, variables_collections=BATCH_NORM_COLLECTION, scope='batch_norm', reuse=reuse) return batch_norm_layer def linear_layer(self, data, shape): dtype = self.dtype weights = tf.get_variable( 'weights', dtype=dtype, shape=shape, regularizer=tf.contrib.layers.l2_regularizer(5e-4), initializer=tf.contrib.layers.xavier_initializer(uniform=False)) biases = tf.get_variable( 'biases', dtype=dtype, shape=shape[-1:], regularizer=tf.contrib.layers.l2_regularizer(5e-4), initializer=tf.zeros_initializer) return tf.matmul(data, weights) + biases def linear_layers(self, layer, outputs, activation_fn=None, reuse=None, name='linear'): layers = [] outputs = layer.get_shape()[-1:].concatenate(outputs) for idx in xrange(len(outputs) - 1): with tf.variable_scope(name+str(idx), reuse=reuse): activate = activation_fn if idx < len(outputs) - 2 else None layer = self.linear_layer(layer, outputs[idx:idx+2]) if activation_fn: layer = self.batch_norm(layer, activation_fn=activate, reuse=reuse) layers.append(layer) return layers @abstractmethod
Apache License 2.0
speleo3/pymol-psico
psico/electrostatics.py
validate_apbs_exe
python
def validate_apbs_exe(exe):
    import os, subprocess
    if exe:
        exe = cmd.exp_path(exe)
    else:
        try:
            import freemol.apbs
            exe = freemol.apbs.get_exe_path()
        except:
            pass
        if not exe:
            exe = cmd.exp_path('$SCHRODINGER/utilities/apbs')
            if not os.path.exists(exe):
                exe = "apbs"
    try:
        r = subprocess.call([exe, "--version"],
                stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT)
        if r < 0:
            raise CmdException("Broken executable: " + exe)
    except OSError:
        raise CmdException("Cannot execute: " + exe)
    return exe
Get and validate apbs executable. Raise CmdException if not found or broken.
https://github.com/speleo3/pymol-psico/blob/4e5402b4dca9a509b34a03691f12dc49e93c4973/psico/electrostatics.py#L47-L73
from __future__ import print_function from pymol import cmd, CmdException template_apbs_in = ''' read mol pqr "{pqrfile}" end elec mg-auto mol 1 fgcent {fgcent} # fine grid center cgcent mol 1 # coarse grid center fglen {fglen} cglen {cglen} dime {dime} lpbe # l=linear, n=non-linear Poisson-Boltzmann equation bcfl sdh # "Single Debye-Hueckel" boundary condition pdie 2.0 # protein dielectric sdie 78.0 # solvent dielectric chgm spl2 # Cubic B-spline discretization of point charges on grid srfm smol # smoothed surface for dielectric and ion-accessibility coefficients swin 0.3 temp 310.0 # temperature sdens 10.0 calcenergy no calcforce no srad {srad} # solvent radius ion charge +1 conc 0.15 radius 2.0 ion charge -1 conc 0.15 radius 1.8 write pot dx "{mapfile}" end quit '''
BSD 2-Clause Simplified License
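A standalone sketch of the validation pattern used by validate_apbs_exe above: probe the candidate binary with "--version" and treat OSError (executable not found) and a negative return code (process killed by a signal) as failures. A plain RuntimeError stands in for PyMOL's CmdException here.

import os
import subprocess

def probe_executable(exe):
    try:
        with open(os.devnull, "w") as devnull:
            r = subprocess.call([exe, "--version"], stdout=devnull,
                                stderr=subprocess.STDOUT)
        if r < 0:
            raise RuntimeError("Broken executable: " + exe)
    except OSError:
        raise RuntimeError("Cannot execute: " + exe)
    return exe

# probe_executable("apbs")  # raises RuntimeError if apbs is not on PATH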
harmon758/harmonbot
Discord/cogs/cryptography.py
Cryptography.encode_blake2b
python
async def encode_blake2b(self, ctx, *, message: str):
    digest = crypto_hashes.Hash(crypto_hashes.BLAKE2b(64), backend = openssl_backend)
    digest.update(message.encode("UTF-8"))
    await ctx.embed_reply(digest.finalize())
64-byte digest BLAKE2b
https://github.com/harmon758/harmonbot/blob/def3849beabdaea5e0f9c594dcf6d6d8980782bd/Discord/cogs/cryptography.py#L175-L179
from discord.ext import commands import hashlib import sys from typing import Optional import zlib from cryptography.hazmat.backends.openssl import backend as openssl_backend from cryptography.hazmat.primitives import hashes as crypto_hashes import pygost.gost28147 import pygost.gost28147_mac import pygost.gost34112012 import pygost.gost341194 import pygost.gost3412 from utilities import checks sys.path.insert(0, "..") from units.cryptography import (decode_caesar_cipher, encode_caesar_cipher, decode_morse_code, encode_morse_code, UnitOutputError) sys.path.pop(0) def setup(bot): bot.add_cog(Cryptography()) class Cryptography(commands.Cog): async def cog_check(self, ctx): return await checks.not_forbidden().predicate(ctx) @commands.group(aliases = ["decrpyt"], invoke_without_command = True, case_insensitive = True) async def decode(self, ctx): await ctx.send_help(ctx.command) @decode.group(name = "caesar", aliases = ["rot"], invoke_without_command = True, case_insensitive = True) async def decode_caesar(self, ctx, key: int, *, message: str): await ctx.embed_reply(decode_caesar_cipher(message, key)) @decode_caesar.command(name = "brute") async def decode_caesar_brute(self, ctx, *, message: str): await ctx.embed_reply('\n'.join(f"{key}: {decode_caesar_cipher(message, key)}" for key in range(26))) @decode.group(name = "gost", aliases = ["гост"], invoke_without_command = True, case_insensitive = True) async def decode_gost(self, ctx): await ctx.send_help(ctx.command) @decode_gost.group(name = "28147-89", aliases = ["магма", "magma"], invoke_without_command = True, case_insensitive = True) async def decode_gost_28147_89(self, ctx): await ctx.send_help(ctx.command) @decode_gost_28147_89.command(name = "cbc") async def decode_gost_28147_89_cbc(self, ctx, key: str, *, data: str): try: await ctx.embed_reply(pygost.gost28147.cbc_decrypt(key.encode("UTF-8"), bytearray.fromhex(data)).decode("UTF-8")) except ValueError as e: await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: {e}") @decode_gost_28147_89.command(name = "cfb") async def decode_gost_28147_89_cfb(self, ctx, key: str, *, data: str): try: await ctx.embed_reply(pygost.gost28147.cfb_decrypt(key.encode("UTF-8"), bytearray.fromhex(data)).decode("UTF-8")) except ValueError as e: await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: {e}") @decode_gost_28147_89.command(name = "cnt") async def decode_gost_28147_89_cnt(self, ctx, key: str, *, data: str): try: await ctx.embed_reply(pygost.gost28147.cnt(key.encode("UTF-8"), bytearray.fromhex(data)).decode("UTF-8")) except ValueError as e: await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: {e}") @decode_gost_28147_89.command(name = "ecb") async def decode_gost_28147_89_ecb(self, ctx, key: str, *, data: str): try: await ctx.embed_reply(pygost.gost28147.ecb_decrypt(key.encode("UTF-8"), bytearray.fromhex(data)).decode("UTF-8")) except ValueError as e: await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: {e}") @decode_gost.command(name = "34.12-2015", aliases = ["кузнечик", "kuznyechik"]) async def decode_gost_34_12_2015(self, ctx, key: str, *, data: str): if len(key) < 32: return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: key length must be at least 32") if len(data) < 16: return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: data length must be at least 16") await ctx.embed_reply(pygost.gost3412.GOST3412Kuznechik(key.encode("UTF-8")).decrypt(bytearray.fromhex(data)).decode("UTF-8")) @decode.command(name = "morse") async def decode_morse(self, ctx, *, message: str): try: await 
ctx.embed_reply(decode_morse_code(message)) except UnitOutputError as e: await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: {e}") @decode.command(name = "qr") async def decode_qr(self, ctx, file_url: Optional[str]): if not file_url: if ctx.message.attachments: file_url = ctx.message.attachments[0].url else: return await ctx.embed_reply(f"{ctx.bot.error_emoji} Please input a file url or attach an image") url = f"https://api.qrserver.com/v1/read-qr-code/" params = {"fileurl": file_url} async with ctx.bot.aiohttp_session.get(url, params = params) as resp: if resp.status == 400: return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error") data = await resp.json() if data[0]["symbol"][0]["error"]: return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: {data[0]['symbol'][0]['error']}") decoded = data[0]["symbol"][0]["data"].replace("QR-Code:", "") if len(decoded) > ctx.bot.EMBED_DESCRIPTION_CHARACTER_LIMIT: return await ctx.embed_reply(decoded[:ctx.bot.EDCL - 3] + "...", footer_text = "Decoded message exceeded character limit") await ctx.embed_reply(decoded) @decode.command(name = "reverse") async def decode_reverse(self, ctx, *, message: str): await ctx.embed_reply(message[::-1]) @commands.group(aliases = ["encrypt"], invoke_without_command = True, case_insensitive = True) async def encode(self, ctx): await ctx.send_help(ctx.command) @encode.command(name = "adler32", aliases = ["adler-32"]) async def encode_adler32(self, ctx, *, message: str): await ctx.embed_reply(zlib.adler32(message.encode("UTF-8"))) @encode.command(name = "blake2b")
MIT License
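A minimal sketch of the hashing done in encode_blake2b above, outside the Discord command context. It uses the same cryptography primitives as the cog; the hashlib call is shown as a standard-library equivalent for comparison.

import hashlib
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes as crypto_hashes

message = "hello"

# 64-byte BLAKE2b digest via the cryptography package.
digest = crypto_hashes.Hash(crypto_hashes.BLAKE2b(64), backend=default_backend())
digest.update(message.encode("UTF-8"))
print(digest.finalize().hex())

# Standard-library equivalent producing the same 64-byte digest.
print(hashlib.blake2b(message.encode("UTF-8"), digest_size=64).hexdigest())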
jaegertracing/jaeger-client-python
jaeger_client/thrift_gen/jaeger/Collector.py
Client.submitBatches
python
def submitBatches(self, batches):
    self._seqid += 1
    future = self._reqs[self._seqid] = concurrent.Future()
    self.send_submitBatches(batches)
    return future
Parameters:
 - batches
https://github.com/jaegertracing/jaeger-client-python/blob/a6c973158bf9b02cd7f5a966ccfd29ab86c44a5b/jaeger_client/thrift_gen/jaeger/Collector.py#L70-L78
import six from six.moves import xrange from thrift.Thrift import TType, TMessageType, TException, TApplicationException import logging from .ttypes import * from thrift.Thrift import TProcessor from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol, TProtocol try: from thrift.protocol import fastbinary except: fastbinary = None from tornado import gen from tornado import concurrent from thrift.transport import TTransport class Iface(object): def submitBatches(self, batches): pass class Client(Iface): def __init__(self, transport, iprot_factory, oprot_factory=None): self._transport = transport self._iprot_factory = iprot_factory self._oprot_factory = (oprot_factory if oprot_factory is not None else iprot_factory) self._seqid = 0 self._reqs = {} self._transport.io_loop.spawn_callback(self._start_receiving) @gen.coroutine def _start_receiving(self): while True: try: frame = yield self._transport.readFrame() except TTransport.TTransportException as e: for future in self._reqs.itervalues(): future.set_exception(e) self._reqs = {} return tr = TTransport.TMemoryBuffer(frame) iprot = self._iprot_factory.getProtocol(tr) (fname, mtype, rseqid) = iprot.readMessageBegin() future = self._reqs.pop(rseqid, None) if not future: continue method = getattr(self, 'recv_' + fname) try: result = method(iprot, mtype, rseqid) except Exception as e: future.set_exception(e) else: future.set_result(result)
Apache License 2.0
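A sketch of how the future returned by submitBatches above is consumed in a Tornado coroutine: the call registers a Future keyed by sequence id, sends the request, and _start_receiving resolves that Future when the matching response frame arrives. The client and batches objects are assumed to come from the surrounding jaeger-client setup.

from tornado import gen

@gen.coroutine
def report(client, batches):
    response = yield client.submitBatches(batches)
    raise gen.Return(response)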
zhaozhibin/dl-based-intelligent-diagnosis-benchmark
AE_Datasets/R_NA/datasets/XJTU.py
data_load
python
def data_load(filename, label):
    fl = pd.read_csv(filename)
    fl = fl["Horizontal_vibration_signals"]
    fl = fl.values
    fl = fl.reshape(-1, 1)
    data = []
    lab = []
    start, end = 0, signal_size
    while end <= fl.shape[0]:
        data.append(fl[start:end])
        lab.append(label)
        start += signal_size
        end += signal_size
    return data, lab
This function is mainly used to generate test data and training data.
filename: Data location
https://github.com/zhaozhibin/dl-based-intelligent-diagnosis-benchmark/blob/6dca48f36c2a0bceaad8329089100045fe440bbb/AE_Datasets/R_NA/datasets/XJTU.py#L53-L70
import os import pandas as pd from sklearn.model_selection import train_test_split from datasets.SequenceDatasets import dataset from datasets.sequence_aug import * from tqdm import tqdm signal_size=1024 label1 = [i for i in range(0,5)] label2 = [i for i in range(5,10)] label3 = [i for i in range(10,15)] def get_files(root, test=False): WC = os.listdir(root) datasetname1 = os.listdir(os.path.join(root, WC[0])) datasetname2 = os.listdir(os.path.join(root, WC[1])) datasetname3 = os.listdir(os.path.join(root, WC[2])) data = [] lab =[] for i in tqdm(range(len(datasetname1))): files = os.listdir(os.path.join('/tmp',root,WC[0],datasetname1[i])) for ii in [-4,-3,-2,-1]: path1 = os.path.join('/tmp',root,WC[0],datasetname1[i],files[ii]) data1, lab1 = data_load(path1,label=label1[i]) data += data1 lab +=lab1 for j in tqdm(range(len(datasetname2))): files = os.listdir(os.path.join('/tmp',root,WC[1],datasetname2[j])) for jj in [-4,-3, -2, -1]: path2 = os.path.join('/tmp',root,WC[1],datasetname2[j],files[jj]) data2, lab2 = data_load(path2,label=label2[j]) data += data2 lab += lab2 for k in tqdm(range(len(datasetname3))): files = os.listdir(os.path.join('/tmp',root,WC[2],datasetname3[k])) for kk in [-4,-3, -2, -1]: path3 = os.path.join('/tmp',root,WC[2],datasetname3[k],files[kk]) data3, lab3 = data_load(path3,label=label3[k]) data += data3 lab += lab3 return [data, lab]
MIT License
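A self-contained sketch of the windowing performed by data_load above: a 1-D signal is cut into consecutive, non-overlapping windows of signal_size samples, each paired with the same class label. The random signal stands in for the "Horizontal_vibration_signals" column of an XJTU-SY CSV file.

import numpy as np

signal_size = 1024
fl = np.random.randn(5000).reshape(-1, 1)  # placeholder signal
label = 3

data, lab = [], []
start, end = 0, signal_size
while end <= fl.shape[0]:
    data.append(fl[start:end])
    lab.append(label)
    start += signal_size
    end += signal_size

print(len(data), data[0].shape)  # 4 windows of shape (1024, 1); the tail is dropped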
micom-dev/micom
micom/community.py
Community.__init__
python
def __init__( self, taxonomy, model_db=None, id=None, name=None, rel_threshold=1e-6, solver=None, progress=True, max_exchange=100, mass=1, ): super(Community, self).__init__(id, name) logger.info("building new micom model {}.".format(id)) if not solver: solver = [ s for s in ["cplex", "gurobi", "osqp", "glpk"] if s in cobra.util.solver.solvers ][0] logger.info("using the %s solver." % solver) if solver == "glpk": logger.warning( "No QP solver found, will use GLPK. A lot of functionality " "in MICOM will require a QP solver :/" ) self.solver.configuration.lp_method = "auto" self.solver.configuration.qp_method = "auto" self.solver.configuration.presolve = False self.solver = solver self._rtol = rel_threshold self._modification = None self.mass = mass self.__db_metrics = None adjust_solver_config(self.solver) taxonomy = taxonomy.copy() if "abundance" not in taxonomy.columns: taxonomy["abundance"] = 1 taxonomy.abundance /= taxonomy.abundance.sum() logger.info( "{} individuals with abundances below threshold".format( (taxonomy.abundance <= self._rtol).sum() ) ) taxonomy = taxonomy[taxonomy.abundance > self._rtol] if not ( isinstance(taxonomy, pd.DataFrame) and any(taxonomy.columns.isin(["id"] + _ranks)) ): raise ValueError( "`taxonomy` must be a pandas DataFrame with at least" " a column `id` or a rank :(" ) if model_db is None and "file" not in taxonomy.columns: raise ValueError( "If no model database is specified you need to pass " "file names for models in a `file` column as well." ) compressed = False if model_db is not None: compressed = model_db.endswith(".qza") or model_db.endswith(".zip") if compressed: tdir = TemporaryDirectory(prefix="micom_") if "file" in taxonomy.columns: logger.warning( "The table includes a `file` column even though a model database " "is used. Will ignore it and use the model database instead." ) del taxonomy["file"] if model_db.endswith(".qza"): manifest = load_qiime_model_db(model_db, tdir.name) elif model_db.endswith(".zip"): manifest = load_zip_model_db(model_db, tdir.name) else: manifest = load_manifest(model_db) rank = manifest["summary_rank"][0] if rank not in taxonomy.columns: raise ValueError("Missing the column `%s` from the taxonomy." % rank) if "id" not in taxonomy.columns: taxonomy["id"] = taxonomy[rank] keep_cols = [ r for r in _ranks[0 : (_ranks.index(rank) + 1)] if r in taxonomy.columns and r in manifest.columns ] manifest = manifest[keep_cols + ["file"]] merged = pd.merge(taxonomy, manifest, on=keep_cols) self.__db_metrics = pd.Series( { "found_taxa": merged.shape[0], "total_taxa": taxonomy.shape[0], "found_fraction": merged.shape[0] / taxonomy.shape[0], "found_abundance_fraction": merged.abundance.sum(), } ) logger.info( "Matched %g%% of total abundance in model DB." % (100.0 * self.__db_metrics[3]) ) if self.__db_metrics["found_abundance_fraction"] < 0.5: logger.warning( "Less than 50%% of the abundance could be matched to the " "model database. Model `%s` may not be representative " "of the sample" % self.id ) taxonomy = merged taxonomy["abundance"] /= taxonomy["abundance"].sum() if taxonomy.id.str.contains(r"[^A-Za-z0-9_]", regex=True).any(): logger.warning( "Taxa IDs contain prohibited characters and will be reformatted." 
) taxonomy.id = taxonomy.id.replace(r"[^A-Za-z0-9_\s]+", "_", regex=True) self.__taxonomy = taxonomy self.__taxonomy.index = self.__taxonomy.id obj = Zero self.taxa = [] index = self.__taxonomy.index index = track(index, description="Building") if progress else index for idx in index: row = self.__taxonomy.loc[idx] if isinstance(row.file, list): if len(row.file) > 1: model = join_models(row.file) logger.info("joined {} models".format(len(row.file))) else: model = load_model(row.file[0]) else: model = load_model(row.file) suffix = "__" + idx.replace(" ", "_").strip() logger.info("converting IDs for {}".format(idx)) external = cobra.medium.find_external_compartment(model) logger.info( "Identified %s as the external compartment for %s. " "If that is wrong you may be in trouble..." % (external, idx) ) for r in model.reactions: r.global_id = clean_ids(r.id) r.id = r.global_id + suffix r.community_id = idx r._compartments = None if "sbo" in r.annotation: del r.annotation["sbo"] for m in model.metabolites: m.global_id = clean_ids(m.id) m.id = m.global_id + suffix m.compartment += suffix m.community_id = idx logger.info("adding reactions for {} to community".format(idx)) self.add_reactions(model.reactions) o = self.solver.interface.Objective.clone( model.objective, model=self.solver ) obj += o.expression * row.abundance self.taxa.append(idx) taxa_obj = self.problem.Constraint( o.expression, name="objective_" + idx, lb=0.0 ) self.add_cons_vars([taxa_obj]) self.__add_exchanges( model.reactions, row, external_compartment=external, internal_exchange=max_exchange, ) self.solver.update() if compressed: tdir.cleanup() com_obj = add_var_from_expression(self, "community_objective", obj, lb=0) self.objective = self.problem.Objective(com_obj, direction="max")
Create a new community object.

`micom` builds a community from a taxonomy which may simply be a list of model
files in its simplest form. Usually, the taxonomy will contain additional
information such as annotations for the individuals (for instance phylum,
organism or species) and abundances.

The recommended way to build a micom model is to supply a quantification of
taxa (called "taxonomy" here) which specifies the taxonomic ranks for a taxon
and its abundance, and a model database for a specific rank (for instance
"genus"). MICOM will match the ranks from your taxonomy to the model database
and assemble the community models from that. You will also get information
about the construction process by calling `Community.build_metrics`.

The most customizable way only takes a single table where summarization and
matching to the reference database has already occurred. In this case you will
also provide paths to model files for each taxon. This is the "old" way but may
still be applicable if you want to use a custom database or want full control
of matching your data to reference models.

Notes
-----
`micom` will automatically add exchange fluxes and a community objective
maximizing the overall growth rate of the community.

Parameters
----------
taxonomy : pandas.DataFrame
    The taxonomy used for building the model. Must have at least the column
    "id" or a column specifying a taxonomic rank. If no model database is
    specified in the next argument it furthermore requires a column "file"
    which specifies a filepath for each model. Valid file extensions are
    ".pickle", ".xml", ".xml.gz" and ".json". If a model database is specified
    this must contain at least a column with the same name as the rank used in
    the model database. Thus, for a genus-level database you will need a
    column `genus`. Additional taxa ranks can also be specified and will be
    used to be more stringent in taxa matching. Finally, the taxonomy should
    contain a column `abundance`. It will be used to quantify each individual
    in the community. If absent, MICOM will assume all individuals are present
    in the same amount.
model_db : str
    A pre-built model database. If ending in `.qza` must be a Qiime 2 artifact
    of type `MetabolicModels[JSON]`. Can also be a folder, zip (must end in
    `.zip`) file or None if the taxonomy contains a column `file`.
id : str, optional
    The ID for the community. Should only contain letters and numbers,
    otherwise it will be formatted as such.
name : str, optional
    The name for the community.
rel_threshold : float < 1, optional
    The relative abundance threshold that will be used. Describes the smallest
    relative amount of an individual that will be considered non-zero. All
    individuals with a smaller relative amount will be omitted.
solver : str, optional
    Which solver to use. Will default to cplex if available which is better
    suited for large problems.
progress : bool, optional
    Show a progress bar.
max_exchange : positive float, optional
    During model construction exchange reactions are duplicated into internal
    and external exchange reactions. This specifies the new import flux bound
    for the *internal* exchange reaction. Import rates for the exchanges
    between the medium and outside are still maintained.
mass : positive float, optional
    The total mass of the community in gDW. Used to adjust import fluxes which
    are assumed to be given as mmol/gDW*h for the entire community. As a
    consequence all import fluxes will be divided by that number.

Attributes
----------
taxa : list
    A list of taxa IDs in the community.
https://github.com/micom-dev/micom/blob/b8e3eda8a97ed2fe08e1416711d3a4f1f98bc3a9/micom/community.py#L40-L299
import re import pickle import cobra import pandas as pd from optlang.symbolics import Zero from micom.db import load_zip_model_db, load_manifest from micom.util import ( load_model, join_models, add_var_from_expression, adjust_solver_config, clean_ids, compartment_id, COMPARTMENT_RE, ) from micom.logger import logger from micom.optcom import optcom, solve from micom.problems import cooperative_tradeoff, knockout_taxa from micom.qiime_formats import load_qiime_model_db from rich.progress import track from tempfile import TemporaryDirectory _ranks = ["kingdom", "phylum", "class", "order", "family", "genus", "species", "strain"] cobra.io.sbml.LOGGER.setLevel("ERROR") cobra.util.solver.logger.setLevel("ERROR") class Community(cobra.Model):
Apache License 2.0
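A hedged sketch of how the Community constructor documented above is typically called: a taxonomy table with a rank column ("genus"), relative abundances, and a genus-level model database. The file name "agora103_genus.qza" is only a placeholder for whatever database you actually have, and running this requires micom to be installed.

import pandas as pd
from micom import Community

taxonomy = pd.DataFrame({
    "id": ["Bacteroides", "Prevotella", "Faecalibacterium"],
    "genus": ["Bacteroides", "Prevotella", "Faecalibacterium"],
    "abundance": [0.5, 0.3, 0.2],
})

# Build the community model; abundances are normalized to sum to 1.
com = Community(taxonomy, model_db="agora103_genus.qza", id="sample_1")
print(com.build_metrics)  # fraction of taxa/abundance matched to the database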
geophysics-ubonn/reda
lib/reda/eis/units.py
get_label
python
def get_label(parameter, ptype, flavor=None, mpl=None):
    if flavor is not None:
        if flavor not in ('latex', 'mathml'):
            raise Exception('flavor not recognized: {}'.format(flavor))
    else:
        if mpl is None:
            raise Exception('either the flavor or mpl must be provided')
        rendering = mpl.rcParams['text.usetex']
        if rendering:
            flavor = 'latex'
        else:
            flavor = 'mathml'

    if parameter not in labels:
        raise Exception('parameter not known')
    if ptype not in labels[parameter]:
        raise Exception('ptype not known')
    if flavor not in labels[parameter][ptype]:
        raise Exception('flavor not known')
    return labels[parameter][ptype][flavor]
Return the label of a given SIP parameter

Parameters
----------
parameter : str
    type of parameter, e.g. rmag|rpha|cre|cim
ptype : string
    material|meas. Either return the material property (e.g. resistivity) or
    the measurement parameter (e.g., impedance)
flavor : string, optional
    if set, must be one of latex|mathml. Return a label for latex processing,
    or for mathml processing
mpl : matplotlib, optional
    if set, infer flavor from mpl.rcParams. Will not be used if flavor is set

Returns
-------
label : string
    the requested label
https://github.com/geophysics-ubonn/reda/blob/5be52ecb184f45f0eabb23451f039fec3d9537c5/lib/reda/eis/units.py#L47-L90
labels = { 'rmag': { 'material': { 'latex': r'$|\rho|~[\Omega m]$', 'mathml': r'$|\rho| [\Omega m]$', }, 'meas': { 'latex': r'$|Z|~[\Omega]$', 'mathml': r'$|Z| [\Omega]$', }, }, 'rpha': { 'material': { 'latex': r'$-\phi~[mrad]$', 'mathml': r'$-\phi [mrad]$', }, 'meas': { 'latex': r'$-\varphi~[mrad]$', 'mathml': r'$-\varphi [mrad]$', }, }, 'cre': { 'material': { 'latex': r"$\sigma'~[S/m]$", 'mathml': r"$\sigma' [S/m]$", }, 'meas': { 'latex': r"$Y'~[S]$", 'mathml': r"$Y' [S]$", }, }, 'cim': { 'material': { 'latex': r"$\sigma''~[S/m]$", 'mathml': r"$\sigma'' [S/m]$", }, 'meas': { 'latex': r"$Y''~[S]$", 'mathml': r"$Y'' [S]$", }, }, }
MIT License
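A small usage sketch for get_label above, assuming reda is importable. The first call picks the flavor explicitly; the second lets matplotlib's rcParams decide between the latex and mathml variants.

import matplotlib as mpl
from reda.eis.units import get_label

print(get_label('rmag', 'meas', flavor='latex'))  # $|Z|~[\Omega]$
print(get_label('cre', 'material', mpl=mpl))      # flavor inferred from text.usetex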
microsoft/univl
modules/until_module.py
PreTrainedModel.from_pretrained
python
def from_pretrained(cls, config, state_dict=None, *inputs, **kwargs):
    model = cls(config, *inputs, **kwargs)
    if state_dict is None:
        return model
    model = cls.init_preweight(model, state_dict)
    return model
Instantiate a PreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed.
https://github.com/microsoft/univl/blob/0a7c07f566a3b220731f4abcaa6e1ee59a686596/modules/until_module.py#L166-L177
import logging import numpy as np import torch from torch import nn import torch.nn.functional as F import math from modules.until_config import PretrainedConfig logger = logging.getLogger(__name__) def gelu(x): return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) def swish(x): return x * torch.sigmoid(x) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} class LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(LayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class PreTrainedModel(nn.Module): def __init__(self, config, *inputs, **kwargs): super(PreTrainedModel, self).__init__() if not isinstance(config, PretrainedConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. " "To create a model from a Google pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ )) self.config = config def init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, LayerNorm): if 'beta' in dir(module) and 'gamma' in dir(module): module.beta.data.zero_() module.gamma.data.fill_(1.0) else: module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def resize_token_embeddings(self, new_num_tokens=None): raise NotImplementedError @classmethod def init_preweight(cls, model, state_dict, prefix=None, task_config=None): old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) if prefix is not None: old_keys = [] new_keys = [] for key in state_dict.keys(): old_keys.append(key) new_keys.append(prefix + key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(model, prefix='') if prefix is None and (task_config is None or task_config.local_rank == 0): logger.info("-" * 20) if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}" .format(model.__class__.__name__, "\n " + "\n ".join(missing_keys))) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}" .format(model.__class__.__name__, "\n " + "\n ".join(unexpected_keys))) if len(error_msgs) > 0: logger.error("Weights from pretrained model cause errors in {}: {}" .format(model.__class__.__name__, "\n " + "\n 
".join(error_msgs))) return model @property def dtype(self): try: return next(self.parameters()).dtype except StopIteration: def find_tensor_attributes(module: nn.Module): tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = self._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].dtype @classmethod
MIT License
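A standalone sketch of the state-dict key renaming that init_preweight (used by from_pretrained above) performs before loading: legacy 'gamma'/'beta' LayerNorm names are rewritten to the 'weight'/'bias' names PyTorch expects. The tensors here are placeholders.

import torch

state_dict = {
    "encoder.LayerNorm.gamma": torch.ones(8),
    "encoder.LayerNorm.beta": torch.zeros(8),
}

old_keys, new_keys = [], []
for key in state_dict.keys():
    new_key = None
    if "gamma" in key:
        new_key = key.replace("gamma", "weight")
    if "beta" in key:
        new_key = key.replace("beta", "bias")
    if new_key:
        old_keys.append(key)
        new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
    state_dict[new_key] = state_dict.pop(old_key)

print(sorted(state_dict))  # ['encoder.LayerNorm.bias', 'encoder.LayerNorm.weight']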
genomoncology/related
src/related/validators.py
regex
python
def regex(match_string): return _RegexValidator(match_string)
A validator that executes each validator passed as arguments.
https://github.com/genomoncology/related/blob/3799cde862b8c9500931706f5f1ce5576028f642/src/related/validators.py#L42-L46
from attr import attr, attributes import re @attributes(repr=False, slots=True) class _CompositeValidator(object): validators = attr() def __call__(self, inst, attr, value): for validator in self.validators: validator(inst, attr, value) def __repr__(self): return ( "<composite validator for validators {!r}>".format(self.validators) ) def composite(*validators): return _CompositeValidator(validators) @attributes(repr=False, slots=True) class _RegexValidator(object): regex = attr() def __call__(self, inst, attr, value): if not re.match(self.regex, value): raise TypeError( "'{name}' must match {regex!r} (got {value!r}).".format( name=attr.name, regex=self.regex, value=value), attr, self.regex, value) def __repr__(self): return ( "<regex validator for {!r}>".format(self.regex) )
MIT License
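A minimal sketch of how the regex validator above behaves when invoked the way attrs calls validators: with the instance, the attribute (anything that has a .name), and the value. The FakeAttr helper is only for illustration.

from collections import namedtuple
from related.validators import regex

FakeAttr = namedtuple("FakeAttr", ["name"])

validator = regex(r"^\d{4}-\d{2}-\d{2}$")
validator(None, FakeAttr("date"), "2021-06-01")    # passes silently
try:
    validator(None, FakeAttr("date"), "June 1st")  # raises TypeError
except TypeError as error:
    print(error.args[0])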
iperdance/ipercore
iPERCore/tools/trainers/lwg_trainer.py
LWGTrainer.optimize_D
python
def optimize_D(self, fake_bg, fake_tsf_imgs):
    bs, nt, c, h, w = fake_tsf_imgs.shape
    fake_tsf_imgs = fake_tsf_imgs.view(bs * nt, c, h, w)
    real_tsf_imgs = self._real_tsf.reshape(bs * nt, c, h, w)

    tsf_cond = self._input_G_tsf[:, :, -3:].view(bs * nt, -1, h, w)
    fake_input_D = torch.cat([fake_tsf_imgs.detach(), tsf_cond], dim=1)
    real_input_D = torch.cat([real_tsf_imgs, tsf_cond], dim=1)

    real_inputs = {
        "x": real_input_D,
        "bg_x": None,
        "body_rects": self._body_bbox,
        "head_rects": self._head_bbox,
        "get_avg": True
    }
    d_real_outs, self._d_real = self.D(real_inputs)

    fake_inputs = {
        "x": fake_input_D,
        "bg_x": None,
        "body_rects": self._body_bbox,
        "head_rects": self._head_bbox,
        "get_avg": True
    }
    d_fake_outs, self._d_fake = self.D(fake_inputs)

    _loss_d_real = self.crt_gan(d_real_outs, 1)
    _loss_d_fake = self.crt_gan(d_fake_outs, -1)

    return _loss_d_real + _loss_d_fake
Args:
    fake_bg (torch.Tensor):
    fake_tsf_imgs (torch.Tensor):

Returns:
https://github.com/iperdance/ipercore/blob/1c15b8208a4313c91ce6bf7a97a15fe43cee4a74/iPERCore/tools/trainers/lwg_trainer.py#L791-L832
import abc import torch import torch.nn.functional as F from collections import OrderedDict from iPERCore.models.networks import NetworksFactory from iPERCore.models.networks.criterions import VGGLoss, FaceLoss, LSGANLoss, TVLoss, TemporalSmoothLoss from iPERCore.tools.utils.filesio.cv_utils import tensor2im from .base import BaseTrainerModel, FlowCompositionForTrainer __all__ = ["LWGTrainerABC", "LWGTrainer", "LWGAugBGTrainer", "LWGFrontTrainer"] class LWGTrainerABC(BaseTrainerModel, abc.ABC): def __init__(self, opt, device): super(LWGTrainerABC, self).__init__(opt) self._name = "LWGTrainerABC" self.device = device self._share_bg = self._opt.share_bg self._ns = self._opt.num_source self._nt = self._opt.time_step self._train_opts = self._opt.Train self._aug_bg = self._train_opts.aug_bg self._use_gan = self._train_opts.lambda_D_prob > 0 self._create_network() self._make_optimizer() self._init_losses() if self._opt.load_iter > 0: self.load() else: if self._opt.load_path_G != "None": self.load_params(self.G, self._opt.load_path_G, need_module=False) if self._opt.load_path_D != "None" and self._use_gan: self.load_params(self.D, self._opt.load_path_D, need_module=False) def _create_network(self): self.flow_comp = FlowCompositionForTrainer(opt=self._opt) self.flow_comp.eval() self.G = self._create_generator(cfg=self._opt.neural_render_cfg.Generator) if self._use_gan: self.D = self._create_discriminator(cfg=self._opt.neural_render_cfg.Discriminator) else: self.D = None def _create_generator(self, cfg): gen_name = self._opt.gen_name return NetworksFactory.get_by_name(gen_name, cfg=cfg, temporal=self._opt.temporal) def _create_discriminator(self, cfg): dis_name = self._opt.dis_name return NetworksFactory.get_by_name(dis_name, cfg=cfg, use_aug_bg=self._aug_bg) def _make_optimizer(self): self._current_lr_G = self._train_opts.lr_G self._current_lr_D = self._train_opts.lr_D self._optimizer_G = torch.optim.Adam(self.G.parameters(), lr=self._current_lr_G, betas=(self._train_opts.G_adam_b1, self._train_opts.G_adam_b2)) if self._use_gan: self._optimizer_D = torch.optim.Adam(self.D.parameters(), lr=self._current_lr_D, betas=(self._train_opts.D_adam_b1, self._train_opts.D_adam_b2)) def _init_losses(self): self.crt_l1 = torch.nn.L1Loss() self.crt_mask = torch.nn.BCELoss() if self._train_opts.use_vgg != "None": self.crt_tsf = VGGLoss(vgg_type=self._train_opts.use_vgg, ckpt_path=self._train_opts.vgg_loss_path, resize=True) else: self.crt_tsf = torch.nn.L1Loss() if self._train_opts.use_face: self.crt_face = FaceLoss(pretrained_path=self._train_opts.face_loss_path, factor=self._train_opts.face_factor) self.crt_gan = LSGANLoss() self.crt_tv = TVLoss() self.crt_ts = TemporalSmoothLoss() self._loss_g_rec = 0.0 self._loss_g_tsf = 0.0 self._loss_g_adv = 0.0 self._loss_g_mask = 0.0 self._loss_g_smooth = 0.0 self._loss_g_face = 0.0 self._d_real = 0.0 self._d_fake = 0.0 def multi_gpu_wrapper(self, f): self.crt_tsf = self.crt_tsf.to(self.device) self.flow_comp = self.flow_comp.to(self.device) self.G = f(self.G.to(self.device)) if self._train_opts.use_face: self.crt_face = self.crt_face.to(self.device) if self._use_gan: self.D = f(self.D.to(self.device)) return self def gpu_wrapper(self): self.crt_tsf = self.crt_tsf.to(self.device) self.flow_comp = self.flow_comp.to(self.device) self.G = self.G.to(self.device) if self._train_opts.use_face: self.crt_face = self.crt_face.to(self.device) if self._use_gan: self.D = self.D.to(self.device) return self def set_train(self): self.G.train() if self._use_gan: self.D.train() def 
set_eval(self): self.G.eval() def get_current_errors(self): loss_g_face = self._loss_g_face if isinstance(self._loss_g_face, float) else self._loss_g_face.item() loss_g_smooth = self._loss_g_smooth if isinstance(self._loss_g_smooth, float) else self._loss_g_smooth.item() loss_g_mask = self._loss_g_mask if isinstance(self._loss_g_mask, float) else self._loss_g_mask.item() loss_g_rec = self._loss_g_rec if isinstance(self._loss_g_rec, float) else self._loss_g_rec.item() loss_g_tsf = self._loss_g_tsf if isinstance(self._loss_g_tsf, float) else self._loss_g_tsf.item() loss_g_adv = self._loss_g_adv if isinstance(self._loss_g_adv, float) else self._loss_g_adv.item() d_real = self._d_real if isinstance(self._d_real, float) else self._d_real.item() d_fake = self._d_fake if isinstance(self._d_fake, float) else self._d_fake.item() loss_dict = OrderedDict([("g_rec", loss_g_rec), ("g_tsf", loss_g_tsf), ("g_face", loss_g_face), ("g_adv", loss_g_adv), ("g_mask", loss_g_mask), ("g_mask_smooth", loss_g_smooth), ("d_real", d_real), ("d_fake", d_fake)]) return loss_dict def get_current_scalars(self): return OrderedDict([("lr_G", self._current_lr_G), ("lr_D", self._current_lr_D)]) def get_current_visuals(self): visuals = OrderedDict() visuals["0_source"] = self._vis_source visuals["1_uv_img"] = self._vis_uv_img visuals["2_real_img"] = self._vis_real visuals["3_fake_src"] = self._vis_fake_src visuals["4_fake_tsf"] = self._vis_fake_tsf visuals["5_fake_bg"] = self._vis_fake_bg visuals["6_fake_mask"] = self._vis_mask visuals["7_body_mask"] = self._vis_body_mask return visuals @torch.no_grad() def visual_imgs(self, fake_bg, fake_src_imgs, fake_tsf_imgs, fake_masks): self._vis_fake_bg = tensor2im(fake_bg[0], idx=-1) self._vis_fake_src = tensor2im(fake_src_imgs[0], idx=-1) self._vis_fake_tsf = tensor2im(fake_tsf_imgs[0], idx=-1) self._vis_uv_img = tensor2im(self._uv_img, idx=-1) self._vis_real = tensor2im(self._real_tsf[0], idx=-1) self._vis_source = tensor2im(self._real_src[0], idx=-1) ids = self._opt.num_source - 1 self._vis_mask = tensor2im(fake_masks[0, ids:], idx=-1) self._vis_body_mask = tensor2im(self._body_mask[0, ids:], idx=-1) def save(self, label): if "module" in self.G.__dict__: self.save_network(self.G.module, "G", label) if self._use_gan: self.save_network(self.D.module, "D", label) else: self.save_network(self.G, "G", label) if self._use_gan: self.save_network(self.D, "D", label) self.save_optimizer(self._optimizer_G, "G", label) if self._use_gan: self.save_optimizer(self._optimizer_D, "D", label) def load(self): load_iter = self._opt.load_iter self.load_network(self.G, "G", load_iter, need_module=False) if self._use_gan: self.load_network(self.D, "D", load_iter, need_module=False) def update_learning_rate(self): final_lr = self._train_opts.final_lr lr_decay_G = (self._train_opts.lr_G - final_lr) / self._train_opts.niters_or_epochs_decay self._current_lr_G -= lr_decay_G for param_group in self._optimizer_G.param_groups: param_group["lr"] = self._current_lr_G print("update G learning rate: %f -> %f" % (self._current_lr_G + lr_decay_G, self._current_lr_G)) if self._use_gan: lr_decay_D = (self._train_opts.lr_D - final_lr) / self._train_opts.niters_or_epochs_decay self._current_lr_D -= lr_decay_D for param_group in self._optimizer_D.param_groups: param_group["lr"] = self._current_lr_D print("update D learning rate: %f -> %f" % (self._current_lr_D + lr_decay_D, self._current_lr_D)) def optimize_parameters(self, trainable=True, keep_data_for_visuals=False): fake_bg, fake_src_imgs, fake_tsf_imgs, fake_masks 
= self.forward( keep_data_for_visuals=keep_data_for_visuals) if trainable: loss_G = self.optimize_G(fake_bg, fake_src_imgs, fake_tsf_imgs, fake_masks) self._optimizer_G.zero_grad() loss_G.backward() self._optimizer_G.step() if self._use_gan: loss_D = self.optimize_D(fake_bg, fake_tsf_imgs) self._optimizer_D.zero_grad() loss_D.backward() self._optimizer_D.step() @abc.abstractmethod def forward(self, keep_data_for_visuals): pass @abc.abstractmethod def optimize_G(self, *args, **kwargs): pass @abc.abstractmethod def optimize_D(self, *args, **kwargs): pass class LWGAugBGTrainer(LWGTrainerABC): def __init__(self, opt, device): super().__init__(opt, device) self._name = "LWGAugTrainer" def set_input(self, inputs, device): with torch.no_grad(): images = inputs["images"].to(device, non_blocking=True) aug_bg = inputs["bg"].to(device, non_blocking=True) smpls = inputs["smpls"].to(device, non_blocking=True) masks = inputs["masks"].to(device, non_blocking=True) offsets = inputs["offsets"].to(device, non_blocking=True) if "offsets" in inputs else 0 links_ids = inputs["links_ids"].to(device, non_blocking=True) if "links_ids" in inputs else None ns = self._ns src_img = images[:, 0:ns].contiguous() src_smpl = smpls[:, 0:ns].contiguous() tsf_img = images[:, ns:].contiguous() tsf_smpl = smpls[:, ns:].contiguous() src_mask = masks[:, 0:ns].contiguous() ref_mask = masks[:, ns:].contiguous() input_G_bg, input_G_src, input_G_tsf, Tst, Ttt, src_mask, tsf_mask, head_bbox, body_bbox, uv_img = self.flow_comp(src_img, tsf_img, src_smpl, tsf_smpl, src_mask=src_mask, ref_mask=ref_mask, links_ids=links_ids, offsets=offsets, temporal=self._opt.temporal) self._real_src = src_img self._real_tsf = tsf_img self._head_bbox = head_bbox self._body_bbox = body_bbox self._body_mask = masks self._uv_img = uv_img self._Tst = Tst self._Ttt = Ttt self._input_G_src = input_G_src self._input_G_tsf = input_G_tsf if not self._share_bg: input_G_bg_tsf = torch.cat([tsf_img * tsf_mask, tsf_mask], dim=2) input_G_bg = torch.cat([input_G_bg, input_G_bg_tsf], dim=1) src_mask = src_mask[:, 0:1] inpug_G_aug_bg = torch.cat([aug_bg[:, None] * src_mask, src_mask], dim=2) input_G_bg = torch.cat([input_G_bg, inpug_G_aug_bg], dim=1) self._real_bg = aug_bg self._input_G_bg = input_G_bg def forward(self, keep_data_for_visuals=False): fake_bg, fake_src_color, fake_src_mask, fake_tsf_color, fake_tsf_mask = self.G( self._input_G_bg, self._input_G_src, self._input_G_tsf, Tst=self._Tst, Ttt=self._Ttt, only_tsf=False) if not self._opt.share_bg: fake_bg_src = fake_bg[:, 0:self._ns] fake_bg_tsf = fake_bg[:, self._ns:self._ns + self._nt] else: fake_bg_src = fake_bg[:, 0:1] fake_bg_tsf = fake_bg_src fake_src_imgs = fake_src_mask * fake_bg_src + (1 - fake_src_mask) * fake_src_color fake_tsf_imgs = fake_tsf_mask * fake_bg_tsf + (1 - fake_tsf_mask) * fake_tsf_color fake_masks = torch.cat([fake_src_mask, fake_tsf_mask], dim=1) if keep_data_for_visuals: self.visual_imgs(fake_bg, fake_src_imgs, fake_tsf_imgs, fake_masks) return fake_bg, fake_src_imgs, fake_tsf_imgs, fake_masks def optimize_G(self, fake_bg, fake_src_imgs, fake_tsf_imgs, fake_masks): ns = fake_src_imgs.shape[1] bs, nt, c, h, w = fake_tsf_imgs.shape fake_tsf_imgs = fake_tsf_imgs.view(bs * nt, c, h, w) real_tsf_imgs = self._real_tsf.view(bs * nt, -1, h, w) tsf_cond = self._input_G_tsf[:, :, -3:].view(bs * nt, -1, h, w) fake_input_D = torch.cat([fake_tsf_imgs, tsf_cond], dim=1) fake_aug_bg = fake_bg[:, -1] fake_global = torch.cat([fake_aug_bg, self._input_G_bg[:, -1, -1:]], dim=1) d_inputs = { "x": 
fake_input_D, "bg_x": fake_global, "body_rects": self._body_bbox, "head_rects": self._head_bbox, "get_avg": False } d_fake_outs = self.D(d_inputs) self._loss_g_adv = self.crt_gan(d_fake_outs, 0) * self._train_opts.lambda_D_prob self._loss_g_rec = (self.crt_l1(fake_src_imgs, self._real_src) + self.crt_l1(fake_aug_bg, self._real_bg)) / 2 * self._train_opts.lambda_rec self._loss_g_tsf = self.crt_tsf(fake_tsf_imgs, real_tsf_imgs) * self._train_opts.lambda_tsf if self._train_opts.use_face: self._loss_g_face = self.crt_face( fake_tsf_imgs, real_tsf_imgs, bbox1=self._head_bbox, bbox2=self._head_bbox) * self._train_opts.lambda_face fake_masks = fake_masks.view(bs * (ns + nt), 1, h, w) body_masks = self._body_mask.view(bs * (ns + nt), 1, h, w) self._loss_g_mask = self.crt_mask(fake_masks, body_masks) * self._train_opts.lambda_mask self._loss_g_smooth = self.crt_tv(fake_masks) * self._train_opts.lambda_mask_smooth return self._loss_g_rec + self._loss_g_tsf + self._loss_g_face + self._loss_g_adv + self._loss_g_mask + self._loss_g_smooth def optimize_D(self, fake_bg, fake_tsf_imgs): bs, nt, c, h, w = fake_tsf_imgs.shape fake_tsf_imgs = fake_tsf_imgs.view(bs * nt, c, h, w) real_tsf_imgs = self._real_tsf.reshape(bs * nt, c, h, w) tsf_cond = self._input_G_tsf[:, :, -3:].view(bs * nt, -1, h, w) fake_input_D = torch.cat([fake_tsf_imgs.detach(), tsf_cond], dim=1) real_input_D = torch.cat([real_tsf_imgs, tsf_cond], dim=1) fake_aug_bg = fake_bg[:, -1] fake_bg_x = torch.cat([fake_aug_bg.detach(), self._input_G_bg[:, -1, -1:]], dim=1) real_bg_x = torch.cat([self._real_bg, self._input_G_bg[:, -1, -1:]], dim=1) real_inputs = { "x": real_input_D, "bg_x": real_bg_x, "body_rects": self._body_bbox, "head_rects": self._head_bbox, "get_avg": True } d_real_outs, self._d_real = self.D(real_inputs) fake_inputs = { "x": fake_input_D, "bg_x": fake_bg_x, "body_rects": self._body_bbox, "head_rects": self._head_bbox, "get_avg": True } d_fake_outs, self._d_fake = self.D(fake_inputs) _loss_d_real = self.crt_gan(d_real_outs, 1) _loss_d_fake = self.crt_gan(d_fake_outs, -1) return _loss_d_real + _loss_d_fake class LWGTrainer(LWGTrainerABC): def __init__(self, opt, device): super(LWGTrainer, self).__init__(opt, device) self._name = "LWGTrainer" def set_input(self, inputs, device): with torch.no_grad(): images = inputs["images"].to(device, non_blocking=True) bg = inputs["bg"].to(device, non_blocking=True) smpls = inputs["smpls"].to(device, non_blocking=True) masks = inputs["masks"].to(device, non_blocking=True) offsets = inputs["offsets"].to(device, non_blocking=True) links_ids = inputs["links_ids"].to(device, non_blocking=True) if "links_ids" in inputs else None ns = self._ns src_img = images[:, 0:ns].contiguous() src_smpl = smpls[:, 0:ns].contiguous() tsf_img = images[:, ns:].contiguous() tsf_smpl = smpls[:, ns:].contiguous() src_mask = masks[:, 0:ns].contiguous() ref_mask = masks[:, ns:].contiguous() input_G_bg, input_G_src, input_G_tsf, Tst, Ttt, src_mask, tsf_mask, head_bbox, body_bbox, uv_img = self.flow_comp(src_img, tsf_img, src_smpl, tsf_smpl, src_mask=src_mask, ref_mask=ref_mask, links_ids=links_ids, offsets=offsets, temporal=self._opt.temporal) self._real_src = src_img self._real_tsf = tsf_img self._head_bbox = head_bbox self._body_bbox = body_bbox self._body_mask = masks self._uv_img = uv_img self._Tst = Tst self._Ttt = Ttt self._input_G_src = input_G_src self._input_G_tsf = input_G_tsf if not self._share_bg: input_G_bg_tsf = torch.cat([tsf_img * tsf_mask, tsf_mask], dim=2) input_G_bg = torch.cat([input_G_bg, 
input_G_bg_tsf], dim=1) self._input_G_bg = input_G_bg self._real_bg = bg.view(-1, 3, self._opt.image_size, self._opt.image_size) def forward(self, keep_data_for_visuals=False, return_estimates=False): input_G_tsf = self._input_G_tsf fake_bg, fake_src_color, fake_src_mask, fake_tsf_color, fake_tsf_mask = self.G(self._input_G_bg, self._input_G_src, input_G_tsf, Tst=self._Tst, Ttt=self._Ttt, only_tsf=False) if not self._opt.share_bg: fake_bg_src = fake_bg[:, 0:self._ns] fake_bg_tsf = fake_bg[:, self._ns:self._ns + self._nt] else: fake_bg_src = fake_bg fake_bg_tsf = fake_bg fake_src_imgs = fake_src_mask * fake_bg_src + (1 - fake_src_mask) * fake_src_color fake_tsf_imgs = fake_tsf_mask * fake_bg_tsf + (1 - fake_tsf_mask) * fake_tsf_color fake_masks = torch.cat([fake_src_mask, fake_tsf_mask], dim=1) if keep_data_for_visuals: self.visual_imgs(fake_bg, fake_src_imgs, fake_tsf_imgs, fake_masks) return fake_bg, fake_src_imgs, fake_tsf_imgs, fake_masks def optimize_G(self, fake_bg, fake_src_imgs, fake_tsf_imgs, fake_masks): ns = fake_src_imgs.shape[1] bs, nt, c, h, w = fake_tsf_imgs.shape fake_bg = fake_bg.view(-1, 3, h, w) fake_tsf_imgs = fake_tsf_imgs.view(bs * nt, c, h, w) real_tsf_imgs = self._real_tsf.view(bs * nt, -1, h, w) if self._use_gan: tsf_cond = self._input_G_tsf[:, :, -3:].view(bs * nt, -1, h, w) fake_input_D = torch.cat([fake_tsf_imgs, tsf_cond], dim=1) d_inputs = { "x": fake_input_D, "bg_x": None, "body_rects": self._body_bbox, "head_rects": self._head_bbox, "get_avg": False } d_fake_outs = self.D(d_inputs) self._loss_g_adv = self.crt_gan(d_fake_outs, 0) * self._train_opts.lambda_D_prob self._loss_g_rec = (self.crt_l1(fake_src_imgs, self._real_src) + self.crt_l1(fake_bg, self._real_bg)) / 2 * self._train_opts.lambda_rec self._loss_g_tsf = self.crt_tsf(fake_tsf_imgs, real_tsf_imgs) * self._train_opts.lambda_tsf if self._train_opts.use_face: self._loss_g_face = self.crt_face( fake_tsf_imgs, real_tsf_imgs, bbox1=self._head_bbox, bbox2=self._head_bbox) * self._train_opts.lambda_face fake_masks = fake_masks.view(bs * (ns + nt), 1, h, w) body_masks = self._body_mask.view(bs * (ns + nt), 1, h, w) self._loss_g_mask = self.crt_mask(fake_masks, body_masks) * self._train_opts.lambda_mask self._loss_g_smooth = self.crt_tv(fake_masks) * self._train_opts.lambda_mask_smooth return self._loss_g_rec + self._loss_g_tsf + self._loss_g_face + self._loss_g_adv + self._loss_g_mask + self._loss_g_smooth
Apache License 2.0
demetoir/allgans
util/Stacker.py
Stacker.max_pooling
python
def max_pooling(self, filter_):
    return self.add_layer(max_pooling, filter_)
add max pooling layer
https://github.com/demetoir/allgans/blob/2f972db5e9a65f18aee0695d817f4acc221e54da/util/Stacker.py#L104-L106
from util.tensor_ops import * class Stacker: def __init__(self, start_layer=None, reuse=False, name="stacker"): self.reuse = reuse self.layer_count = 1 self.last_layer = start_layer self.layer_seq = [start_layer] self.name = name def add_layer(self, func, *args, **kwargs): scope_name = self.name + '_layer' + str(self.layer_count) with tf.variable_scope(scope_name, reuse=self.reuse): self.last_layer = func(self.last_layer, *args, **kwargs) self.layer_seq += [self.last_layer] pass self.layer_count += 1 return self.last_layer def bn(self): return self.add_layer(bn) def sigmoid(self): return self.add_layer(sigmoid) def lrelu(self): return self.add_layer(lrelu) def relu(self): return self.add_layer(relu) def elu(self): return self.add_layer(elu) def linear(self, output_size): return self.add_layer(linear, output_size) def conv2d_transpose(self, output_shape, filter_): return self.add_layer(conv2d_transpose, output_shape, filter_) def conv2d(self, output_channel, filter_): return self.add_layer(conv2d, output_channel, filter_) def conv2d_one_by_one(self, output_channel): return self.add_layer(conv2d_one_by_one, output_channel) def upscale_2x(self, output_channel, filter_): return self.add_layer(upscale_2x, output_channel, filter_) def upscale_2x_block(self, output_channel, filter_, activate): return self.add_layer(upscale_2x_block, output_channel, filter_, activate) def conv_block(self, output_channel, filter_, activate): return self.add_layer(conv_block, output_channel, filter_, activate) def avg_pooling(self, filter_): return self.add_layer(avg_pooling, filter_)
MIT License
paddlepaddle/paddle
python/paddle/fluid/entry_attr.py
EntryAttr._to_attr
python
def _to_attr(self):
    raise NotImplementedError("EntryAttr is base class")
Returns the attributes of this parameter.

Returns:
    Parameter attributes(map): The attributes of this parameter.
https://github.com/paddlepaddle/paddle/blob/056b87414880e0520bb4560fc40d5b62db9c5175/python/paddle/fluid/entry_attr.py#L31-L38
from __future__ import print_function __all__ = ['ProbabilityEntry', 'CountFilterEntry'] class EntryAttr(object): def __init__(self): self._name = None
Apache License 2.0
garoe/tf_mvg
mvg_distributions/covariance_representations/covariance_conv.py
PrecisionConvCholFilters.np_off_diag_mask
python
def np_off_diag_mask(self):
    assert self.recons_filters_precision.shape[1:3].is_fully_defined()
    n = self.recons_filters_precision.shape[1].value
    n_width = int(np.sqrt(n))
    nb = self.recons_filters_precision.shape[2].value
    nf = int(np.sqrt(nb))
    kernel = np.zeros((nf, nf), dtype=np.float32)
    kernel[nf // 2 + 1:, :] = 1
    kernel[nf // 2, nf // 2 + 1:] = 1
    kernel_list = np.tile(kernel[np.newaxis, :, :], (n, 1, 1))
    cholesky_matrix = np_ops.np_make_matrix_from_kernel_list(kernels=kernel_list, img_size=n_width)
    cholesky_matrix = cholesky_matrix.T
    return cholesky_matrix
Returns an ndarray of shape [n, n] that is 1 for the off-diagonal elements in L
https://github.com/garoe/tf_mvg/blob/01bc681a8b3aac5dcf0837d481b963f4968eb777/mvg_distributions/covariance_representations/covariance_conv.py#L567-L585
import numpy as np import tensorflow as tf from mvg_distributions.covariance_representations.covariance_matrix import Covariance, SampleMethod from mvg_distributions.utils.variable_filter_functions import conv2d_samples_linear_combination_filters from mvg_distributions.utils.unpooling import unpooling2d_zero_filled import mvg_distributions.utils.numpy_ops as np_ops from scipy.sparse.linalg import spsolve_triangular from scipy.sparse import csc_matrix, csr_matrix, lil_matrix, isspmatrix_csr, SparseEfficiencyWarning from scipy.sparse import eye as sparse_eye from scipy.linalg import LinAlgError from warnings import warn import os from tqdm import tqdm class PrecisionConvFilters(Covariance): def __init__(self, weights_precision, filters_precision, sample_shape, **kwargs): super().__init__(**kwargs) self.sample_shape = sample_shape if isinstance(self.sample_shape, np.ndarray): self.sample_shape = tf.TensorShape(self.sample_shape) if isinstance(self.sample_shape, tf.TensorShape) and self.sample_shape.is_fully_defined(): assert self.sample_shape[3].value == 1, "Only one channel is supported" num_pixels = self.sample_shape[1] * self.sample_shape[2] self._matrix_shape = tf.TensorShape([self.sample_shape[0], num_pixels, num_pixels]) else: num_pixels = self.sample_shape[1] * self.sample_shape[2] self._matrix_shape = tf.stack([self.sample_shape[0], num_pixels, num_pixels], axis=0) self.weights_precision = weights_precision assert self.weights_precision is not None if filters_precision is None: self.filters_precision = self._id_filters(self.weights_precision) else: self.filters_precision = filters_precision assert self.filters_precision is not None self._diag_sqrt_covariance = None self._diag_sqrt_precision = None self._recons_filters_precision = None def _get_epsilon_flat_shape(self, num_samples): return tf.stack([self.sample_shape[0], num_samples, tf.reduce_prod(self.sample_shape[1:])], axis=0) def _get_epsilon(self, num_samples, epsilon, seed=None): epsilon_shape = self._get_epsilon_flat_shape(num_samples) if epsilon is None: epsilon = self._build_epsilon(epsilon_shape, seed=seed) if epsilon.shape.ndims + 1 == epsilon_shape.shape[0].value: epsilon = tf.expand_dims(epsilon, 1) return epsilon def _get_epsilon_img_shape(self, num_samples): return tf.concat([self.sample_shape[0:1], [num_samples], self.sample_shape[1:]], axis=0) def _get_epsilon_5_dim(self, num_samples, epsilon, seed=None): epsilon_shape = self._get_epsilon_img_shape(num_samples) if epsilon is None: epsilon = self._build_epsilon(epsilon_shape, seed=seed) if epsilon.shape.ndims == 2 or epsilon.shape.ndims == 3: flat_shape = tf.shape(epsilon) dim_4_5_shape = tf.concat([flat_shape[0:epsilon.shape.ndims - 1], self.sample_shape[1:]], axis=0) epsilon = tf.reshape(epsilon, dim_4_5_shape) if epsilon.shape.ndims + 1 == epsilon_shape.shape[0].value: epsilon = tf.expand_dims(epsilon, 1) return epsilon def _flatten_keep_sample_dim(self, inputs): if inputs.shape.ndims == 3: return inputs elif inputs.shape.ndims == 5: inputs_shape = tf.shape(inputs) flat_shape = tf.concat([inputs_shape[0:2], [tf.reduce_prod(self.sample_shape[1:])]], axis=0) return tf.reshape(inputs, shape=flat_shape) else: raise ValueError("Invalid number of dimensions {}".format(inputs.shape.ndims)) def x_precision_x(self, x, mean_batch=False, no_gradients=True, **kwargs): if no_gradients == False: raise NotImplementedError("") with tf.name_scope("x_precision_x"): x_precision = self.whiten_x(x=x, flatten_output=True) if x_precision.shape.ndims == 2: x_precision = 
tf.expand_dims(x_precision, axis=1) squared_error = tf.multiply(x_precision, x_precision) squared_error = tf.reduce_sum(squared_error, axis=2) if squared_error.shape[1].value == 1: squared_error = tf.squeeze(squared_error, axis=1, name="x_precision_x") if mean_batch: squared_error = tf.reduce_mean(squared_error, axis=0, name="mean_x_precision_x") return squared_error @staticmethod def _id_filters(weights_precision): with tf.name_scope('id_filters'): num_ch = 1 filter_wh = weights_precision.shape[-1].value filter_size = int(np.sqrt(filter_wh)) identity_basis = tf.eye(num_rows=filter_wh) return tf.reshape(identity_basis, (filter_wh, filter_size, filter_size, num_ch, num_ch)) @property def diag_sqrt_covariance(self): if self._diag_sqrt_covariance is None: self._diag_sqrt_covariance = self._build_diag_sqrt_covariance() return self._diag_sqrt_covariance def _build_diag_sqrt_covariance(self): with tf.name_scope("DiagSqrtCovariance"): return tf.matrix_diag_part(self.sqrt_covariance, name="diag_sqrt_covariance") @property def diag_sqrt_precision(self): if self._diag_sqrt_precision is None: self._diag_sqrt_precision = self._build_diag_sqrt_precision() return self._diag_sqrt_precision def _build_diag_sqrt_precision(self): with tf.name_scope("DiagSqrtPrecision"): return self._build_diag_from_weights(name="diag_sqrt_precision") @property def recons_filters_precision(self): if self._recons_filters_precision is None: self._recons_filters_precision = self._reconstruct_basis(weights=self.weights_precision, basis=self.filters_precision) return self._recons_filters_precision def _build_diag_from_weights(self, name="diag_from_weights"): weights = self.weights_precision filters = self.filters_precision filters.shape.assert_is_fully_defined() filters_shape = filters.shape.as_list() center_i = (np.array(filters_shape[1:3]) - 1) // 2 diag_basis = filters[:, center_i[0], center_i[1]] if weights.shape.is_fully_defined(): w_flat_shape = tf.TensorShape([weights.shape[0], weights.shape[1] * weights.shape[2], weights.shape[3]]) else: weights_shape = tf.shape(weights) w_flat_shape = tf.stack([weights_shape[0], weights_shape[1] * weights_shape[2], weights_shape[3]]) weights_flat = tf.reshape(weights, shape=w_flat_shape) diag_basis = tf.reshape(diag_basis, shape=(1, 1, filters_shape[0])) diag_covariance = tf.multiply(weights_flat, diag_basis) return tf.reduce_sum(diag_covariance, axis=2, name=name) def _build_covariance(self): with tf.name_scope("Covariance"): return self._inverse_covariance_or_precision() def _build_precision(self): with tf.name_scope("Precision"): return tf.matmul(self.sqrt_precision, self.sqrt_precision, transpose_b=True, name="precision") def _build_sqrt_covariance(self): with tf.name_scope("Covariance_Sqrt"): return tf.matrix_inverse(self.sqrt_precision, name="covariance_sqrt") def _build_sqrt_precision(self): with tf.name_scope("Precision_Sqrt"): return self._build_matrix_from_basis(weights=self.weights_precision, basis=self.filters_precision, name="precision_sqrt") def _reconstruct_basis(self, weights, basis, name="reconstruct_basis"): with tf.name_scope(name): basis.shape[3:5].assert_is_fully_defined() assert basis.shape[3] == 1 and basis.shape[4] == 1, "Only on channel is supported" basis = tf.squeeze(basis, axis=[3, 4]) if basis.shape.is_fully_defined(): basis_flat_shape = tf.TensorShape([basis.shape[0], basis.shape[1] * basis.shape[2]]) else: basis_shape = tf.shape(basis) basis_flat_shape = tf.stack([basis_shape[0], basis_shape[1] * basis_shape[2]]) if weights.shape.is_fully_defined(): 
weights_shape = weights.shape weights_flat_shape = tf.TensorShape( [weights.shape[0], weights.shape[1] * weights.shape[2], weights.shape[3]]) else: weights_shape = tf.shape(weights) weights_flat_shape = tf.stack([weights_shape[0], weights_shape[1] * weights_shape[2], weights_shape[3]]) basis_flat = tf.reshape(basis, shape=basis_flat_shape) basis_flat = tf.expand_dims(basis_flat, axis=0) basis_flat = tf.tile(basis_flat, multiples=tf.stack([weights_shape[0], 1, 1])) weights_flat = tf.reshape(weights, shape=weights_flat_shape) reconstructed_basis = tf.matmul(weights_flat, basis_flat) output_shape = [weights.shape[0], weights.shape[1] * weights.shape[2], basis.shape[1] * basis.shape[2]] reconstructed_basis.set_shape(output_shape) return reconstructed_basis def _build_matrix_from_basis(self, weights, basis, name=None): with tf.name_scope(name, default_name="build_matrix_from_basis"): filters = self.recons_filters_precision filters_shape, img_h, img_w = self._compute_shapes_for_filter_matrix(basis, weights) filters = tf.reshape(filters, shape=filters_shape) filters_shape, filteres_h_half, filters_w_half = self._compute_shapes_for_single_filter(filters) matrix = list() k = 0 for i in range(0, img_w): for j in range(0, img_h): filter_ij = filters[:, k] padding_w0 = i - filters_w_half padding_w1 = img_w - (filters_shape[2].value + padding_w0) padding_h0 = j - filteres_h_half padding_h1 = img_h - (filters_shape[3].value + padding_h0) if padding_w0 < 0: filter_ij = filter_ij[:, np.abs(padding_w0):filters_shape[2].value, :] padding_w0 = 0 if padding_w1 < 0: filter_ij = filter_ij[:, 0:filters_shape[2].value - np.abs(padding_w1), :] padding_w1 = 0 if padding_h0 < 0: filter_ij = filter_ij[:, :, np.abs(padding_h0):filters_shape[3].value] padding_h0 = 0 if padding_h1 < 0: filter_ij = filter_ij[:, :, 0:filters_shape[3].value - np.abs(padding_h1)] padding_h1 = 0 padding = tf.constant([[0, 0], [padding_w0, padding_w1], [padding_h0, padding_h1]]) filter_ij = tf.pad(filter_ij, paddings=padding) filter_ij = tf.layers.flatten(filter_ij) matrix.append(filter_ij) k += 1 matrix = tf.stack(matrix, axis=1) return tf.matrix_transpose(matrix) def _compute_shapes_for_single_filter(self, filters): if filters.shape[1:].is_fully_defined(): filters_shape = filters.shape assert filters_shape[2].value / 2.0 != 0, "Filter width must be an odd number" assert filters_shape[3].value / 2.0 != 0, "Filter height must be an odd number" filters_w_half = (filters_shape[2].value - 1) // 2 filteres_h_half = (filters_shape[3].value - 1) // 2 else: filters_shape = tf.shape(filters) filters_w_half = (filters_shape[2] - 1) // 2 filteres_h_half = (filters_shape[3] - 1) // 2 return filters_shape, filteres_h_half, filters_w_half def _compute_shapes_for_filter_matrix(self, basis, weights): if basis.shape.is_fully_defined() and weights.shape[1:].is_fully_defined(): weights_shape = weights.shape.as_list() if weights_shape[0] is None: batch_size = -1 else: batch_size = weights_shape[0] filters_shape = [batch_size, weights_shape[1] * weights_shape[2]] filters_shape = filters_shape + basis.shape[1:3].as_list() img_w = weights_shape[1] img_h = weights_shape[2] else: weights_shape = tf.shape(weights) filters_shape = tf.shape(basis) batch_flat_img_shape = [weights_shape[0], weights_shape[1] * weights_shape[2]] filters_shape = tf.concat(batch_flat_img_shape + filters_shape[1:3], axis=0) img_w = weights_shape[1] img_h = weights_shape[2] raise NotImplementedError("Only supported for images of known sizes") return filters_shape, img_h, img_w def 
_build_epsilon(self, epsilon_shape, seed=None): with tf.name_scope("Epsilon"): return tf.random_normal(shape=epsilon_shape, dtype=self.dtype, seed=seed, name="epsilon") def _sample_with_net(self, epsilon, filters, weights, name="sample_with_net"): return conv2d_samples_linear_combination_filters(inputs=epsilon, filters=filters, alpha=weights, name=name) def _sample_common(self, num_samples, epsilon, sample_method, return_epsilon, flatten_output, is_covariance): if is_covariance: name = "sample_covariance" else: name = "whiten_x" if sample_method == SampleMethod.NET: weights = self.weights_precision filters = self.filters_precision if not is_covariance: with tf.name_scope(name): epsilon = self._get_epsilon_5_dim(num_samples, epsilon) sample = self._sample_with_net(epsilon, filters, weights) if flatten_output: epsilon = self._flatten_keep_sample_dim(epsilon) sample = self._flatten_keep_sample_dim(sample) epsilon = self._squeeze_sample_dims(epsilon, name='epsilon') sample = self._squeeze_sample_dims(sample, name='sample') if return_epsilon: return sample, epsilon else: return sample else: sample_method = SampleMethod.CHOLESKY with tf.name_scope(name): epsilon = self._get_epsilon_5_dim(num_samples, epsilon) epsilon_5_dims_shape = tf.shape(epsilon) epsilon = self._flatten_keep_sample_dim(epsilon) if is_covariance: sample = super().sample_covariance(num_samples, epsilon, sample_method=sample_method) else: sample = super().whiten_x(num_samples, epsilon, sample_method=sample_method) if not flatten_output: epsilon = tf.reshape(epsilon, shape=epsilon_5_dims_shape) sample = tf.reshape(sample, shape=epsilon_5_dims_shape) sample = self._squeeze_sample_dims(sample, name='sample') epsilon = self._squeeze_sample_dims(epsilon, name='epsilon') if return_epsilon: return sample, epsilon else: return sample def sample_covariance(self, num_samples=1, epsilon=None, sample_method=None, return_epsilon=False, flatten_output=False): if sample_method is None: sample_method = SampleMethod.NET return self._sample_common(num_samples, epsilon, sample_method, return_epsilon, flatten_output, is_covariance=True) def whiten_x(self, num_samples=1, x=None, sample_method=None, return_epsilon=False, flatten_output=False): if sample_method is None: sample_method = SampleMethod.NET return self._sample_common(num_samples, x, sample_method, return_epsilon, flatten_output, is_covariance=False) class PrecisionConvCholFilters(PrecisionConvFilters): def __init__(self, weights_precision, filters_precision, sample_shape, **kwargs): super().__init__(weights_precision=weights_precision, filters_precision=filters_precision, sample_shape=sample_shape, **kwargs) self._log_diag_chol_covariance = None self._log_diag_chol_precision = None self._diag_covariance = None self._diag_precision = None self._recons_filters_precision_aligned = None self._build_with_covariance = False self.dtype = weights_precision.dtype self._t_indices = None self._l_indices = None def _build_chol_precision(self): with tf.name_scope("Precision_Chol"): return self._build_matrix_from_basis(weights=self.weights_precision, basis=self.filters_precision, name="precision_chol") def _build_covariance(self): with tf.name_scope("Covariance"): return self._inverse_covariance_or_precision() def _build_precision(self): with tf.name_scope("Precision"): return tf.matmul(self.chol_precision, self.chol_precision, transpose_b=True, name="precision") def _build_log_det_covariance_with_chol(self): log_det = 2.0 * tf.reduce_sum(self.log_diag_chol_precision, axis=1) return tf.negative(log_det, 
name="log_det_covar") @property def log_diag_chol_covariance(self): if self._log_diag_chol_covariance is None: self._log_diag_chol_covariance = self._build_log_diag_chol_covariance() return self._log_diag_chol_covariance @log_diag_chol_covariance.setter def log_diag_chol_covariance(self, value): self._log_diag_chol_covariance = value def _build_log_diag_chol_covariance(self): with tf.name_scope("DiagCholCovariance"): diag_c = tf.matrix_diag_part(self.chol_covariance, name="diag_chol_covariance") return tf.log(diag_c, name="log_diag_chol_covariance") @property def log_diag_chol_precision(self): if self._log_diag_chol_precision is None: self._log_diag_chol_precision = self._build_log_diag_chol_precision() return self._log_diag_chol_precision @log_diag_chol_precision.setter def log_diag_chol_precision(self, value): self._log_diag_chol_precision = value def _build_log_diag_chol_precision(self): with tf.name_scope("DiagCholPrecision"): diag_p = self._build_diag_from_weights(name="diag_chol_precision") return tf.log(diag_p, name="log_diag_chol_precision") def _build_diag_sqrt_covariance(self): with tf.name_scope("DiagSqrtCovariance"): return tf.matrix_diag_part(self.sqrt_covariance, name="diag_chol_precision") def _build_diag_sqrt_precision(self): with tf.name_scope("DiagSqrtPrecision"): return tf.matrix_diag_part(self.sqrt_precision, name="diag_chol_precision") def _build_sqrt_covariance(self): return super(PrecisionConvFilters, self)._build_sqrt_covariance() def _build_sqrt_precision(self): return super(PrecisionConvFilters, self)._build_sqrt_precision() def _conv_filter_for_diag(self, filters_shape): with tf.name_scope('Cov-Diag-Filter'): if isinstance(filters_shape, tf.Tensor): raise NotImplementedError("") half_size = (filters_shape[2] * filters_shape[3]) // 2 + 1 cov_shape = [filters_shape[2], filters_shape[3], half_size] cov_filter = np.zeros(shape=cov_shape, dtype=np.float32) f_width = cov_filter.shape[1] i = 0 j = 0 for c in range(half_size): cov_filter[i, j, -(c + 1)] = 1 j += 1 if j == f_width: j = 0 i += 1 cov_filter = np.expand_dims(cov_filter, axis=3) cov_filter = tf.constant(cov_filter) return cov_filter def _build_diag_part_with_conv(self, filters, basis, weights, name): filters_shape, img_h, img_w = self._compute_shapes_for_filter_matrix(basis, weights) with tf.name_scope(name + 'Diag-Part'): conv_diag_filter = self._conv_filter_for_diag(filters_shape) half_f_width = (filters_shape[2] * filters_shape[3]) // 2 filters = filters[:, :, half_f_width:] filters = tf.reshape(filters, (-1, img_w, img_h, half_f_width + 1)) filters_sq = tf.square(filters) diag_part = tf.nn.conv2d(filters_sq, conv_diag_filter, strides=(1, 1, 1, 1), padding='SAME') return tf.layers.flatten(diag_part) def _build_precision_diag_part(self): return self._build_diag_part_with_conv(filters=self.recons_filters_precision, basis=self.filters_precision, weights=self.weights_precision, name='Precision') def _align_filters_per_row(self, filters, basis, weights, name): filters_shape, img_h, img_w = self._compute_shapes_for_filter_matrix(basis, weights) with tf.name_scope(name + 'Row-Align'): conv_filter = self._conv_filter_for_diag(filters_shape) half_f_width = (filters_shape[2] * filters_shape[3]) // 2 zeroes = filters[:, :, 0:half_f_width] filters = filters[:, :, half_f_width:] filters = tf.reshape(filters, (-1, img_w, img_h, half_f_width + 1)) aligned_filters = tf.nn.depthwise_conv2d(filters, conv_filter, strides=(1, 1, 1, 1), padding='SAME') aligned_filters = tf.reshape(aligned_filters, (-1, img_w * img_h, 
half_f_width + 1)) aligned_filters = tf.concat([zeroes, aligned_filters], axis=-1) return aligned_filters @property def recons_filters_precision_aligned(self): if self._recons_filters_precision_aligned is None: self._recons_filters_precision_aligned = self._align_filters_per_row(filters=self.recons_filters_precision, basis=self.filters_precision, weights=self.weights_precision, name='Aligned-Filters') return self._recons_filters_precision_aligned
MIT License
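The np_off_diag_mask record above builds a 0/1 mask for the off-diagonal entries of the Cholesky factor L from the convolutional kernel footprint. As a rough, standalone illustration of the idea (it does not call the class above, and the library derives its mask from per-pixel kernels before transposing), the dense-matrix analogue in plain NumPy is a strictly triangular 0/1 matrix:

import numpy as np

# Standalone sketch: mark the off-diagonal entries of a 4x4 triangular
# factor with ones and leave the diagonal (and the other triangle) at zero.
n = 4
off_diag_mask = np.tril(np.ones((n, n), dtype=np.float32), k=-1)
print(off_diag_mask)
# [[0. 0. 0. 0.]
#  [1. 0. 0. 0.]
#  [1. 1. 0. 0.]
#  [1. 1. 1. 0.]]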
redcokedevelopment/teapot.py
teapot/cogs/music.py
Music.queue
python
async def queue(self, ctx, page: int = 1):
    player = self.bot.lavalink.player_manager.get(ctx.guild.id)

    if not player.queue:
        return await ctx.send('Nothing queued.')

    items_per_page = 10
    pages = math.ceil(len(player.queue) / items_per_page)

    start = (page - 1) * items_per_page
    end = start + items_per_page

    queue_list = ''
    for index, track in enumerate(player.queue[start:end], start=start):
        queue_list += f'`{index + 1}.` [**{track.title}**]({track.uri})\n'

    embed = discord.Embed(colour=discord.Color.blurple(),
                          description=f'**{len(player.queue)} tracks**\n\n{queue_list}')
    embed.set_footer(text=f'Viewing page {page}/{pages}')
    await ctx.send(embed=embed)
Shows the player's queue.
https://github.com/redcokedevelopment/teapot.py/blob/aa4e92d7a1bf6f997051ae3422ba52fc034e317b/teapot/cogs/music.py#L140-L160
import math import re import discord import lavalink from discord.ext import commands import teapot url_rx = re.compile('https?:\\/\\/(?:www\\.)?.+') class Music(commands.Cog): def __init__(self, bot): self.bot = bot if not hasattr(bot, 'lavalink'): bot.lavalink = lavalink.Client(bot.user.id) bot.lavalink.add_node(teapot.config.lavalink_host(), teapot.config.lavalink_port(), teapot.config.lavalink_password(), 'zz', 'default') bot.add_listener(bot.lavalink.voice_update_handler, 'on_socket_response') bot.lavalink.add_event_hook(self.track_hook) def cog_unload(self): self.bot.lavalink._event_hooks.clear() async def cog_before_invoke(self, ctx): guild_check = ctx.guild is not None if guild_check: await self.ensure_voice(ctx) return guild_check async def cog_command_error(self, ctx, error): if isinstance(error, commands.CommandInvokeError): await ctx.send(error.original) async def track_hook(self, event): if isinstance(event, lavalink.events.QueueEndEvent): guild_id = int(event.player.guild_id) await self.connect_to(guild_id, None) async def connect_to(self, guild_id: int, channel_id: str): ws = self.bot._connection._get_websocket(guild_id) await ws.voice_state(str(guild_id), channel_id) @commands.command(aliases=['p']) async def play(self, ctx, *, query: str): player = self.bot.lavalink.player_manager.get(ctx.guild.id) query = query.strip('<>') if not url_rx.match(query): query = f'ytsearch:{query}' results = await player.node.get_tracks(query) if not results or not results['tracks']: return await ctx.send('Nothing found!') embed = discord.Embed(color=discord.Color.blurple()) if results['loadType'] == 'PLAYLIST_LOADED': tracks = results['tracks'] for track in tracks: player.add(requester=ctx.author.id, track=track) embed.title = 'Playlist Enqueued!' embed.description = f'{results["playlistInfo"]["name"]} - {len(tracks)} tracks' else: track = results['tracks'][0] embed.title = 'Track Enqueued' embed.description = f'[{track["info"]["title"]}]({track["info"]["uri"]})' player.add(requester=ctx.author.id, track=track) await ctx.send(embed=embed) if not player.is_playing: await player.play() @commands.command() async def seek(self, ctx, *, seconds: int): player = self.bot.lavalink.player_manager.get(ctx.guild.id) track_time = player.position + (seconds * 1000) await player.seek(track_time) await ctx.send(f'Moved track to **{lavalink.utils.format_time(track_time)}**') @commands.command(aliases=['forceskip']) async def skip(self, ctx): player = self.bot.lavalink.player_manager.get(ctx.guild.id) if not player.is_playing: return await ctx.send('Not playing.') await player.skip() await ctx.send('⏭ | Skipped.') @commands.command() async def stop(self, ctx): player = self.bot.lavalink.player_manager.get(ctx.guild.id) if not player.is_playing: return await ctx.send('Not playing.') player.queue.clear() await player.stop() await ctx.send('⏹ | Stopped.') @commands.command(aliases=['np', 'n', 'playing']) async def now(self, ctx): player = self.bot.lavalink.player_manager.get(ctx.guild.id) if not player.current: return await ctx.send('Nothing playing.') position = lavalink.utils.format_time(player.position) if player.current.stream: duration = '🔴 LIVE' else: duration = lavalink.utils.format_time(player.current.duration) song = f'**[{player.current.title}]({player.current.uri})**\n({position}/{duration})' embed = discord.Embed(color=discord.Color.blurple(), title='Now Playing', description=song) await ctx.send(embed=embed) @commands.command(aliases=['q'])
MIT License
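The queue command above relies on simple page arithmetic: 10 items per page and a ceiling division for the page count. A minimal standalone sketch of that slicing logic, with hypothetical names and data not taken from the repository:

import math

def page_slice(items, page, items_per_page=10):
    # 1-indexed page of items plus the total number of pages.
    pages = math.ceil(len(items) / items_per_page)
    start = (page - 1) * items_per_page
    return items[start:start + items_per_page], pages

tracks = [f"track {i}" for i in range(1, 26)]  # 25 queued tracks
shown, pages = page_slice(tracks, page=3)
print(shown)   # ['track 21', 'track 22', 'track 23', 'track 24', 'track 25']
print(pages)   # 3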
kirthevasank/nasbot
nn/nn_comparators.py
_get_conv_filter_size_cost
python
def _get_conv_filter_size_cost(labi, labj, conv_scale):
    conv_diff = float(abs(int(labi[-1]) - int(labj[-1])))
    return conv_scale * np.sqrt(conv_diff)
Returns the cost for comparing two different convolutional filters.
https://github.com/kirthevasank/nasbot/blob/3c745dc986be30e3721087c8fa768099032a0802/nn/nn_comparators.py#L27-L30
import numpy as np from gp.kernel import ExpSumOfDistsKernel, SumOfExpSumOfDistsKernel from nn import neural_network from utils.oper_utils import opt_transport DFLT_TRANSPORT_DIST = 'lp' DFLT_CONN_COST_FUNC = 'linear' DFLT_KERN_DIST_POWERS = 1 REPLACE_COST_INF_WITH = 7.65432e5 CONV_RES_RAW_COST_FRAC = 0.9 CNN_STRUCTURAL_PENALTY_GROUPS = ['all', 'conv', 'pool', 'fc'] MLP_STRUCTURAL_PENALTY_GROUPS = ['all', 'rectifier', 'sigmoid'] PATH_LENGTH_TYPES = ['shortest', 'longest', 'rw']
MIT License
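The cost above reads the last character of each layer label as the filter size and scales the square root of the absolute difference. A small worked example with made-up labels (the 'convN' naming is an assumption; only the trailing digit matters to the function):

import numpy as np

def _get_conv_filter_size_cost(labi, labj, conv_scale):
    conv_diff = float(abs(int(labi[-1]) - int(labj[-1])))
    return conv_scale * np.sqrt(conv_diff)

# |3 - 7| = 4, sqrt(4) = 2, scaled by 0.5 -> 1.0
print(_get_conv_filter_size_cost('conv3', 'conv7', conv_scale=0.5))  # 1.0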
aws/aws-iot-device-sdk-python
AWSIoTPythonSDK/core/greengrass/discovery/providers.py
DiscoveryInfoProvider.configureCredentials
python
def configureCredentials(self, caPath, certPath, keyPath):
    self._ca_path = caPath
    self._cert_path = certPath
    self._key_path = keyPath
**Description**

Used to configure the credentials for discovery request. Should be called before the discovery request happens.

**Syntax**

.. code:: python

  myDiscoveryInfoProvider.configureCredentials("my/ca/path", "my/cert/path", "my/key/path")

**Parameters**

*caPath* - Path to read the root CA file.

*certPath* - Path to read the certificate file.

*keyPath* - Path to read the private key file.

**Returns**

None
https://github.com/aws/aws-iot-device-sdk-python/blob/a67eadfcbf9d229229b18435fb7a109685250854/AWSIoTPythonSDK/core/greengrass/discovery/providers.py#L145-L173
from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryInvalidRequestException from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryUnauthorizedException from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryDataNotFoundException from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryThrottlingException from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryTimeoutException from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryFailure from AWSIoTPythonSDK.core.greengrass.discovery.models import DiscoveryInfo from AWSIoTPythonSDK.core.protocol.connection.alpn import SSLContextBuilder import re import sys import ssl import time import errno import logging import socket import platform if platform.system() == 'Windows': EAGAIN = errno.WSAEWOULDBLOCK else: EAGAIN = errno.EAGAIN class DiscoveryInfoProvider(object): REQUEST_TYPE_PREFIX = "GET " PAYLOAD_PREFIX = "/greengrass/discover/thing/" PAYLOAD_SUFFIX = " HTTP/1.1\r\n" HOST_PREFIX = "Host: " HOST_SUFFIX = "\r\n\r\n" HTTP_PROTOCOL = r"HTTP/1.1 " CONTENT_LENGTH = r"content-length: " CONTENT_LENGTH_PATTERN = CONTENT_LENGTH + r"([0-9]+)\r\n" HTTP_RESPONSE_CODE_PATTERN = HTTP_PROTOCOL + r"([0-9]+) " HTTP_SC_200 = "200" HTTP_SC_400 = "400" HTTP_SC_401 = "401" HTTP_SC_404 = "404" HTTP_SC_429 = "429" LOW_LEVEL_RC_COMPLETE = 0 LOW_LEVEL_RC_TIMEOUT = -1 _logger = logging.getLogger(__name__) def __init__(self, caPath="", certPath="", keyPath="", host="", port=8443, timeoutSec=120): self._ca_path = caPath self._cert_path = certPath self._key_path = keyPath self._host = host self._port = port self._timeout_sec = timeoutSec self._expected_exception_map = { self.HTTP_SC_400 : DiscoveryInvalidRequestException(), self.HTTP_SC_401 : DiscoveryUnauthorizedException(), self.HTTP_SC_404 : DiscoveryDataNotFoundException(), self.HTTP_SC_429 : DiscoveryThrottlingException() } def configureEndpoint(self, host, port=8443): self._host = host self._port = port
Apache License 2.0
santhisenan/sdn_ddos_simulation
ddpg/replay_buffer.py
ReplayBuffer.sample_batch
python
def sample_batch(self, batch_size=32):
    _available_batch_length = self._count if self._count < batch_size else batch_size
    batch = random.sample(self._buffer, _available_batch_length)

    _states = np.array([_experience[0] for _experience in batch])
    _actions = np.array([_experience[1] for _experience in batch])
    _rewards = np.array([_experience[2] for _experience in batch])
    _dones = np.array([_experience[3] for _experience in batch])
    _next_states = np.array([_experience[4] for _experience in batch])

    return batch
If the number of elements in the replay memory is less than the required batch_size, then return only those elements present in the memory, else return 'batch_size' number of elements.
https://github.com/santhisenan/sdn_ddos_simulation/blob/be0f812de4d2e0668f1266a71172948123e3750c/ddpg/replay_buffer.py#L24-L44
from collections import deque import random import numpy as np class ReplayBuffer(object): def __init__(self, buffer_size): self._buffer_size = buffer_size self._count = 0 self._buffer = deque() def insert(self, _experience): if(self._count <= self._buffer_size): self._buffer.append(_experience) self._count += 1 else: self._buffer.popleft() self._buffer.append(_experience) def size(self): return self._count
MIT License
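A minimal usage sketch for the ReplayBuffer above, assuming the repository root is on the Python path so ddpg.replay_buffer is importable; the experience layout (state, action, reward, done, next_state) is inferred from the indexing in sample_batch rather than documented in the record.

import numpy as np
from ddpg.replay_buffer import ReplayBuffer  # module path taken from the record

buffer = ReplayBuffer(buffer_size=1000)

# Assumed experience layout: (state, action, reward, done, next_state).
for step in range(5):
    buffer.insert((np.zeros(4), step, 1.0, False, np.ones(4)))

# Fewer than batch_size elements are stored, so only those 5 come back.
batch = buffer.sample_batch(batch_size=32)
print(len(batch))  # 5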
hyde/hyde
setup.py
find_package_data
python
def find_package_data(
        where='.', package='',
        exclude=standard_exclude,
        exclude_directories=standard_exclude_directories,
        only_in_packages=True,
        show_ignored=False):
    out = {}
    stack = [(convert_path(where), '', package, only_in_packages)]
    while stack:
        where, prefix, package, only_in_packages = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if fnmatchcase(name, pattern) or fn.lower() == pattern.lower():
                        bad_name = True
                        if show_ignored:
                            msg = "Directory {} ignored by pattern {}"
                            sys.stderr.write(msg.format(fn, pattern))
                        break
                if bad_name:
                    continue
                if os.path.isfile(os.path.join(fn, '__init__.py')):
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package, False))
                else:
                    stack.append((fn, prefix + name + '/', package, only_in_packages))
            elif package or not only_in_packages:
                bad_name = False
                for pattern in exclude:
                    if fnmatchcase(name, pattern) or fn.lower() == pattern.lower():
                        bad_name = True
                        if show_ignored:
                            msg = "File {} ignored by pattern {}"
                            sys.stderr.write(msg.format(fn, pattern))
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix + name)
    return out
Return a dictionary suitable for use in ``package_data`` in a distutils ``setup.py`` file.

The dictionary looks like::

    {'package': [files]}

Where ``files`` is a list of all the files in that package that don't match anything in ``exclude``.

If ``only_in_packages`` is true, then top-level directories that are not packages won't be included (but directories under packages will).

Directories matching any pattern in ``exclude_directories`` will be ignored; by default directories with leading ``.``, ``CVS``, and ``_darcs`` will be ignored.

If ``show_ignored`` is true, then all the files that aren't included in package data are shown on stderr (for debugging purposes).

Note patterns use wildcards, or can be exact paths (including leading ``./``), and all searching is case-insensitive.

This function is by Ian Bicking.
https://github.com/hyde/hyde/blob/7f415402cc3e007a746eb2b5bc102281fdb415bd/setup.py#L26-L104
from setuptools import setup, find_packages from hyde.version import __version__ from distutils.util import convert_path from fnmatch import fnmatchcase import os import sys PROJECT = 'hyde' try: long_description = open('README.rst', 'rt').read() except IOError: long_description = '' standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*') standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info')
MIT License
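A hedged sketch of how the helper's return value could be wired into setup(), assuming it sits in the same setup.py as the function above; the 'hyde' package name comes from the record, the remaining arguments are illustrative.

from setuptools import setup, find_packages

# Illustrative only: ship the non-Python files found under the 'hyde'
# package by passing the mapping returned above as package_data.
setup(
    name='hyde',
    packages=find_packages(),
    package_data=find_package_data(where='hyde', package='hyde'),
)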
openfun/marsha
src/backend/marsha/core/serializers/video.py
ThumbnailSerializer.get_urls
python
def get_urls(self, obj):
    if obj.uploaded_on:
        base = f"{settings.AWS_S3_URL_PROTOCOL}://{settings.CLOUDFRONT_DOMAIN}/{obj.video.pk}"
        urls = {}
        stamp = time_utils.to_timestamp(obj.uploaded_on)
        for resolution in settings.VIDEO_RESOLUTIONS:
            urls[resolution] = f"{base}/thumbnails/{stamp}_{resolution}.jpg"
        return urls
    return None
Urls of the thumbnail.

Parameters
----------
obj : Type[models.Thumbnail]
    The thumbnail that we want to serialize

Returns
-------
Dict or None
    The urls for the thumbnail. None if the thumbnail is still not uploaded to S3 with success.
https://github.com/openfun/marsha/blob/550be08d7cad91579cd1cc2548ea95751113f15d/src/backend/marsha/core/serializers/video.py#L250-L272
from datetime import timedelta from urllib.parse import quote_plus from django.conf import settings from django.shortcuts import get_object_or_404 from django.urls import reverse from django.utils import timezone from django.utils.text import slugify from botocore.signers import CloudFrontSigner from rest_framework import serializers from rest_framework_simplejwt.models import TokenUser from ..defaults import IDLE, JITSI, LIVE_CHOICES, LIVE_TYPE_CHOICES, RUNNING, STOPPED from ..models import LiveRegistration, Playlist, Thumbnail, TimedTextTrack, Video from ..models.account import ADMINISTRATOR, INSTRUCTOR, LTI_ROLES from ..utils import cloudfront_utils, time_utils, xmpp_utils from ..utils.url_utils import build_absolute_uri_behind_proxy from .base import TimestampField from .playlist import PlaylistLiteSerializer class TimedTextTrackSerializer(serializers.ModelSerializer): class Meta: model = TimedTextTrack fields = ( "active_stamp", "id", "is_ready_to_show", "mode", "language", "upload_state", "url", "source_url", "video", ) read_only_fields = ( "id", "active_stamp", "is_ready_to_show", "upload_state", "url", "video", ) active_stamp = TimestampField( source="uploaded_on", required=False, allow_null=True, read_only=True ) url = serializers.SerializerMethodField() source_url = serializers.SerializerMethodField() video = serializers.PrimaryKeyRelatedField( read_only=True, pk_field=serializers.CharField() ) is_ready_to_show = serializers.BooleanField(read_only=True) def create(self, validated_data): user = self.context["request"].user if ( self.initial_data.get("video") and user.token.get("user") and user.token["resource_id"] == user.token.get("user", {}).get("id") ): validated_data["video_id"] = self.initial_data.get("video") if not validated_data.get("video_id") and isinstance(user, TokenUser): validated_data["video_id"] = user.id return super().create(validated_data) def _sign_url(self, url): date_less_than = timezone.now() + timedelta( seconds=settings.CLOUDFRONT_SIGNED_URLS_VALIDITY ) cloudfront_signer = CloudFrontSigner( settings.CLOUDFRONT_ACCESS_KEY_ID, cloudfront_utils.rsa_signer ) return cloudfront_signer.generate_presigned_url( url, date_less_than=date_less_than ) def _generate_url(self, obj, object_path, extension=None, content_disposition=None): base = f"{settings.AWS_S3_URL_PROTOCOL}://{settings.CLOUDFRONT_DOMAIN}/{obj.video.pk}" stamp = time_utils.to_timestamp(obj.uploaded_on) mode = f"_{obj.mode}" if obj.mode else "" url = f"{base}/{object_path}/{stamp}_{obj.language:s}{mode:s}" if extension: url = f"{url}.{extension}" if content_disposition: url = f"{url}?response-content-disposition={content_disposition}" return url def get_source_url(self, obj): if obj.uploaded_on and obj.extension: stamp = time_utils.to_timestamp(obj.uploaded_on) filename = f"{slugify(obj.video.playlist.title)}_{stamp}.{obj.extension}" url = self._generate_url( obj, "timedtext/source", content_disposition=quote_plus(f"attachment; filename={filename}"), ) if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE: url = self._sign_url(url) return url return None def get_url(self, obj): if obj.uploaded_on: url = self._generate_url(obj, "timedtext", extension="vtt") if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE: url = self._sign_url(url) return url return None class ThumbnailSerializer(serializers.ModelSerializer): class Meta: model = Thumbnail fields = ( "active_stamp", "id", "is_ready_to_show", "upload_state", "urls", "video", ) read_only_fields = ( "active_stamp", "id", "is_ready_to_show", "upload_state", "urls", "video", ) 
active_stamp = TimestampField( source="uploaded_on", required=False, allow_null=True, read_only=True ) video = serializers.PrimaryKeyRelatedField( read_only=True, pk_field=serializers.CharField() ) is_ready_to_show = serializers.BooleanField(read_only=True) urls = serializers.SerializerMethodField() def create(self, validated_data): user = self.context["request"].user if not validated_data.get("video_id") and isinstance(user, TokenUser): validated_data["video_id"] = user.id return super().create(validated_data)
MIT License
compas-dev/compas
src/compas/datastructures/network/matrices.py
network_adjacency_matrix
python
def network_adjacency_matrix(network, rtype='array'):
    key_index = network.key_index()
    adjacency = [[key_index[nbr] for nbr in network.neighbors(key)] for key in network.nodes()]
    return adjacency_matrix(adjacency, rtype=rtype)
Creates a node adjacency matrix from a Network datastructure.

Parameters
----------
network : obj
    Network datastructure object to get data from.
rtype : {'array', 'csc', 'csr', 'coo', 'list'}
    Format of the result.

Returns
-------
array-like
    Constructed adjacency matrix.
https://github.com/compas-dev/compas/blob/d795a8bfe9f21ffa124d09e37e9c0ed2e3520057/src/compas/datastructures/network/matrices.py#L33-L51
from __future__ import absolute_import from __future__ import division from __future__ import print_function from compas.numerical import adjacency_matrix from compas.numerical import degree_matrix from compas.numerical import connectivity_matrix from compas.numerical import laplacian_matrix __all__ = [ 'network_adjacency_matrix', 'network_degree_matrix', 'network_connectivity_matrix', 'network_laplacian_matrix', ] def _return_matrix(M, rtype): if rtype == 'list': return M.toarray().tolist() if rtype == 'array': return M.toarray() if rtype == 'csr': return M.tocsr() if rtype == 'csc': return M.tocsc() if rtype == 'coo': return M.tocoo() return M
MIT License
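As a standalone illustration (no compas involved), this is the kind of index-based adjacency list the comprehension above produces for a 3-node path graph, and the dense 0/1 matrix that a call with rtype='array' would correspond to:

import numpy as np

# Path graph 0 - 1 - 2 as an index-based adjacency list.
adjacency = [[1], [0, 2], [1]]

n = len(adjacency)
A = np.zeros((n, n), dtype=int)
for node, nbrs in enumerate(adjacency):
    A[node, nbrs] = 1

print(A)
# [[0 1 0]
#  [1 0 1]
#  [0 1 0]]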
laixintao/pingtop
pingtop/ping.py
checksum
python
def checksum(source_string):
    sum = 0
    count_to = int((len(source_string) / 2) * 2)
    for count in range(0, count_to, 2):
        this = source_string[count + 1] * 256 + source_string[count]
        sum = sum + this
        sum = sum & 0xffffffff
    if count_to < len(source_string):
        sum = sum + ord(source_string[len(source_string) - 1])
        sum = sum & 0xffffffff
    sum = (sum >> 16) + (sum & 0xffff)
    sum = sum + (sum >> 16)
    answer = ~sum
    answer = answer & 0xffff
    answer = answer >> 8 | (answer << 8 & 0xff00)
    return answer
I'm not too confident that this is right but testing seems to suggest that it gives the same answers as in_cksum in ping.c
https://github.com/laixintao/pingtop/blob/75353119db1af8635fec85a9bf38722e152c426c/pingtop/ping.py#L110-L134
__version__ = "0.2" import os import select import socket import struct import sys import time ICMP_ECHO_REQUEST = 8
MIT License
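A rough sketch of how such a checksum is typically used when assembling an ICMP echo request: compute it over a header whose checksum field is zero, then repack the header with the result. It assumes checksum and ICMP_ECHO_REQUEST are importable from pingtop.ping as shown in the record's context; whether pingtop builds its packets exactly this way is not shown here.

import socket
import struct

from pingtop.ping import ICMP_ECHO_REQUEST, checksum

# Dummy header (type, code, checksum=0, id, sequence) plus a 56-byte payload.
payload = struct.pack("d", 0.0) + b"Q" * 48
header = struct.pack("bbHHh", ICMP_ECHO_REQUEST, 0, 0, 1, 1)

csum = checksum(header + payload)
header = struct.pack("bbHHh", ICMP_ECHO_REQUEST, 0, socket.htons(csum), 1, 1)
packet = header + payload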
zimbra-community/python-zimbra
pythonzimbra/communication.py
Communication.gen_request
python
def gen_request(self, request_type="json", token=None, set_batch=False, batch_onerror=None):
    if request_type == "json":
        local_request = RequestJson()
    elif request_type == "xml":
        local_request = RequestXml()
    else:
        raise UnknownRequestType()

    if token is not None:
        local_request.set_auth_token(token)

    if set_batch:
        local_request.enable_batch(batch_onerror)

    return local_request
Convenience method to quickly generate a request

:param request_type: Type of request (defaults to json)
:param token: Authentication token
:param set_batch: Also set this request to batch mode?
:param batch_onerror: Onerror-parameter for batch mode
:return: The request
https://github.com/zimbra-community/python-zimbra/blob/1b4b1e0650bfab52f8df402f217ad5873f01d610/pythonzimbra/communication.py#L59-L89
from __future__ import unicode_literals import sys if sys.version < '3': import urllib2 as ur import urllib2 as ue else: import urllib.request as ur import urllib.error as ue from pythonzimbra.request_json import RequestJson from pythonzimbra.request_xml import RequestXml from pythonzimbra.response_xml import ResponseXml from pythonzimbra.response_json import ResponseJson from .exceptions.communication import * class Communication(object): url = None """ URL to the zimbra soap interface """ timeout = None """ Timeout of the request """ context = None """ SSL Context of the request """ def __init__(self, url, timeout=None, context=None): self.url = url self.timeout = timeout self.context = context if sys.version < '3' and self.url.startswith("https"): import tools.urllib2_tls
BSD 2-Clause Simplified License
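A minimal usage sketch for gen_request, based only on the Communication constructor and method shown in the record; the endpoint URL and token are placeholders.

from pythonzimbra.communication import Communication

comm = Communication("https://zimbra.example.com/service/soap")  # placeholder URL

# JSON request carrying a previously obtained auth token (placeholder value).
request = comm.gen_request(request_type="json", token="my-auth-token")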
partho-maple/coding-interview-gym
leetcode.com/python/457_Circular_Array_Loop.py
Solution.circularArrayLoop
python
def circularArrayLoop(self, nums):
    for i in range(len(nums)):
        is_forward = nums[i] >= 0
        slow, fast = i, i

        while True:
            slow = self.find_next_index(nums, is_forward, slow)
            fast = self.find_next_index(nums, is_forward, fast)
            if (fast != -1):
                fast = self.find_next_index(nums, is_forward, fast)
            if slow == -1 or fast == -1 or slow == fast:
                break

        if slow != -1 and slow == fast:
            return True

    return False
:type nums: List[int]
:rtype: bool
https://github.com/partho-maple/coding-interview-gym/blob/f11c78b6e42d1014296fc0f360aa6fc530600493/leetcode.com/python/457_Circular_Array_Loop.py#L2-L26
class Solution(object):
MIT License
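The record's find_next_index helper is not shown above, so this is a self-contained sketch of the fast/slow-pointer idea with a hypothetical helper that follows the contract implied by the calls: it returns -1 when the direction flips or the step lands back on the same index.

def find_next_index(nums, is_forward, current):
    # Hypothetical helper: advance one step, or return -1 on a direction
    # change or a one-element cycle.
    if (nums[current] >= 0) != is_forward:
        return -1
    next_index = (current + nums[current]) % len(nums)
    if next_index == current:
        return -1
    return next_index

# nums has the forward cycle 0 -> 2 -> 3 -> 0.
nums = [2, -1, 1, 2, 2]
slow = fast = 0
is_forward = nums[0] >= 0
while True:
    slow = find_next_index(nums, is_forward, slow)
    fast = find_next_index(nums, is_forward, fast)
    if fast != -1:
        fast = find_next_index(nums, is_forward, fast)
    if slow == -1 or fast == -1 or slow == fast:
        break
print(slow != -1 and slow == fast)  # True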
mpi4jax/mpi4jax
examples/shallow_water.py
get_initial_conditions
python
def get_initial_conditions():
    u0_global = 10 * jnp.exp(
        -((yy_global - 0.5 * length_y) ** 2) / (0.02 * length_x) ** 2
    )
    v0_global = jnp.zeros_like(u0_global)

    coriolis_global = CORIOLIS_F + yy_global * CORIOLIS_BETA
    h_geostrophy = np.cumsum(-dy * u0_global * coriolis_global / GRAVITY, axis=0)
    h0_global = (
        DEPTH
        + h_geostrophy
        - h_geostrophy.mean()
        + 0.2 * np.sin(xx_global / length_x * 10 * np.pi)
        * np.cos(yy_global / length_y * 8 * np.pi)
    )

    h0_local = h0_global[local_slice]
    u0_local = u0_global[local_slice]
    v0_local = v0_global[local_slice]

    token = jax.lax.create_token()
    h0_local, token = enforce_boundaries(h0_local, "h", token)
    u0_local, token = enforce_boundaries(u0_local, "u", token)
    v0_local, token = enforce_boundaries(v0_local, "v", token)

    return h0_local, u0_local, v0_local
For the initial conditions, we use a horizontal jet in geostrophic balance.
https://github.com/mpi4jax/mpi4jax/blob/e3ed6f00a5552099f260c6b1f68588917461403b/examples/shallow_water.py#L139-L170
import os import sys import math import time import warnings from contextlib import ExitStack from collections import namedtuple from functools import partial import numpy as np from mpi4py import MPI try: import tqdm except ImportError: warnings.warn("Could not import tqdm, can't show progress bar") HAS_TQDM = False else: HAS_TQDM = True mpi_comm = MPI.COMM_WORLD mpi_rank = mpi_comm.Get_rank() mpi_size = mpi_comm.Get_size() os.environ["CUDA_VISIBLE_DEVICES"] = str(mpi_rank) import jax import jax.numpy as jnp import mpi4jax supported_nproc = (1, 2, 4, 6, 8, 16) if mpi_size not in supported_nproc: raise RuntimeError( f"Got invalid number of MPI processes: {mpi_size}. " f"Please choose one of these: {supported_nproc}." ) nproc_y = min(mpi_size, 2) nproc_x = mpi_size // nproc_y proc_idx = np.unravel_index(mpi_rank, (nproc_y, nproc_x)) nx_global = 360 + 2 ny_global = 180 + 2 dx = 5e3 dy = 5e3 assert (nx_global - 2) % nproc_x == 0 assert (ny_global - 2) % nproc_y == 0 nx_local = (nx_global - 2) // nproc_x + 2 ny_local = (ny_global - 2) // nproc_y + 2 x_global, y_global = ( np.arange(-1, nx_global - 1) * dx, np.arange(-1, ny_global - 1) * dy, ) yy_global, xx_global = np.meshgrid(y_global, x_global, indexing="ij") length_x = x_global[-2] - x_global[1] length_y = y_global[-2] - y_global[1] local_slice = ( slice((ny_local - 2) * proc_idx[0], (ny_local - 2) * proc_idx[0] + ny_local), slice((nx_local - 2) * proc_idx[1], (nx_local - 2) * proc_idx[1] + nx_local), ) x_local = x_global[local_slice[1]] y_local = y_global[local_slice[0]] xx_local = xx_global[local_slice] yy_local = yy_global[local_slice] GRAVITY = 9.81 DEPTH = 100.0 CORIOLIS_F = 2e-4 CORIOLIS_BETA = 2e-11 CORIOLIS_PARAM = CORIOLIS_F + yy_local * CORIOLIS_BETA LATERAL_VISCOSITY = 1e-3 * CORIOLIS_F * dx ** 2 DAY_IN_SECONDS = 86_400 PERIODIC_BOUNDARY_X = True ADAMS_BASHFORTH_A = 1.5 + 0.1 ADAMS_BASHFORTH_B = -(0.5 + 0.1) PLOT_ETA_RANGE = 10 PLOT_EVERY = 100 MAX_QUIVERS = (25, 50) dt = 0.125 * min(dx, dy) / np.sqrt(GRAVITY * DEPTH) @jax.jit
MIT License
elsonidoq/fito
fito/data_store/base.py
BaseDataStore.get_id
python
def get_id(self, spec):
    raise NotImplementedError()
Gets the internal id of a given spec; it should raise KeyError if the spec is not in self.
https://github.com/elsonidoq/fito/blob/e76ab0a9a4eb954b6d88c190cb57d112f94739e1/fito/data_store/base.py#L82-L86
from fito import config import warnings from functools import wraps from fito import Spec from fito.data_store.rehash_ui import RehashUI from fito.operation_runner import FifoCache, OperationRunner from fito.operations.decorate import as_operation from fito.specs.base import get_import_path from fito.specs.fields import NumericField, PrimitiveField from fito.specs.utils import matching_fields class BaseDataStore(OperationRunner): get_cache_size = NumericField(default=0) verbose = PrimitiveField(default=False, serialize=False) def __init__(self, *args, **kwargs): super(BaseDataStore, self).__init__(*args, **kwargs) if self.get_cache_size > 0: self.get_cache = FifoCache(self.get_cache_size) else: self.get_cache = None @classmethod def get_key(cls, spec): if isinstance(spec, Spec): return spec.key else: assert isinstance(spec, dict) return Spec._dict2key(spec) def get(self, spec): def _get(): try: return self._get(spec) except KeyError, e: if config.interactive_rehash and spec not in RehashUI.ignored_specs: if self.interactive_rehash(spec): return self.get(spec) else: raise e else: raise e if self.get_cache is None: return _get() else: try: return self.get_cache[spec] except KeyError: res = _get() self.get_cache.set(spec, res) return res def _get(self, spec): raise NotImplementedError()
MIT License
dedsecinside/awesome-scripts
APIs/Telegram API/telethon/client/telegrambaseclient.py
TelegramBaseClient._create_exported_sender
python
async def _create_exported_sender(self: 'TelegramClient', dc_id):
    dc = await self._get_dc(dc_id)
    sender = MTProtoSender(None, loggers=self._log)
    await sender.connect(self._connection(
        dc.ip_address,
        dc.port,
        dc.id,
        loggers=self._log,
        proxy=self._proxy,
        local_addr=self._local_addr
    ))
    self._log[__name__].info('Exporting auth for new borrowed sender in %s', dc)
    auth = await self(functions.auth.ExportAuthorizationRequest(dc_id))
    req = self._init_with(functions.auth.ImportAuthorizationRequest(
        id=auth.id, bytes=auth.bytes
    ))
    await sender.send(req)
    return sender
Creates a new exported `MTProtoSender` for the given `dc_id` and returns it. This method should be used by `_borrow_exported_sender`.
https://github.com/dedsecinside/awesome-scripts/blob/856835e5ff5f8a6af2d74bb25800c620feb712e3/APIs/Telegram API/telethon/client/telegrambaseclient.py#L666-L693
import abc import re import asyncio import collections import logging import platform import time import typing from .. import version, helpers, __name__ as __base_name__ from ..crypto import rsa from ..entitycache import EntityCache from ..extensions import markdown from ..network import MTProtoSender, Connection, ConnectionTcpFull, TcpMTProxy from ..sessions import Session, SQLiteSession, MemorySession from ..statecache import StateCache from ..tl import functions, types from ..tl.alltlobjects import LAYER DEFAULT_DC_ID = 2 DEFAULT_IPV4_IP = '149.154.167.51' DEFAULT_IPV6_IP = '2001:67c:4e8:f002::a' DEFAULT_PORT = 443 if typing.TYPE_CHECKING: from .telegramclient import TelegramClient __default_log__ = logging.getLogger(__base_name__) __default_log__.addHandler(logging.NullHandler()) _DISCONNECT_EXPORTED_AFTER = 60 class _ExportState: def __init__(self): self._n = 0 self._zero_ts = 0 self._connected = False def add_borrow(self): self._n += 1 self._connected = True def add_return(self): self._n -= 1 assert self._n >= 0, 'returned sender more than it was borrowed' if self._n == 0: self._zero_ts = time.time() def should_disconnect(self): return (self._n == 0 and self._connected and (time.time() - self._zero_ts) > _DISCONNECT_EXPORTED_AFTER) def need_connect(self): return not self._connected def mark_disconnected(self): assert self.should_disconnect(), 'marked as disconnected when it was borrowed' self._connected = False class TelegramBaseClient(abc.ABC): __version__ = version.__version__ _config = None _cdn_config = None def __init__( self: 'TelegramClient', session: 'typing.Union[str, Session]', api_id: int, api_hash: str, *, connection: 'typing.Type[Connection]' = ConnectionTcpFull, use_ipv6: bool = False, proxy: typing.Union[tuple, dict] = None, local_addr=None, timeout: int = 10, request_retries: int = 5, connection_retries: int =5, retry_delay: int = 1, auto_reconnect: bool = True, sequential_updates: bool = False, flood_sleep_threshold: int = 60, raise_last_call_error: bool = False, device_model: str = None, system_version: str = None, app_version: str = None, lang_code: str = 'en', system_lang_code: str = 'en', loop: asyncio.AbstractEventLoop = None, base_logger: typing.Union[str, logging.Logger] = None): if not api_id or not api_hash: raise ValueError( "Your API ID or Hash cannot be empty or None. " "Refer to telethon.rtfd.io for more information.") self._use_ipv6 = use_ipv6 self._loop = asyncio.get_event_loop() if isinstance(base_logger, str): base_logger = logging.getLogger(base_logger) elif not isinstance(base_logger, logging.Logger): base_logger = __default_log__ class _Loggers(dict): def __missing__(self, key): if key.startswith("telethon."): key = key.split('.', maxsplit=1)[1] return base_logger.getChild(key) self._log = _Loggers() if isinstance(session, str) or session is None: try: session = SQLiteSession(session) except ImportError: import warnings warnings.warn( 'The sqlite3 module is not available under this ' 'Python installation and no custom session ' 'instance was given; using MemorySession.\n' 'You will need to re-login every time unless ' 'you use another session storage' ) session = MemorySession() elif not isinstance(session, Session): raise TypeError( 'The given session must be a str or a Session instance.' 
) if (not session.server_address or (':' in session.server_address) != use_ipv6): session.set_dc( DEFAULT_DC_ID, DEFAULT_IPV6_IP if self._use_ipv6 else DEFAULT_IPV4_IP, DEFAULT_PORT ) self.flood_sleep_threshold = flood_sleep_threshold self.session = session self._entity_cache = EntityCache() self.api_id = int(api_id) self.api_hash = api_hash if not callable(getattr(self._loop, 'sock_connect', None)): raise TypeError( 'Event loop of type {} lacks `sock_connect`, which is needed to use proxies.\n\n' 'Change the event loop in use to use proxies:\n' '# https://github.com/LonamiWebs/Telethon/issues/1337\n' 'import asyncio\n' 'asyncio.set_event_loop(asyncio.SelectorEventLoop())'.format( self._loop.__class__.__name__ ) ) if local_addr is not None: if use_ipv6 is False and ':' in local_addr: raise TypeError( 'A local IPv6 address must only be used with `use_ipv6=True`.' ) elif use_ipv6 is True and ':' not in local_addr: raise TypeError( '`use_ipv6=True` must only be used with a local IPv6 address.' ) self._raise_last_call_error = raise_last_call_error self._request_retries = request_retries self._connection_retries = connection_retries self._retry_delay = retry_delay or 0 self._proxy = proxy self._local_addr = local_addr self._timeout = timeout self._auto_reconnect = auto_reconnect assert isinstance(connection, type) self._connection = connection init_proxy = None if not issubclass(connection, TcpMTProxy) else types.InputClientProxy(*connection.address_info(proxy)) system = platform.uname() if system.machine in ('x86_64', 'AMD64'): default_device_model = 'PC 64bit' elif system.machine in ('i386','i686','x86'): default_device_model = 'PC 32bit' else: default_device_model = system.machine default_system_version = re.sub(r'-.+','',system.release) self._init_with = lambda x: functions.InvokeWithLayerRequest( LAYER, functions.InitConnectionRequest( api_id=self.api_id, device_model=device_model or default_device_model or 'Unknown', system_version=system_version or default_system_version or '1.0', app_version=app_version or self.__version__, lang_code=lang_code, system_lang_code=system_lang_code, lang_pack='', query=x, proxy=init_proxy ) ) self._sender = MTProtoSender( self.session.auth_key, loggers=self._log, retries=self._connection_retries, delay=self._retry_delay, auto_reconnect=self._auto_reconnect, connect_timeout=self._timeout, auth_key_callback=self._auth_key_callback, update_callback=self._handle_update, auto_reconnect_callback=self._handle_auto_reconnect ) self._flood_waited_requests = {} self._borrowed_senders = {} self._borrow_sender_lock = asyncio.Lock() self._updates_handle = None self._last_request = time.time() self._channel_pts = {} if sequential_updates: self._updates_queue = asyncio.Queue() self._dispatching_updates_queue = asyncio.Event() else: self._updates_queue = set() self._dispatching_updates_queue = None self._authorized = None self._state_cache = StateCache( self.session.get_update_state(0), self._log) self._event_builders = [] self._conversations = collections.defaultdict(set) self._albums = {} self._parse_mode = markdown self._phone_code_hash = {} self._phone = None self._tos = None self._self_input_peer = None self._bot = None self._megagroup_cache = {} @property def loop(self: 'TelegramClient') -> asyncio.AbstractEventLoop: return self._loop @property def disconnected(self: 'TelegramClient') -> asyncio.Future: return self._sender.disconnected @property def flood_sleep_threshold(self): return self._flood_sleep_threshold @flood_sleep_threshold.setter def 
flood_sleep_threshold(self, value): self._flood_sleep_threshold = min(value or 0, 24 * 60 * 60) async def connect(self: 'TelegramClient') -> None: if not await self._sender.connect(self._connection( self.session.server_address, self.session.port, self.session.dc_id, loggers=self._log, proxy=self._proxy, local_addr=self._local_addr )): return self.session.auth_key = self._sender.auth_key self.session.save() await self._sender.send(self._init_with( functions.help.GetConfigRequest())) self._updates_handle = self._loop.create_task(self._update_loop()) def is_connected(self: 'TelegramClient') -> bool: sender = getattr(self, '_sender', None) return sender and sender.is_connected() def disconnect(self: 'TelegramClient'): if self._loop.is_running(): return self._disconnect_coro() else: try: self._loop.run_until_complete(self._disconnect_coro()) except RuntimeError: pass async def _disconnect_coro(self: 'TelegramClient'): await self._disconnect() async with self._borrow_sender_lock: for state, sender in self._borrowed_senders.values(): if state.should_disconnect(): await sender.disconnect() self._borrowed_senders.clear() if self._dispatching_updates_queue is None and self._updates_queue: for task in self._updates_queue: task.cancel() await asyncio.wait(self._updates_queue) self._updates_queue.clear() pts, date = self._state_cache[None] if pts and date: self.session.set_update_state(0, types.updates.State( pts=pts, qts=0, date=date, seq=0, unread_count=0 )) self.session.close() async def _disconnect(self: 'TelegramClient'): await self._sender.disconnect() await helpers._cancel(self._log[__name__], updates_handle=self._updates_handle) async def _switch_dc(self: 'TelegramClient', new_dc): self._log[__name__].info('Reconnecting to new data center %s', new_dc) dc = await self._get_dc(new_dc) self.session.set_dc(dc.id, dc.ip_address, dc.port) self._sender.auth_key.key = None self.session.auth_key = None self.session.save() await self._disconnect() return await self.connect() def _auth_key_callback(self: 'TelegramClient', auth_key): self.session.auth_key = auth_key self.session.save() async def _get_dc(self: 'TelegramClient', dc_id, cdn=False): cls = self.__class__ if not cls._config: cls._config = await self(functions.help.GetConfigRequest()) if cdn and not self._cdn_config: cls._cdn_config = await self(functions.help.GetCdnConfigRequest()) for pk in cls._cdn_config.public_keys: rsa.add_key(pk.public_key) return next( dc for dc in cls._config.dc_options if dc.id == dc_id and bool(dc.ipv6) == self._use_ipv6 and bool(dc.cdn) == cdn )
MIT License
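The Telethon client excerpt above covers the connection lifecycle (connect, is_connected, disconnect). Below is a minimal sketch of driving that lifecycle, assuming placeholder API credentials and an arbitrary session name; it only exercises the public entry points shown and is not part of the library itself.

import asyncio
from telethon import TelegramClient

API_ID = 12345                  # placeholder credential, not a real api_id
API_HASH = '0123456789abcdef'   # placeholder credential

async def main():
    client = TelegramClient('demo_session', API_ID, API_HASH)
    await client.connect()                     # runs connect() from the excerpt
    print('connected:', client.is_connected())
    await client.disconnect()                  # awaits _disconnect_coro() internally

asyncio.run(main())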
libcity/bigscity-libcity-datasets
old_backup/nyc_taxi_od.py
convert_to_trajectory
python
def convert_to_trajectory(df):
    start = df[['drive_id', 'PULocationID', 'tpep_pickup_datetime']]
    end = df[['drive_id', 'DOLocationID', 'tpep_dropoff_datetime']]
    start.columns = ['driveid', 'geo_id', 'time_str']
    end.columns = ['driveid', 'geo_id', 'time_str']
    trajectory_data = pd.concat((start, end), axis=0)
    trajectory_data = trajectory_data.loc[trajectory_data['geo_id'].apply(lambda x: not math.isnan(x))]
    trajectory_data = convert_time(trajectory_data)
    return trajectory_data[['driveid', 'geo_id', 'time', 'timestamp']]
:param df: all data
:return: df['driveid', 'geo_id', 'time', 'timestamp']
https://github.com/libcity/bigscity-libcity-datasets/blob/9d686af4731d7db821298345734926c0437703e6/old_backup/nyc_taxi_od.py#L95-L107
import json import math import os from datetime import datetime import numpy as np import pandas as pd old_time_format = '%Y-%m-%d %H:%M:%S' new_time_format = '%Y-%m-%dT%H:%M:%SZ' def get_data_url(input_dir_flow, start_year, start_month, end_year, end_month): pattern = input_dir_flow + "/yellow_tripdata_%d-%02d.csv" data_url = [] i = start_year while i <= end_year: j = start_month if i == start_year else 1 end_j = end_month if i == end_year else 12 while j <= end_j: data_url.append(pattern % (i, j)) j += 1 i += 1 return data_url def handle_area_geo(df): start = df[['PULocationID']] start.columns = ['a_id'] end = df[['DOLocationID']] end.columns = ['a_id'] area_data = pd.concat((start, end), axis=0) area_data['a_id'] = area_data['a_id'].astype('int') area_data = area_data.sort_values(by='a_id', ascending=True) area_data = area_data[['a_id']] area_data = area_data.drop_duplicates() return area_data def judge_id(value, dividing_points, equally=True): if equally: min_v = dividing_points[0] interval = dividing_points[1] - dividing_points[0] idx = int((value - min_v) / interval) max_id = len(dividing_points) - 2 return min(max_id, idx) else: for i, num in enumerate(dividing_points): if value <= num: return i - 1 return len(dividing_points) def get_geo_data(area_geo): geo_data = pd.DataFrame( columns=['geo_id', 'type', 'coordinates']) geo_data['geo_id'] = area_geo['a_id'] geo_data.loc[:, 'type'] = 'state' geo_data.loc[:, 'coordinates'] = '[[ [] ]]' return geo_data def convert_time(df): df['time'] = df.apply( lambda x: x['time_str'].replace(' ', 'T') + 'Z', axis=1) df['timestamp'] = df.apply( lambda x: float(datetime.timestamp( pd.to_datetime(x['time_str'], utc=True, format=old_time_format))), axis=1) return df
Apache License 2.0
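A small, self-contained sketch of the pickup/dropoff reshaping performed by convert_to_trajectory above; the DataFrame rows are invented, and dropna stands in for the math.isnan filter used in the original.

import pandas as pd

df = pd.DataFrame({
    'drive_id': [0, 1],
    'PULocationID': [132.0, 48.0],
    'DOLocationID': [236.0, float('nan')],   # one missing dropoff zone
    'tpep_pickup_datetime': ['2020-01-01 00:05:00', '2020-01-01 00:10:00'],
    'tpep_dropoff_datetime': ['2020-01-01 00:25:00', '2020-01-01 00:31:00'],
})

start = df[['drive_id', 'PULocationID', 'tpep_pickup_datetime']]
end = df[['drive_id', 'DOLocationID', 'tpep_dropoff_datetime']]
start.columns = ['driveid', 'geo_id', 'time_str']
end.columns = ['driveid', 'geo_id', 'time_str']

# Stack pickups and dropoffs into one trajectory table and drop rows with a
# missing geo_id, mirroring the math.isnan filter in convert_to_trajectory.
trajectory = pd.concat((start, end), axis=0).dropna(subset=['geo_id'])
print(trajectory)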
ericsson/codechecker
codechecker_common/skiplist_handler.py
SkipListHandler.__call__
python
def __call__(self, source_file_path: str) -> bool:
    return self.should_skip(source_file_path)
Check if the given source should be skipped.
https://github.com/ericsson/codechecker/blob/d2db1b49e8a2b775d41436406b5a2e5d9af76c0f/codechecker_common/skiplist_handler.py#L105-L109
import fnmatch import re import os from codechecker_common.logger import get_logger LOG = get_logger('system') class SkipListHandler: def __init__(self, skip_file_content=""): self.__skip = [] if not skip_file_content: skip_file_content = "" self.__skip_file_lines = [line.strip() for line in skip_file_content.splitlines() if line.strip()] valid_lines = self.__check_line_format(self.__skip_file_lines) self.__gen_regex(valid_lines) def __gen_regex(self, skip_lines): for skip_line in skip_lines: norm_skip_path = os.path.normpath(skip_line[1:].strip()) rexpr = re.compile( fnmatch.translate(norm_skip_path + '*')) self.__skip.append((skip_line, rexpr)) def __check_line_format(self, skip_lines): valid_lines = [] for line in skip_lines: if len(line) < 2 or line[0] not in ['-', '+']: LOG.warning("Skipping malformed skipfile pattern: %s", line) continue valid_lines.append(line) return valid_lines @property def skip_file_lines(self): return self.__skip_file_lines def overwrite_skip_content(self, skip_lines): self.__skip = [] valid_lines = self.__check_line_format(skip_lines) self.__gen_regex(valid_lines) def should_skip(self, source): if not self.__skip: return False for line, rexpr in self.__skip: if rexpr.match(source): sign = line[0] return sign == '-' return False
Apache License 2.0
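A usage sketch for SkipListHandler.__call__ above, assuming the codechecker_common package from the excerpt is importable; the skip-file content and paths are made up, using the '-' (skip) / '+' (keep) pattern syntax the class parses, with the first matching rule winning.

from codechecker_common.skiplist_handler import SkipListHandler

skip_content = """
-*/3rd_party/*
+*/src/important.cpp
-*/src/*
"""

handler = SkipListHandler(skip_content)
print(handler('/home/user/project/3rd_party/lib.cpp'))    # True  -> skipped
print(handler('/home/user/project/src/important.cpp'))    # False -> analysed
print(handler('/home/user/project/src/other.cpp'))         # True  -> skipped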
docusign/docusign-python-client
docusign_esign/models/commission_number.py
CommissionNumber.anchor_x_offset
python
def anchor_x_offset(self):
    return self._anchor_x_offset
Gets the anchor_x_offset of this CommissionNumber.  # noqa: E501

Specifies the X axis location of the tab, in anchorUnits, relative to the anchorString.  # noqa: E501

:return: The anchor_x_offset of this CommissionNumber.  # noqa: E501
:rtype: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/commission_number.py#L803-L811
import pprint import re import six from docusign_esign.client.configuration import Configuration class CommissionNumber(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'anchor_allow_white_space_in_characters': 'str', 'anchor_allow_white_space_in_characters_metadata': 'PropertyMetadata', 'anchor_case_sensitive': 'str', 'anchor_case_sensitive_metadata': 'PropertyMetadata', 'anchor_horizontal_alignment': 'str', 'anchor_horizontal_alignment_metadata': 'PropertyMetadata', 'anchor_ignore_if_not_present': 'str', 'anchor_ignore_if_not_present_metadata': 'PropertyMetadata', 'anchor_match_whole_word': 'str', 'anchor_match_whole_word_metadata': 'PropertyMetadata', 'anchor_string': 'str', 'anchor_string_metadata': 'PropertyMetadata', 'anchor_tab_processor_version': 'str', 'anchor_tab_processor_version_metadata': 'PropertyMetadata', 'anchor_units': 'str', 'anchor_units_metadata': 'PropertyMetadata', 'anchor_x_offset': 'str', 'anchor_x_offset_metadata': 'PropertyMetadata', 'anchor_y_offset': 'str', 'anchor_y_offset_metadata': 'PropertyMetadata', 'bold': 'str', 'bold_metadata': 'PropertyMetadata', 'conceal_value_on_document': 'str', 'conceal_value_on_document_metadata': 'PropertyMetadata', 'conditional_parent_label': 'str', 'conditional_parent_label_metadata': 'PropertyMetadata', 'conditional_parent_value': 'str', 'conditional_parent_value_metadata': 'PropertyMetadata', 'custom_tab_id': 'str', 'custom_tab_id_metadata': 'PropertyMetadata', 'disable_auto_size': 'str', 'disable_auto_size_metadata': 'PropertyMetadata', 'document_id': 'str', 'document_id_metadata': 'PropertyMetadata', 'error_details': 'ErrorDetails', 'font': 'str', 'font_color': 'str', 'font_color_metadata': 'PropertyMetadata', 'font_metadata': 'PropertyMetadata', 'font_size': 'str', 'font_size_metadata': 'PropertyMetadata', 'form_order': 'str', 'form_order_metadata': 'PropertyMetadata', 'form_page_label': 'str', 'form_page_label_metadata': 'PropertyMetadata', 'form_page_number': 'str', 'form_page_number_metadata': 'PropertyMetadata', 'height': 'str', 'height_metadata': 'PropertyMetadata', 'italic': 'str', 'italic_metadata': 'PropertyMetadata', 'locale_policy': 'LocalePolicyTab', 'locked': 'str', 'locked_metadata': 'PropertyMetadata', 'max_length': 'str', 'max_length_metadata': 'PropertyMetadata', 'merge_field': 'MergeField', 'merge_field_xml': 'str', 'name': 'str', 'name_metadata': 'PropertyMetadata', 'original_value': 'str', 'original_value_metadata': 'PropertyMetadata', 'page_number': 'str', 'page_number_metadata': 'PropertyMetadata', 'recipient_id': 'str', 'recipient_id_guid': 'str', 'recipient_id_guid_metadata': 'PropertyMetadata', 'recipient_id_metadata': 'PropertyMetadata', 'required': 'str', 'required_metadata': 'PropertyMetadata', 'smart_contract_information': 'SmartContractInformation', 'source': 'str', 'status': 'str', 'status_metadata': 'PropertyMetadata', 'tab_group_labels': 'list[str]', 'tab_group_labels_metadata': 'PropertyMetadata', 'tab_id': 'str', 'tab_id_metadata': 'PropertyMetadata', 'tab_label': 'str', 'tab_label_metadata': 'PropertyMetadata', 'tab_order': 'str', 'tab_order_metadata': 'PropertyMetadata', 'tab_type': 'str', 'tab_type_metadata': 'PropertyMetadata', 'template_locked': 'str', 'template_locked_metadata': 'PropertyMetadata', 'template_required': 'str', 'template_required_metadata': 'PropertyMetadata', 'tooltip': 'str', 'tool_tip_metadata': 
'PropertyMetadata', 'underline': 'str', 'underline_metadata': 'PropertyMetadata', 'value': 'str', 'value_metadata': 'PropertyMetadata', 'width': 'str', 'width_metadata': 'PropertyMetadata', 'x_position': 'str', 'x_position_metadata': 'PropertyMetadata', 'y_position': 'str', 'y_position_metadata': 'PropertyMetadata' } attribute_map = { 'anchor_allow_white_space_in_characters': 'anchorAllowWhiteSpaceInCharacters', 'anchor_allow_white_space_in_characters_metadata': 'anchorAllowWhiteSpaceInCharactersMetadata', 'anchor_case_sensitive': 'anchorCaseSensitive', 'anchor_case_sensitive_metadata': 'anchorCaseSensitiveMetadata', 'anchor_horizontal_alignment': 'anchorHorizontalAlignment', 'anchor_horizontal_alignment_metadata': 'anchorHorizontalAlignmentMetadata', 'anchor_ignore_if_not_present': 'anchorIgnoreIfNotPresent', 'anchor_ignore_if_not_present_metadata': 'anchorIgnoreIfNotPresentMetadata', 'anchor_match_whole_word': 'anchorMatchWholeWord', 'anchor_match_whole_word_metadata': 'anchorMatchWholeWordMetadata', 'anchor_string': 'anchorString', 'anchor_string_metadata': 'anchorStringMetadata', 'anchor_tab_processor_version': 'anchorTabProcessorVersion', 'anchor_tab_processor_version_metadata': 'anchorTabProcessorVersionMetadata', 'anchor_units': 'anchorUnits', 'anchor_units_metadata': 'anchorUnitsMetadata', 'anchor_x_offset': 'anchorXOffset', 'anchor_x_offset_metadata': 'anchorXOffsetMetadata', 'anchor_y_offset': 'anchorYOffset', 'anchor_y_offset_metadata': 'anchorYOffsetMetadata', 'bold': 'bold', 'bold_metadata': 'boldMetadata', 'conceal_value_on_document': 'concealValueOnDocument', 'conceal_value_on_document_metadata': 'concealValueOnDocumentMetadata', 'conditional_parent_label': 'conditionalParentLabel', 'conditional_parent_label_metadata': 'conditionalParentLabelMetadata', 'conditional_parent_value': 'conditionalParentValue', 'conditional_parent_value_metadata': 'conditionalParentValueMetadata', 'custom_tab_id': 'customTabId', 'custom_tab_id_metadata': 'customTabIdMetadata', 'disable_auto_size': 'disableAutoSize', 'disable_auto_size_metadata': 'disableAutoSizeMetadata', 'document_id': 'documentId', 'document_id_metadata': 'documentIdMetadata', 'error_details': 'errorDetails', 'font': 'font', 'font_color': 'fontColor', 'font_color_metadata': 'fontColorMetadata', 'font_metadata': 'fontMetadata', 'font_size': 'fontSize', 'font_size_metadata': 'fontSizeMetadata', 'form_order': 'formOrder', 'form_order_metadata': 'formOrderMetadata', 'form_page_label': 'formPageLabel', 'form_page_label_metadata': 'formPageLabelMetadata', 'form_page_number': 'formPageNumber', 'form_page_number_metadata': 'formPageNumberMetadata', 'height': 'height', 'height_metadata': 'heightMetadata', 'italic': 'italic', 'italic_metadata': 'italicMetadata', 'locale_policy': 'localePolicy', 'locked': 'locked', 'locked_metadata': 'lockedMetadata', 'max_length': 'maxLength', 'max_length_metadata': 'maxLengthMetadata', 'merge_field': 'mergeField', 'merge_field_xml': 'mergeFieldXml', 'name': 'name', 'name_metadata': 'nameMetadata', 'original_value': 'originalValue', 'original_value_metadata': 'originalValueMetadata', 'page_number': 'pageNumber', 'page_number_metadata': 'pageNumberMetadata', 'recipient_id': 'recipientId', 'recipient_id_guid': 'recipientIdGuid', 'recipient_id_guid_metadata': 'recipientIdGuidMetadata', 'recipient_id_metadata': 'recipientIdMetadata', 'required': 'required', 'required_metadata': 'requiredMetadata', 'smart_contract_information': 'smartContractInformation', 'source': 'source', 'status': 'status', 
'status_metadata': 'statusMetadata', 'tab_group_labels': 'tabGroupLabels', 'tab_group_labels_metadata': 'tabGroupLabelsMetadata', 'tab_id': 'tabId', 'tab_id_metadata': 'tabIdMetadata', 'tab_label': 'tabLabel', 'tab_label_metadata': 'tabLabelMetadata', 'tab_order': 'tabOrder', 'tab_order_metadata': 'tabOrderMetadata', 'tab_type': 'tabType', 'tab_type_metadata': 'tabTypeMetadata', 'template_locked': 'templateLocked', 'template_locked_metadata': 'templateLockedMetadata', 'template_required': 'templateRequired', 'template_required_metadata': 'templateRequiredMetadata', 'tooltip': 'tooltip', 'tool_tip_metadata': 'toolTipMetadata', 'underline': 'underline', 'underline_metadata': 'underlineMetadata', 'value': 'value', 'value_metadata': 'valueMetadata', 'width': 'width', 'width_metadata': 'widthMetadata', 'x_position': 'xPosition', 'x_position_metadata': 'xPositionMetadata', 'y_position': 'yPosition', 'y_position_metadata': 'yPositionMetadata' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._anchor_allow_white_space_in_characters = None self._anchor_allow_white_space_in_characters_metadata = None self._anchor_case_sensitive = None self._anchor_case_sensitive_metadata = None self._anchor_horizontal_alignment = None self._anchor_horizontal_alignment_metadata = None self._anchor_ignore_if_not_present = None self._anchor_ignore_if_not_present_metadata = None self._anchor_match_whole_word = None self._anchor_match_whole_word_metadata = None self._anchor_string = None self._anchor_string_metadata = None self._anchor_tab_processor_version = None self._anchor_tab_processor_version_metadata = None self._anchor_units = None self._anchor_units_metadata = None self._anchor_x_offset = None self._anchor_x_offset_metadata = None self._anchor_y_offset = None self._anchor_y_offset_metadata = None self._bold = None self._bold_metadata = None self._conceal_value_on_document = None self._conceal_value_on_document_metadata = None self._conditional_parent_label = None self._conditional_parent_label_metadata = None self._conditional_parent_value = None self._conditional_parent_value_metadata = None self._custom_tab_id = None self._custom_tab_id_metadata = None self._disable_auto_size = None self._disable_auto_size_metadata = None self._document_id = None self._document_id_metadata = None self._error_details = None self._font = None self._font_color = None self._font_color_metadata = None self._font_metadata = None self._font_size = None self._font_size_metadata = None self._form_order = None self._form_order_metadata = None self._form_page_label = None self._form_page_label_metadata = None self._form_page_number = None self._form_page_number_metadata = None self._height = None self._height_metadata = None self._italic = None self._italic_metadata = None self._locale_policy = None self._locked = None self._locked_metadata = None self._max_length = None self._max_length_metadata = None self._merge_field = None self._merge_field_xml = None self._name = None self._name_metadata = None self._original_value = None self._original_value_metadata = None self._page_number = None self._page_number_metadata = None self._recipient_id = None self._recipient_id_guid = None self._recipient_id_guid_metadata = None self._recipient_id_metadata = None self._required = None self._required_metadata = None self._smart_contract_information = None self._source = None self._status = None self._status_metadata = None self._tab_group_labels = 
None self._tab_group_labels_metadata = None self._tab_id = None self._tab_id_metadata = None self._tab_label = None self._tab_label_metadata = None self._tab_order = None self._tab_order_metadata = None self._tab_type = None self._tab_type_metadata = None self._template_locked = None self._template_locked_metadata = None self._template_required = None self._template_required_metadata = None self._tooltip = None self._tool_tip_metadata = None self._underline = None self._underline_metadata = None self._value = None self._value_metadata = None self._width = None self._width_metadata = None self._x_position = None self._x_position_metadata = None self._y_position = None self._y_position_metadata = None self.discriminator = None setattr(self, "_{}".format('anchor_allow_white_space_in_characters'), kwargs.get('anchor_allow_white_space_in_characters', None)) setattr(self, "_{}".format('anchor_allow_white_space_in_characters_metadata'), kwargs.get('anchor_allow_white_space_in_characters_metadata', None)) setattr(self, "_{}".format('anchor_case_sensitive'), kwargs.get('anchor_case_sensitive', None)) setattr(self, "_{}".format('anchor_case_sensitive_metadata'), kwargs.get('anchor_case_sensitive_metadata', None)) setattr(self, "_{}".format('anchor_horizontal_alignment'), kwargs.get('anchor_horizontal_alignment', None)) setattr(self, "_{}".format('anchor_horizontal_alignment_metadata'), kwargs.get('anchor_horizontal_alignment_metadata', None)) setattr(self, "_{}".format('anchor_ignore_if_not_present'), kwargs.get('anchor_ignore_if_not_present', None)) setattr(self, "_{}".format('anchor_ignore_if_not_present_metadata'), kwargs.get('anchor_ignore_if_not_present_metadata', None)) setattr(self, "_{}".format('anchor_match_whole_word'), kwargs.get('anchor_match_whole_word', None)) setattr(self, "_{}".format('anchor_match_whole_word_metadata'), kwargs.get('anchor_match_whole_word_metadata', None)) setattr(self, "_{}".format('anchor_string'), kwargs.get('anchor_string', None)) setattr(self, "_{}".format('anchor_string_metadata'), kwargs.get('anchor_string_metadata', None)) setattr(self, "_{}".format('anchor_tab_processor_version'), kwargs.get('anchor_tab_processor_version', None)) setattr(self, "_{}".format('anchor_tab_processor_version_metadata'), kwargs.get('anchor_tab_processor_version_metadata', None)) setattr(self, "_{}".format('anchor_units'), kwargs.get('anchor_units', None)) setattr(self, "_{}".format('anchor_units_metadata'), kwargs.get('anchor_units_metadata', None)) setattr(self, "_{}".format('anchor_x_offset'), kwargs.get('anchor_x_offset', None)) setattr(self, "_{}".format('anchor_x_offset_metadata'), kwargs.get('anchor_x_offset_metadata', None)) setattr(self, "_{}".format('anchor_y_offset'), kwargs.get('anchor_y_offset', None)) setattr(self, "_{}".format('anchor_y_offset_metadata'), kwargs.get('anchor_y_offset_metadata', None)) setattr(self, "_{}".format('bold'), kwargs.get('bold', None)) setattr(self, "_{}".format('bold_metadata'), kwargs.get('bold_metadata', None)) setattr(self, "_{}".format('conceal_value_on_document'), kwargs.get('conceal_value_on_document', None)) setattr(self, "_{}".format('conceal_value_on_document_metadata'), kwargs.get('conceal_value_on_document_metadata', None)) setattr(self, "_{}".format('conditional_parent_label'), kwargs.get('conditional_parent_label', None)) setattr(self, "_{}".format('conditional_parent_label_metadata'), kwargs.get('conditional_parent_label_metadata', None)) setattr(self, "_{}".format('conditional_parent_value'), 
kwargs.get('conditional_parent_value', None)) setattr(self, "_{}".format('conditional_parent_value_metadata'), kwargs.get('conditional_parent_value_metadata', None)) setattr(self, "_{}".format('custom_tab_id'), kwargs.get('custom_tab_id', None)) setattr(self, "_{}".format('custom_tab_id_metadata'), kwargs.get('custom_tab_id_metadata', None)) setattr(self, "_{}".format('disable_auto_size'), kwargs.get('disable_auto_size', None)) setattr(self, "_{}".format('disable_auto_size_metadata'), kwargs.get('disable_auto_size_metadata', None)) setattr(self, "_{}".format('document_id'), kwargs.get('document_id', None)) setattr(self, "_{}".format('document_id_metadata'), kwargs.get('document_id_metadata', None)) setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None)) setattr(self, "_{}".format('font'), kwargs.get('font', None)) setattr(self, "_{}".format('font_color'), kwargs.get('font_color', None)) setattr(self, "_{}".format('font_color_metadata'), kwargs.get('font_color_metadata', None)) setattr(self, "_{}".format('font_metadata'), kwargs.get('font_metadata', None)) setattr(self, "_{}".format('font_size'), kwargs.get('font_size', None)) setattr(self, "_{}".format('font_size_metadata'), kwargs.get('font_size_metadata', None)) setattr(self, "_{}".format('form_order'), kwargs.get('form_order', None)) setattr(self, "_{}".format('form_order_metadata'), kwargs.get('form_order_metadata', None)) setattr(self, "_{}".format('form_page_label'), kwargs.get('form_page_label', None)) setattr(self, "_{}".format('form_page_label_metadata'), kwargs.get('form_page_label_metadata', None)) setattr(self, "_{}".format('form_page_number'), kwargs.get('form_page_number', None)) setattr(self, "_{}".format('form_page_number_metadata'), kwargs.get('form_page_number_metadata', None)) setattr(self, "_{}".format('height'), kwargs.get('height', None)) setattr(self, "_{}".format('height_metadata'), kwargs.get('height_metadata', None)) setattr(self, "_{}".format('italic'), kwargs.get('italic', None)) setattr(self, "_{}".format('italic_metadata'), kwargs.get('italic_metadata', None)) setattr(self, "_{}".format('locale_policy'), kwargs.get('locale_policy', None)) setattr(self, "_{}".format('locked'), kwargs.get('locked', None)) setattr(self, "_{}".format('locked_metadata'), kwargs.get('locked_metadata', None)) setattr(self, "_{}".format('max_length'), kwargs.get('max_length', None)) setattr(self, "_{}".format('max_length_metadata'), kwargs.get('max_length_metadata', None)) setattr(self, "_{}".format('merge_field'), kwargs.get('merge_field', None)) setattr(self, "_{}".format('merge_field_xml'), kwargs.get('merge_field_xml', None)) setattr(self, "_{}".format('name'), kwargs.get('name', None)) setattr(self, "_{}".format('name_metadata'), kwargs.get('name_metadata', None)) setattr(self, "_{}".format('original_value'), kwargs.get('original_value', None)) setattr(self, "_{}".format('original_value_metadata'), kwargs.get('original_value_metadata', None)) setattr(self, "_{}".format('page_number'), kwargs.get('page_number', None)) setattr(self, "_{}".format('page_number_metadata'), kwargs.get('page_number_metadata', None)) setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None)) setattr(self, "_{}".format('recipient_id_guid'), kwargs.get('recipient_id_guid', None)) setattr(self, "_{}".format('recipient_id_guid_metadata'), kwargs.get('recipient_id_guid_metadata', None)) setattr(self, "_{}".format('recipient_id_metadata'), kwargs.get('recipient_id_metadata', None)) setattr(self, 
"_{}".format('required'), kwargs.get('required', None)) setattr(self, "_{}".format('required_metadata'), kwargs.get('required_metadata', None)) setattr(self, "_{}".format('smart_contract_information'), kwargs.get('smart_contract_information', None)) setattr(self, "_{}".format('source'), kwargs.get('source', None)) setattr(self, "_{}".format('status'), kwargs.get('status', None)) setattr(self, "_{}".format('status_metadata'), kwargs.get('status_metadata', None)) setattr(self, "_{}".format('tab_group_labels'), kwargs.get('tab_group_labels', None)) setattr(self, "_{}".format('tab_group_labels_metadata'), kwargs.get('tab_group_labels_metadata', None)) setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None)) setattr(self, "_{}".format('tab_id_metadata'), kwargs.get('tab_id_metadata', None)) setattr(self, "_{}".format('tab_label'), kwargs.get('tab_label', None)) setattr(self, "_{}".format('tab_label_metadata'), kwargs.get('tab_label_metadata', None)) setattr(self, "_{}".format('tab_order'), kwargs.get('tab_order', None)) setattr(self, "_{}".format('tab_order_metadata'), kwargs.get('tab_order_metadata', None)) setattr(self, "_{}".format('tab_type'), kwargs.get('tab_type', None)) setattr(self, "_{}".format('tab_type_metadata'), kwargs.get('tab_type_metadata', None)) setattr(self, "_{}".format('template_locked'), kwargs.get('template_locked', None)) setattr(self, "_{}".format('template_locked_metadata'), kwargs.get('template_locked_metadata', None)) setattr(self, "_{}".format('template_required'), kwargs.get('template_required', None)) setattr(self, "_{}".format('template_required_metadata'), kwargs.get('template_required_metadata', None)) setattr(self, "_{}".format('tooltip'), kwargs.get('tooltip', None)) setattr(self, "_{}".format('tool_tip_metadata'), kwargs.get('tool_tip_metadata', None)) setattr(self, "_{}".format('underline'), kwargs.get('underline', None)) setattr(self, "_{}".format('underline_metadata'), kwargs.get('underline_metadata', None)) setattr(self, "_{}".format('value'), kwargs.get('value', None)) setattr(self, "_{}".format('value_metadata'), kwargs.get('value_metadata', None)) setattr(self, "_{}".format('width'), kwargs.get('width', None)) setattr(self, "_{}".format('width_metadata'), kwargs.get('width_metadata', None)) setattr(self, "_{}".format('x_position'), kwargs.get('x_position', None)) setattr(self, "_{}".format('x_position_metadata'), kwargs.get('x_position_metadata', None)) setattr(self, "_{}".format('y_position'), kwargs.get('y_position', None)) setattr(self, "_{}".format('y_position_metadata'), kwargs.get('y_position_metadata', None)) @property def anchor_allow_white_space_in_characters(self): return self._anchor_allow_white_space_in_characters @anchor_allow_white_space_in_characters.setter def anchor_allow_white_space_in_characters(self, anchor_allow_white_space_in_characters): self._anchor_allow_white_space_in_characters = anchor_allow_white_space_in_characters @property def anchor_allow_white_space_in_characters_metadata(self): return self._anchor_allow_white_space_in_characters_metadata @anchor_allow_white_space_in_characters_metadata.setter def anchor_allow_white_space_in_characters_metadata(self, anchor_allow_white_space_in_characters_metadata): self._anchor_allow_white_space_in_characters_metadata = anchor_allow_white_space_in_characters_metadata @property def anchor_case_sensitive(self): return self._anchor_case_sensitive @anchor_case_sensitive.setter def anchor_case_sensitive(self, anchor_case_sensitive): self._anchor_case_sensitive = 
anchor_case_sensitive @property def anchor_case_sensitive_metadata(self): return self._anchor_case_sensitive_metadata @anchor_case_sensitive_metadata.setter def anchor_case_sensitive_metadata(self, anchor_case_sensitive_metadata): self._anchor_case_sensitive_metadata = anchor_case_sensitive_metadata @property def anchor_horizontal_alignment(self): return self._anchor_horizontal_alignment @anchor_horizontal_alignment.setter def anchor_horizontal_alignment(self, anchor_horizontal_alignment): self._anchor_horizontal_alignment = anchor_horizontal_alignment @property def anchor_horizontal_alignment_metadata(self): return self._anchor_horizontal_alignment_metadata @anchor_horizontal_alignment_metadata.setter def anchor_horizontal_alignment_metadata(self, anchor_horizontal_alignment_metadata): self._anchor_horizontal_alignment_metadata = anchor_horizontal_alignment_metadata @property def anchor_ignore_if_not_present(self): return self._anchor_ignore_if_not_present @anchor_ignore_if_not_present.setter def anchor_ignore_if_not_present(self, anchor_ignore_if_not_present): self._anchor_ignore_if_not_present = anchor_ignore_if_not_present @property def anchor_ignore_if_not_present_metadata(self): return self._anchor_ignore_if_not_present_metadata @anchor_ignore_if_not_present_metadata.setter def anchor_ignore_if_not_present_metadata(self, anchor_ignore_if_not_present_metadata): self._anchor_ignore_if_not_present_metadata = anchor_ignore_if_not_present_metadata @property def anchor_match_whole_word(self): return self._anchor_match_whole_word @anchor_match_whole_word.setter def anchor_match_whole_word(self, anchor_match_whole_word): self._anchor_match_whole_word = anchor_match_whole_word @property def anchor_match_whole_word_metadata(self): return self._anchor_match_whole_word_metadata @anchor_match_whole_word_metadata.setter def anchor_match_whole_word_metadata(self, anchor_match_whole_word_metadata): self._anchor_match_whole_word_metadata = anchor_match_whole_word_metadata @property def anchor_string(self): return self._anchor_string @anchor_string.setter def anchor_string(self, anchor_string): self._anchor_string = anchor_string @property def anchor_string_metadata(self): return self._anchor_string_metadata @anchor_string_metadata.setter def anchor_string_metadata(self, anchor_string_metadata): self._anchor_string_metadata = anchor_string_metadata @property def anchor_tab_processor_version(self): return self._anchor_tab_processor_version @anchor_tab_processor_version.setter def anchor_tab_processor_version(self, anchor_tab_processor_version): self._anchor_tab_processor_version = anchor_tab_processor_version @property def anchor_tab_processor_version_metadata(self): return self._anchor_tab_processor_version_metadata @anchor_tab_processor_version_metadata.setter def anchor_tab_processor_version_metadata(self, anchor_tab_processor_version_metadata): self._anchor_tab_processor_version_metadata = anchor_tab_processor_version_metadata @property def anchor_units(self): return self._anchor_units @anchor_units.setter def anchor_units(self, anchor_units): self._anchor_units = anchor_units @property def anchor_units_metadata(self): return self._anchor_units_metadata @anchor_units_metadata.setter def anchor_units_metadata(self, anchor_units_metadata): self._anchor_units_metadata = anchor_units_metadata @property
MIT License
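A construction sketch for the generated CommissionNumber model above; the anchor values are arbitrary strings (the swagger type is 'str'), and the anchor_x_offset setter used at the end is assumed to follow the same generated property pattern as the getter shown in the entry.

from docusign_esign.models.commission_number import CommissionNumber

tab = CommissionNumber(anchor_string='/commission/', anchor_x_offset='10')
print(tab.anchor_x_offset)   # '10', returned by the getter shown above

tab.anchor_x_offset = '25'   # setter assumed to mirror the generated getter
print(tab.anchor_x_offset)   # '25'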
lsapan/docker-swarm-demo
secrets.py
secret
python
def secret(name, strip=True):
    with open(secret_path(name), 'r') as f:
        val = f.read()
    if strip:
        val = val.strip()
    return val
Returns the value of a secret from the docker container.
https://github.com/lsapan/docker-swarm-demo/blob/f6b905fbbbe4b74d62a4af664f9a677f3cb03b8e/secrets.py#L18-L26
import os


class SecretNotFoundError(IOError):
    pass


def secret_path(name):
    secret_path = f'/run/secrets/{name}'
    if not os.path.isfile(secret_path):
        raise SecretNotFoundError(name)
    return secret_path
MIT License
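A usage sketch for secret() above. The secret name is hypothetical, and the import refers to the repository's local secrets.py (which shadows the standard-library secrets module when run from the repo root); outside a Swarm service the file is not mounted under /run/secrets, so SecretNotFoundError is the expected outcome.

from secrets import SecretNotFoundError, secret  # the local secrets.py shown above

try:
    db_password = secret('db_password')   # 'db_password' is a made-up secret name
    print('loaded a secret of length', len(db_password))
except SecretNotFoundError as missing:
    print('secret not mounted at /run/secrets:', missing)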
uvjustin/alarmdotcom
custom_components/alarmdotcom/alarm_control_panel.py
AlarmDotCom._validate_code
python
def _validate_code(self, code):
    check = self._code is None or code == self._code
    if not check:
        _LOGGER.warning("Wrong code entered")
    return check
Validate given code.
https://github.com/uvjustin/alarmdotcom/blob/8b7288bfd1c21a5687cd2359753f0d2efceca726/custom_components/alarmdotcom/alarm_control_panel.py#L215-L220
import logging import re from pyalarmdotcomajax import Alarmdotcom, AlarmdotcomADT, AlarmdotcomProtection1 import voluptuous as vol import homeassistant.components.alarm_control_panel as alarm try: from homeassistant.components.alarm_control_panel import AlarmControlPanelEntity except ImportError: from homeassistant.components.alarm_control_panel import ( AlarmControlPanel as AlarmControlPanelEntity, ) from homeassistant.components.alarm_control_panel import PLATFORM_SCHEMA from homeassistant.components.alarm_control_panel.const import ( SUPPORT_ALARM_ARM_AWAY, SUPPORT_ALARM_ARM_HOME, ) from homeassistant.const import ( CONF_CODE, CONF_NAME, CONF_PASSWORD, CONF_USERNAME, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_NIGHT, STATE_ALARM_DISARMED, ) from homeassistant.helpers.aiohttp_client import ( async_create_clientsession, async_get_clientsession, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "Alarm.com" CONF_FORCE_BYPASS = "force_bypass" CONF_NO_ENTRY_DELAY = "no_entry_delay" CONF_SILENT_ARMING = "silent_arming" CONF_ADT = "adt" CONF_PROTECTION1 = "protection1" CONF_TWO_FACTOR_COOKIE = "two_factor_cookie" DOMAIN = "alarmdotcom" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_PASSWORD): cv.string, vol.Required(CONF_USERNAME): cv.string, vol.Optional(CONF_CODE): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_FORCE_BYPASS, default="false"): cv.string, vol.Optional(CONF_NO_ENTRY_DELAY, default="false"): cv.string, vol.Optional(CONF_SILENT_ARMING, default="false"): cv.string, vol.Optional(CONF_ADT, default=False): cv.boolean, vol.Optional(CONF_PROTECTION1, default=False): cv.boolean, vol.Optional(CONF_TWO_FACTOR_COOKIE): cv.string, } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): name = config.get(CONF_NAME) code = config.get(CONF_CODE) username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) force_bypass = config.get(CONF_FORCE_BYPASS) no_entry_delay = config.get(CONF_NO_ENTRY_DELAY) silent_arming = config.get(CONF_SILENT_ARMING) two_factor_cookie = config.get(CONF_TWO_FACTOR_COOKIE) use_new_websession = hass.data.get(DOMAIN) adt_or_protection1 = 0 if config.get(CONF_ADT): adt_or_protection1 = 1 elif config.get(CONF_PROTECTION1): adt_or_protection1 = 2 if not use_new_websession: hass.data[DOMAIN] = True use_new_websession = False alarmdotcom = AlarmDotCom( hass, name, code, username, password, force_bypass, no_entry_delay, silent_arming, use_new_websession, adt_or_protection1, two_factor_cookie, ) await alarmdotcom.async_login() async_add_entities([alarmdotcom]) class AlarmDotCom(AlarmControlPanelEntity): def __init__( self, hass, name, code, username, password, force_bypass, no_entry_delay, silent_arming, use_new_websession, adt_or_protection1, two_factor_cookie, ): _LOGGER.debug("Setting up Alarm.com...") self._name = name self._code = code if code else None if use_new_websession: websession = async_create_clientsession(hass) _LOGGER.debug("Using new websession.") else: websession = async_get_clientsession(hass) _LOGGER.debug("Using hass websession.") self._state = None no_entry_delay = ( "stay" if no_entry_delay.lower() == "home" else no_entry_delay.lower() ) force_bypass = ( "stay" if force_bypass.lower() == "home" else force_bypass.lower() ) silent_arming = ( "stay" if silent_arming.lower() == "home" else silent_arming.lower() ) if adt_or_protection1 == 1: adc_class = AlarmdotcomADT elif 
adt_or_protection1 == 2: adc_class = AlarmdotcomProtection1 else: adc_class = Alarmdotcom self._alarm = adc_class( username, password, websession, force_bypass, no_entry_delay, silent_arming, two_factor_cookie, ) async def async_login(self): await self._alarm.async_login() async def async_update(self): await self._alarm.async_update() return self._alarm.state @property def name(self): return self._name @property def code_format(self): if self._code is None: return None if isinstance(self._code, str) and re.search("^\\d+$", self._code): return alarm.FORMAT_NUMBER return alarm.FORMAT_TEXT @property def state(self): if self._alarm.state.lower() == "disarmed": return STATE_ALARM_DISARMED if self._alarm.state.lower() == "armed stay": return STATE_ALARM_ARMED_HOME if self._alarm.state.lower() == "armed away": return STATE_ALARM_ARMED_AWAY if self._alarm.state.lower() == "armed night": return STATE_ALARM_ARMED_NIGHT return None @property def supported_features(self) -> int: return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY @property def device_state_attributes(self): return {"sensor_status": self._alarm.sensor_status} async def async_alarm_disarm(self, code=None): if self._validate_code(code): await self._alarm.async_alarm_disarm() async def async_alarm_arm_home(self, code=None): if self._validate_code(code): await self._alarm.async_alarm_arm_stay() async def async_alarm_arm_away(self, code=None): if self._validate_code(code): await self._alarm.async_alarm_arm_away()
MIT License
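A standalone restatement of the _validate_code check above, so the rule can be exercised without a Home Assistant setup; this mirrors the logic rather than importing the integration, and the codes used are invented.

import logging

_LOGGER = logging.getLogger(__name__)

def validate_code(configured_code, entered_code):
    # Same rule as AlarmDotCom._validate_code: accept anything when no code is
    # configured, otherwise require an exact match and warn on a mismatch.
    check = configured_code is None or entered_code == configured_code
    if not check:
        _LOGGER.warning("Wrong code entered")
    return check

print(validate_code('1234', '1234'))    # True
print(validate_code('1234', '9999'))    # False, logs a warning
print(validate_code(None, 'anything'))  # True: no code configured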
synbiodex/pysbol2
sbol2/componentdefinition.py
ComponentDefinition.linearize
python
def linearize(self, components=None):
    raise NotImplementedError("Not yet implemented")
TODO document

:param components: An optional list of component definitions or URIs. If None, an empty list of ComponentDefinitions is assumed.
:return: None
https://github.com/synbiodex/pysbol2/blob/127b92d60ecf6f9b6cb8fbf9657bb578bc983090/sbol2/componentdefinition.py#L887-L895
import os import posixpath from typing import Union from rdflib import URIRef from .component import Component from .config import Config, ConfigOptions from .constants import * from .toplevel import TopLevel from .property import OwnedObject, ReferencedObject, URIProperty from .sbolerror import SBOLError, SBOLErrorCode from .sequence import Sequence from .sequenceannotation import SequenceAnnotation from .sequenceconstraint import SequenceConstraint class ComponentDefinition(TopLevel): _types = None _roles = None components = None sequences = None sequenceAnnotations = None sequenceConstraints = None def __init__(self, uri=URIRef("example"), component_type=URIRef(BIOPAX_DNA), version=VERSION_STRING, type_uri=SBOL_COMPONENT_DEFINITION): super().__init__(type_uri, uri, version) self.types = URIProperty(self, SBOL_TYPES, '1', '*', None, component_type) self.roles = URIProperty(self, SBOL_ROLES, '0', '*', None) self.sequences = ReferencedObject(self, SBOL_SEQUENCE_PROPERTY, SBOL_SEQUENCE, '0', '*', None) self.sequenceAnnotations = OwnedObject(self, SBOL_SEQUENCE_ANNOTATIONS, SequenceAnnotation, '0', '*', None) self.components = OwnedObject(self, SBOL_COMPONENTS, Component, '0', '*', None) self.sequenceConstraints = OwnedObject(self, SBOL_SEQUENCE_CONSTRAINTS, SequenceConstraint, '0', '*', None) self._sequence_cache: Union[Sequence, None] = None @property def sequence(self): seqs = self.sequences if not seqs: return None if self.doc: seq_uri = seqs[0] try: return self.doc.sequences[seq_uri] except SBOLError as e: if e.error_code() != SBOLErrorCode.NOT_FOUND_ERROR: raise return None else: if self._sequence_cache and self._sequence_cache.identity in seqs: return self._sequence_cache return None @sequence.setter def sequence(self, sequence: Union[Sequence, None]): if not sequence: self.sequences = None self._sequence_cache = None return if self.doc: try: self.doc.add(sequence) except SBOLError as e: if e.error_code() != SBOLErrorCode.DUPLICATE_URI_ERROR: raise else: self._sequence_cache = sequence self.sequences = [sequence.identity] def _added_to_document(self, doc): super()._added_to_document(doc) if self._sequence_cache: try: doc.add(self._sequence_cache) except SBOLError as e: if e.error_code() == SBOLErrorCode.SBOL_ERROR_URI_NOT_UNIQUE: pass else: raise def addType(self, new_type): val = self.types val.append(new_type) self.types = val def removeType(self, index=0): val = self.types del val[index] self.types = val def addRole(self, new_role): val = self.roles val.append(new_role) self.roles = val def removeRole(self, index=0): val = self.roles del val[index] self.roles = val def assemble(self, component_list, assembly_method=None, doc=None): if not Config.getOption(ConfigOptions.SBOL_COMPLIANT_URIS): raise EnvironmentError('Assemble method requires SBOL-compliance enabled') if not self.doc and not doc: raise ValueError('Missing doc argument. If the ComponentDefinition does ' 'not belong to a Document, a target Document must be ' 'specified using the doc keyword argument.') if doc and self.doc != doc: raise ValueError('Invalid doc argument. Do not use the doc keyword ' 'argument if the ComponentDefinition already belongs ' 'to a Document') doc = doc if doc else self.doc if isinstance(component_list, list) and all(isinstance(c, ComponentDefinition) for c in component_list): for cdef in component_list: if cdef.doc and cdef.doc is not doc: raise ValueError('Invalid component_list specified. 
Assembly ' 'subcomponents must belong to the same Document ' 'as self.') elif isinstance(component_list, list) and all(isinstance(c, str) for c in component_list): component_identities = component_list[:] component_list = [] for c_id in component_identities: if c_id not in doc.componentDefinitions: raise ValueError('Invalid component_list specified. ' 'ComponentDefinition <%s> not found.' % c_id) cdef = doc.componentDefinitions[c_id] component_list.append(cdef) else: raise TypeError('Invalid component_list specified. Please provide a list ' 'of ComponentDefinitions or, alternatively, a list of ' 'ComponentDefinition displayIds') if not self.doc: doc.addComponentDefinition(self) for cdef in component_list: if not cdef.doc: self.doc.addComponentDefinition(cdef) if assembly_method: component_list = assembly_method(component_list) if not all(type(c) is ComponentDefinition for c in component_list): raise TypeError('Invalid callback specified for assembly_method. The ' 'callback must return a list of ComponentDefinitions') instance_list = [] for cdef in component_list: instance_count = 0 component_id = self.persistentIdentity + "/" + cdef.displayId + "_" + str(instance_count) + "/" + self.version while self.find(component_id): instance_count += 1 component_id = self.persistentIdentity + "/" + cdef.displayId + "_" + str(instance_count) + "/" + self.version c = self.components.create(cdef.displayId + "_" + str(instance_count)) c.definition = cdef.identity instance_list.append(c) return component_list def assemblePrimaryStructure(self, primary_structure, assembly_method=None, doc=None): primary_structure = self.assemble(primary_structure, assembly_method, doc) doc = doc if doc else self.doc if all(isinstance(c, str) for c in primary_structure): component_identities = primary_structure[:] primary_structure = [] for c_id in component_identities: cdef = doc.componentDefinitions[c_id] primary_structure.append(cdef) self.types += [SO_LINEAR] component_map = {} for c in self.components: if c.definition not in component_map: component_map[c.definition] = [c] else: component_map[c.definition].append(c) primary_structure_components = [] for cd in primary_structure: primary_structure_components.append(component_map[cd.identity].pop()) if len(self.sequenceConstraints): self.sequenceConstraints.clear() for upstream, downstream in zip(primary_structure_components[:-1], primary_structure_components[1:]): instance_count = 0 constraint_id = 'constraint_%d' % instance_count while constraint_id in self.sequenceConstraints: instance_count += 1 constraint_id = 'constraint_%d' % instance_count sc = self.sequenceConstraints.create(constraint_id) sc.subject = upstream sc.object = downstream sc.restriction = SBOL_RESTRICTION_PRECEDES def compile(self, assembly_method=None): if not self.doc: raise ValueError('Cannot compile <%s>. The ComponentDefinition must belong ' 'to a Document in order to compile.' 
% self.identity) if self.sequence is None: sequence_id = self.displayId + '_seq' compliant_uris = Config.getOption(ConfigOptions.SBOL_COMPLIANT_URIS) typed_uris = Config.getOption(ConfigOptions.SBOL_TYPED_URIS) if compliant_uris and typed_uris: sequence_id = self.displayId self.sequence = Sequence(sequence_id) return self.sequence.compile(assembly_method=assembly_method) def updateSequence(self, composite_sequence=""): raise NotImplementedError("Not yet implemented") def getInSequentialOrder(self): return self.getPrimaryStructureComponents() def hasUpstreamComponent(self, component): if len(self.sequenceConstraints) < 1: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'Cannot determine upstream Component. ' 'Self has no SequenceConstraints') else: for sc in self.sequenceConstraints: if sc.object == component.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES: return True return False def getUpstreamComponent(self, component): if len(self.sequenceConstraints) < 1: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'Cannot get upstream Component. Self ' 'has no SequenceConstraints') else: upstream_component_id = None for sc in self.sequenceConstraints: if sc.object == component.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES: upstream_component = self.components[sc.subject] return upstream_component raise SBOLError(SBOLErrorCode.SBOL_ERROR_END_OF_LIST, 'This component has no upstream ' 'component. Use hasUpstreamComponent to catch this error') def hasDownstreamComponent(self, component): if len(self.sequenceConstraints) < 1: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'Cannot determine upstream Component. ' 'Self has no SequenceConstraints') else: for sc in self.sequenceConstraints: if sc.subject == component.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES: return True return False def getDownstreamComponent(self, component): if len(self.sequenceConstraints) < 1: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'Cannot get downstream Component. ' 'Self has no SequenceConstraints') else: upstream_component_id = None for sc in self.sequenceConstraints: if sc.subject == component.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES: upstream_component = self.components[sc.object] return upstream_component raise SBOLError(SBOLErrorCode.SBOL_ERROR_END_OF_LIST, 'This component has no downstream ' 'component. Use hasDownstreamComponent to catch this error') def deleteDownstreamComponent(self, upstream_component): if not Config.getOption(ConfigOptions.SBOL_COMPLIANT_URIS): raise ValueError('SBOL-compliant URIs must be enabled to use this method') if upstream_component.identity not in self.components: msg = 'Deletion failed. ComponentDefinition %s has no child component %s' msg = msg % (self.identity, upstream_component.identity) raise ValueError(msg) primary_structure = self.getPrimaryStructureComponents() if upstream_component.identity == primary_structure[-1].identity: msg = 'Deletion failed. 
No Components were found downstream of %s' msg = msg % upstream_component.identity raise ValueError( msg) downstream_component = None upstream_sequence_constraint = None downstream_sequence_constraint = None for c_upstream, c_downstream in zip(primary_structure[:-1], primary_structure[1:]): for sc in self.sequenceConstraints: if (sc.subject == c_upstream.identity and sc.object == c_downstream.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES): upstream_sequence_constraint = downstream_sequence_constraint downstream_sequence_constraint = sc if downstream_component: break if c_upstream.identity == upstream_component.identity: downstream_component = c_downstream if downstream_component: self.components.remove(downstream_component.identity) self.sequenceConstraints.remove(downstream_sequence_constraint.identity) if downstream_sequence_constraint.subject == downstream_component.identity: upstream_sequence_constraint.object = downstream_sequence_constraint.object def deleteUpstreamComponent(self, downstream_component): if not Config.getOption(ConfigOptions.SBOL_COMPLIANT_URIS): raise ValueError('SBOL-compliant URIs must be enabled to use this method') if downstream_component.identity not in self.components: msg = 'Deletion failed. No Components were found upstream of %s' msg = msg % downstream_component.identity raise ValueError(msg) primary_structure = self.getPrimaryStructureComponents() if downstream_component.identity == primary_structure[0].identity: msg = 'Deletion failed. Component %s does not have an upstream component' msg = msg % downstream_component.identity raise ValueError(msg) upstream_component = None upstream_sequence_constraint = None downstream_sequence_constraint = None for c_upstream, c_downstream in zip(primary_structure[:-1], primary_structure[1:]): for sc in self.sequenceConstraints: if (sc.subject == c_upstream.identity and sc.object == c_downstream.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES): upstream_sequence_constraint = downstream_sequence_constraint downstream_sequence_constraint = sc if c_downstream.identity == downstream_component.identity: upstream_component = c_upstream break if upstream_component: self.components.remove(upstream_component.identity) self.sequenceConstraints.remove(downstream_sequence_constraint.identity) if upstream_sequence_constraint: upstream_sequence_constraint.object = downstream_sequence_constraint.object def insertUpstreamComponent(self, downstream, insert): if not Config.getOption(ConfigOptions.SBOL_COMPLIANT_URIS): raise ValueError('SBOL-compliant URIs must be enabled to use this method') if not self.doc: msg = f'ComponentDefinition {self.identity} does not belong to a Document' msg += ' Add this ComponentDefinition to a Document before calling' msg += ' insertUpstreamComponent' raise ValueError(msg) if self.doc != insert.doc: msg = f'Invalid Document for ComponentDefinition {insert.identity}.' msg += ' Add the insert to the same Document as the calling object.' raise ValueError(msg) if not insert.doc: insert.doc = self.doc target_constraint = None for sc in self.sequenceConstraints: if (sc.object == downstream.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES): if target_constraint is not None: msg = 'SequenceConstraints are ambiguous. 
The target component' msg += ' may have more than one downstream component specified' raise ValueError(msg) target_constraint = sc instance_count = 0 component_id = posixpath.join(self.persistentIdentity, f'{insert.displayId}_{instance_count}', self.version) while self.find(component_id): instance_count += 1 component_id = posixpath.join(self.persistentIdentity, f'{insert.displayId}_{instance_count}', self.version) c_insert = self.components.create(f'{insert.displayId}_{instance_count}') c_insert.definition = insert.identity instance_count = 0 sc_id = posixpath.join(self.persistentIdentity, f'constraint_{instance_count}', self.version) while self.find(sc_id): instance_count += 1 sc_id = posixpath.join(self.persistentIdentity, f'constraint_{instance_count}', self.version) sc_new = self.sequenceConstraints.create(f'constraint_{instance_count}') sc_new.subject = component_id sc_new.object = downstream.identity sc_new.restriction = SBOL_RESTRICTION_PRECEDES if target_constraint: target_constraint.object = c_insert.identity def insertDownstreamComponent(self, upstream, insert): if not Config.getOption(ConfigOptions.SBOL_COMPLIANT_URIS): raise ValueError('SBOL-compliant URIs must be enabled to use this method') if not self.doc: msg = f'ComponentDefinition {self.identity} does not belong to a Document' msg += ' Add this ComponentDefinition to a Document before calling' msg += ' insertUpstreamComponent' raise ValueError(msg) if self.doc != insert.doc: msg = f'Invalid Document for ComponentDefinition {insert.identity}.' msg += ' Add the insert to the same Document as the calling object.' raise ValueError(msg) if not insert.doc: insert.doc = self.doc target_constraint = None for sc in self.sequenceConstraints: if (sc.subject == upstream.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES): if target_constraint is not None: msg = 'SequenceConstraints are ambiguous. 
The target component' msg += ' may have more than one downstream component specified' raise ValueError(msg) target_constraint = sc instance_count = 0 component_id = posixpath.join(self.persistentIdentity, f'{insert.displayId}_{instance_count}', self.version) while self.find(component_id): instance_count += 1 component_id = posixpath.join(self.persistentIdentity, f'{insert.displayId}_{instance_count}', self.version) c_insert = self.components.create(f'{insert.displayId}_{instance_count}') c_insert.definition = insert.identity instance_count = 0 sc_id = posixpath.join(self.persistentIdentity, f'constraint_{instance_count}', self.version) while self.find(sc_id): instance_count += 1 sc_id = posixpath.join(self.persistentIdentity, f'constraint_{instance_count}', self.version) sc_new = self.sequenceConstraints.create(f'constraint_{instance_count}') sc_new.subject = upstream.identity sc_new.object = component_id sc_new.restriction = SBOL_RESTRICTION_PRECEDES if target_constraint: target_constraint.object = c_insert.identity def getFirstComponent(self): if len(self.components) < 1: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'This ComponentDefinition has no ' 'components') arbitrary_component = self.components[0] next_component = arbitrary_component while self.hasUpstreamComponent(next_component): next_component = self.getUpstreamComponent(next_component) return next_component def getLastComponent(self): if len(self.components) < 1: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'This ComponentDefinition has no ' 'components') arbitrary_component = self.components[0] next_component = arbitrary_component while self.hasDownstreamComponent(next_component): next_component = self.getDownstreamComponent(next_component) return next_component def applyToComponentHierarchy(self, callback=None, user_data=None): raise NotImplementedError("Not yet implemented") def getPrimaryStructureComponents(self): subcomponents = [] if len(self.components) == 1: subcomponents.append(self.components[0]) else: if len(self.sequenceConstraints) != (len(self.components) - 1): raise ValueError('ComponentDefinition <%s> does not appear to describe' 'a complete primary structure. It appears to be ' 'missing SequenceConstraints.' % self.identity) c_first = self.getFirstComponent() subcomponents.append(c_first) c_next = c_first while self.hasDownstreamComponent(c_next): c_next = self.getDownstreamComponent(c_next) subcomponents.append(c_next) return subcomponents def getPrimaryStructure(self): if self.doc is None: raise SBOLError(SBOLErrorCode.SBOL_ERROR_MISSING_DOCUMENT, 'Cannot get primary structure.' 'Self must belong to a Document.') component_ids = [c.definition for c in self.getPrimaryStructureComponents()] return [self.doc.getComponentDefinition(c) for c in component_ids] def insertDownstream(self, target, component_to_insert): raise NotImplementedError("Not yet implemented") def insertUpstream(self, target, component_to_insert): raise NotImplementedError("Not yet implemented") def addUpstreamFlank(self, target, elements): raise NotImplementedError("Not yet implemented") def addDownstreamFlank(self, target, elements): raise NotImplementedError("Not yet implemented") def isRegular(self, msg=None): raise NotImplementedError("Not yet implemented") def isComplete(self, msg=None): raise NotImplementedError("Not yet implemented") def disassemble(self, range_start=1): raise NotImplementedError("Not yet implemented")
Apache License 2.0
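linearize above is still a stub, so the only observable behaviour is the NotImplementedError it raises; a short sketch follows, assuming sbol2 is installed and using an arbitrary display ID.

import sbol2

cd = sbol2.ComponentDefinition('example_cd')
try:
    cd.linearize()
except NotImplementedError as err:
    print('linearize is not implemented yet:', err)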
flyteorg/flytekit
flytekit/models/literals.py
RetryStrategy.__init__
python
def __init__(self, retries):
    self._retries = retries
:param int retries: Number of retries to attempt on recoverable failures. If retries is 0, then only one attempt will be made.
https://github.com/flyteorg/flytekit/blob/6c032035563ae645b0b93558b3fe3362080057ea/flytekit/models/literals.py#L15-L20
from datetime import datetime as _datetime

import pytz as _pytz
from flyteidl.core import literals_pb2 as _literals_pb2
from google.protobuf.struct_pb2 import Struct

from flytekit.common.exceptions import user as _user_exceptions
from flytekit.models import common as _common
from flytekit.models.core import types as _core_types
from flytekit.models.types import OutputReference as _OutputReference
from flytekit.models.types import SchemaType as _SchemaType


class RetryStrategy(_common.FlyteIdlEntity):
Apache License 2.0
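A construction sketch for RetryStrategy above; 3 is an arbitrary retry count, and the print reads the private attribute set directly by __init__ (the full model typically also exposes a public retries property, which is not shown in this excerpt).

from flytekit.models.literals import RetryStrategy

strategy = RetryStrategy(retries=3)   # retry up to 3 times on recoverable failures
print(strategy._retries)              # 3, the attribute set directly by __init__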
practical-data-science/ecommercetools
ecommercetools/utilities/metrics.py
average_tickets_to_resolve
python
def average_tickets_to_resolve(total_tickets, total_resolutions):
    return total_tickets / total_resolutions
Returns the average number of tickets required to resolve an issue.

Args:
    total_tickets (int): Total chats, emails, or tickets in the period.
    total_resolutions (int): Total chats, emails, or tickets resolved in the period.

Returns:
    Average number of tickets it takes to resolve an issue.
https://github.com/practical-data-science/ecommercetools/blob/b00175d7775dc4f6ad57b52702a0b2acce3425fc/ecommercetools/utilities/metrics.py#L732-L743
import math from datetime import datetime def tax(gross_revenue, tax_rate=0.2): return gross_revenue * tax_rate def net_revenue(gross_revenue, tax_rate=0.2): total_tax = tax(gross_revenue, tax_rate) return gross_revenue - total_tax def aov(total_revenue, total_orders): return total_revenue / total_orders def product_cost(gross_revenue, margin, tax_rate=0.2): revenue_net = net_revenue(gross_revenue, tax_rate) return revenue_net * margin def gross_profit(gross_revenue, margin, tax_rate=0.2): cost_product = product_cost(gross_revenue, margin, tax_rate) cost_tax = tax(gross_revenue, tax_rate) return gross_revenue - (cost_product + cost_tax) def net_profit(gross_revenue, other_costs, margin, tax_rate=0.2): cost_product = product_cost(gross_revenue, margin, tax_rate) cost_tax = tax(gross_revenue, tax_rate) return gross_revenue - (cost_product + cost_tax + other_costs) def sales_growth_rate(sales_period_1, sales_period_2): return ((sales_period_2 - sales_period_1) / sales_period_1) * 100 def revenue_per_unit(total_revenue, total_units): return total_revenue / total_units def market_share(company_sales, market_sales): return (company_sales / market_sales) * 100 def retention_rate(customers_repurchasing_current_period, customers_purchasing_previous_period): return (customers_repurchasing_current_period / customers_purchasing_previous_period) * 100 def share_of_shelf_index(products_of_brand_x, total_products): return (products_of_brand_x / total_products) * 100 def product_turnover(units_sold_in_period, average_items_stocked_in_period): return (units_sold_in_period / average_items_stocked_in_period) * 100 def price_index(price_of_product_x, price_of_product_y): return (price_of_product_x / price_of_product_y) * 100 def purchase_intention(people_who_declared_interest, total_people): return (people_who_declared_interest / total_people) * 100 def product_trial_rate(number_of_first_time_purchases, total_purchasers): return (number_of_first_time_purchases / total_purchasers) * 100 def product_repurchase_rate(number_of_repeat_purchasers, total_purchasers): return (number_of_repeat_purchasers / total_purchasers) * 100 def product_consumption_rate(total_items, total_orders): return (total_items / total_orders) * 100 def brand_usage(number_of_brand_purchasers, total_purchasers): return (number_of_brand_purchasers / total_purchasers) * 100 def brand_penetration_rate(number_of_brand_purchasers, total_purchasers): return (number_of_brand_purchasers / total_purchasers) * 100 def product_satisfaction(total_reviews, positive_reviews): return (positive_reviews / total_reviews) * 100 def market_coverage_index(unique_customers_contacted, unique_customers): return (unique_customers_contacted / unique_customers) * 100 def sales_force_efficiency(number_of_orders_from_visits, number_of_visits): return (number_of_orders_from_visits / number_of_visits) * 100 def cpm(total_cost, total_recipients): return (total_cost / total_recipients) * 1000 def cpo(total_cost, total_transactions): return total_cost / total_transactions def cpa(total_cost, total_acquisitions): return total_cost / total_acquisitions def cpc(total_cost, total_clicks): return total_cost / total_clicks def conversion_rate(total_conversions, total_actions): return (total_conversions / total_actions) * 100 def lin_rodnitsky_ratio(avg_cost_per_conversion_all_queries, avg_cost_per_conversion_queries_with_one_conversion_or_more): return avg_cost_per_conversion_all_queries / avg_cost_per_conversion_queries_with_one_conversion_or_more def romi(total_revenue, 
total_marketing_costs): return ((total_revenue - total_marketing_costs) / total_marketing_costs) * 100 def roi(total_revenue, total_marketing_costs, total_other_costs): total_costs = total_marketing_costs + total_other_costs return ((total_revenue - total_costs) / total_costs) * 100 def roas(total_revenue, total_marketing_costs): return total_revenue / total_marketing_costs def focus_index(average_pages_visited_in_section, total_pages_in_section): return (average_pages_visited_in_section / total_pages_in_section) * 100 def stickiness(total_visits, total_visit_duration, total_users): frequency_of_visits = total_visits / total_users average_visit_duration = total_visit_duration / total_visits total_reach = total_users / total_visits return frequency_of_visits * average_visit_duration * total_reach def sessions_with_product_views(total_sessions, sessions_with_product_views): return (sessions_with_product_views / total_sessions) * 100 def engagement_rate(followers_who_engaged, total_followers): return (followers_who_engaged / total_followers) * 100 def dio(average_inventory_cost, cost_of_goods_sold): return (average_inventory_cost / cost_of_goods_sold) * 365 def safety_stock(max_units_sold_daily, avg_units_sold_daily, max_lead_time, avg_lead_time): return (max_units_sold_daily * max_lead_time) - (avg_units_sold_daily * avg_lead_time) def reorder_point(max_units_sold_daily, avg_units_sold_daily, max_lead_time, avg_lead_time, lead_time): safety = safety_stock(max_units_sold_daily, avg_units_sold_daily, max_lead_time, avg_lead_time) return (lead_time * avg_units_sold_daily) + safety def back_order_rate(total_back_orders, total_orders): return (total_back_orders / total_orders) * 100 def sales_velocity(units_sold_last_12m, number_of_days_in_stock, velocity_days=30): return (units_sold_last_12m / number_of_days_in_stock) * velocity_days def accuracy_of_forecast_demand(actual_demand, forecast_demand): return ((actual_demand - forecast_demand) / actual_demand) * 100 def eoq(demand_in_units, cost_of_ordering, cost_of_carrying): return math.sqrt(((demand_in_units * cost_of_ordering) * 2) / cost_of_carrying) def csat(total_responses, positive_responses): return (positive_responses / total_responses) * 100 def nps(total_promoters, total_detractors, total_respondents): return ((total_promoters * 100) / total_respondents) - ((total_detractors * 100) / total_respondents) def ticket_to_order_ratio(total_tickets, total_orders): return (total_tickets / total_orders) * 100
MIT License
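The metric helpers collected in the ecommercetools context above are plain arithmetic, so a short worked example makes the formulas concrete. This is an illustrative sketch with made-up numbers, assuming the functions are imported from the module shown in the record:

# Illustrative numbers only; a 20% tax rate is the functions' default.
gross = 1200.0
print(net_revenue(gross))                                      # 1200 - 1200*0.2 = 960.0
print(aov(total_revenue=960.0, total_orders=12))               # 80.0 average order value
print(roas(total_revenue=960.0, total_marketing_costs=240.0))  # 4.0 revenue per ad dollar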
rapid7/vm-console-client-python
rapid7vmconsole/models/review.py
Review.to_dict
python
def to_dict(self):
    result = {}

    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    if issubclass(Review, dict):
        for key, value in self.items():
            result[key] = value

    return result
Returns the model properties as a dict
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/review.py#L181-L206
import pprint import re import six class Review(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'comment': 'str', '_date': 'str', 'links': 'list[Link]', 'name': 'str', 'user': 'int' } attribute_map = { 'comment': 'comment', '_date': 'date', 'links': 'links', 'name': 'name', 'user': 'user' } def __init__(self, comment=None, _date=None, links=None, name=None, user=None): self._comment = None self.__date = None self._links = None self._name = None self._user = None self.discriminator = None if comment is not None: self.comment = comment if _date is not None: self._date = _date if links is not None: self.links = links if name is not None: self.name = name if user is not None: self.user = user @property def comment(self): return self._comment @comment.setter def comment(self, comment): self._comment = comment @property def _date(self): return self.__date @_date.setter def _date(self, _date): self.__date = _date @property def links(self): return self._links @links.setter def links(self, links): self._links = links @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def user(self): return self._user @user.setter def user(self, user): self._user = user
MIT License
googlecloudplatform/tensorflow-recommendation-wals
airflow/plugins/gae_admin_plugin.py
AppEngineAdminHook.get_version_identifiers
python
def get_version_identifiers(self, project_id, service_id):
    request = self._gaeadmin.apps().services().versions().list(appsId=project_id, servicesId=service_id)
    versions = []
    while request is not None:
        versions_doc = request.execute()
        versions.extend([v['id'] for v in versions_doc['versions']])
        request = self._gaeadmin.apps().services().versions().list_next(request, versions_doc)
    return versions
Get the list of versions of a service on App Engine.

Args:
    project_id: project id
    service_id: service id

Returns:
    the list of version identifiers if successful; raises an error otherwise.
https://github.com/googlecloudplatform/tensorflow-recommendation-wals/blob/2116cd21e4cc77b7380cccc9fee6f2ed606119db/airflow/plugins/gae_admin_plugin.py#L121-L140
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook from airflow.exceptions import AirflowException from airflow.models import BaseOperator from airflow.plugins_manager import AirflowPlugin from airflow.utils.decorators import apply_defaults from apiclient.discovery import build from datetime import datetime from googleapiclient import errors import logging from oauth2client.client import GoogleCredentials import time class AppEngineAdminHook(GoogleCloudBaseHook): def __init__(self, gcp_conn_id='google_cloud_default', delegate_to=None): super(AppEngineAdminHook, self).__init__(gcp_conn_id, delegate_to) self._gaeadmin = self.get_ae_conn() self._svcadmin = self.get_svc_conn() def get_ae_conn(self): credentials = GoogleCredentials.get_application_default() return build('appengine', 'v1', credentials=credentials) def get_svc_conn(self): credentials = GoogleCredentials.get_application_default() return build('servicemanagement', 'v1', credentials=credentials) def create_version(self, project_id, service_id, version_spec): create_request = self._gaeadmin.apps().services().versions().create( appsId=project_id, servicesId=service_id, body=version_spec) response = create_request.execute() op_name = response['name'].split('/')[-1] return self._wait_for_operation_done(project_id, op_name) def migrate_traffic(self, project_id, service_id, new_version): split_config = {'split': {'allocations': {new_version: '1'}}} migrate_request = self._gaeadmin.apps().services().patch( appsId=project_id, servicesId=service_id, updateMask='split', body=split_config) response = migrate_request.execute() op_name = response['name'].split('/')[-1] return self._wait_for_operation_done(project_id, op_name) def get_endpoint_config(self, service_id): resource = self._svcadmin.services().rollouts() list_request = resource.list(serviceName=service_id) response = list_request.execute() config_id = response['rollouts'][0]['rolloutId'] return config_id def get_version(self, project_id, service_id, version): resource = self._gaeadmin.apps().services().versions() get_request = resource.get(appsId=project_id, servicesId=service_id, versionsId=version, view='FULL') response = get_request.execute() return response
Apache License 2.0
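The while-loop in get_version_identifiers is the standard googleapiclient pagination idiom (list / list_next). A generic, hedged sketch of the same pattern, with the collection object passed in so it is not tied to App Engine:

def iter_pages(collection, **list_kwargs):
    # `collection` is any googleapiclient resource exposing list()/list_next(),
    # e.g. gaeadmin.apps().services().versions() in the record above.
    request = collection.list(**list_kwargs)
    while request is not None:
        response = request.execute()
        yield response
        request = collection.list_next(request, response)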
ebagdasa/federated_adaptation
utils/text_load.py
Corpus.tokenize_train
python
def tokenize_train(self, path):
    files = os.listdir(path)
    per_participant_ids = list()
    per_participant_ids_test = list()
    per_participant_different_words = list()
    per_participant_voc_size = list()

    for file in tqdm(files[:self.authors_no]):
        if 'checkpoint' in file:
            continue
        new_path = f'{path}/{file}'
        with open(new_path, 'r') as f:
            diff_word = 0
            tokens = 0
            word_list = list()
            for line in f:
                words = get_word_list(line, self.dictionary)
                tokens += len(words)
                wordidx = [self.dictionary.word2idx[x] for x in words]
                diff_word += sum([i not in word_list for i in wordidx])
                word_list.extend(wordidx)
            ids = torch.LongTensor(word_list)
            ids_test = torch.LongTensor(word_list[len(word_list) // 100 * (100 - self.local_test_perc):])
            if len(ids) >= 10:
                per_participant_ids.append(ids)
                per_participant_ids_test.append(ids_test)
                per_participant_different_words.append(diff_word)
                per_participant_voc_size.append(tokens)

    return per_participant_ids, per_participant_ids_test, per_participant_different_words, per_participant_voc_size
Return per-participant lists of token ids (train and local test), together with the number of distinct words and the total token count for each participant. :param path: directory containing one text file per participant :return:
https://github.com/ebagdasa/federated_adaptation/blob/0c0ae97445fddc635c427ae100854bc70a00a11c/utils/text_load.py#L49-L82
import os import torch import json import re from tqdm import tqdm import random filter_symbols = re.compile('[a-zA-Z]*') class Dictionary(object): def __init__(self): self.word2idx = {} self.idx2word = [] def add_word(self, word): raise ValueError("Please don't call this method, so we won't break the dictionary :) ") def __len__(self): return len(self.idx2word) def get_word_list(line, dictionary): splitted_words = json.loads(line.lower()).split() words = ['<bos>'] for word in splitted_words: word = filter_symbols.search(word)[0] if len(word)>1: if dictionary.word2idx.get(word, False): words.append(word) else: words.append('<unk>') words.append('<eos>') return words class Corpus(object): def __init__(self, params, dictionary): repopath = params['repo_path'] self.path = f'{repopath}/data' authors_no = params['number_of_total_participants'] self.local_test_perc = params['local_test_perc'] self.dictionary = dictionary self.no_tokens = len(self.dictionary) self.authors_no = authors_no self.auxiliary = self.tokenize_aux(os.path.join(self.path, 'test_data.json')) self.train, self.test, self.diff_words, self.voc_size = self.tokenize_train(f'{self.path}/shard_by_author')
MIT License
laszukdawid/pyemd
PyEMD/EEMD.py
EEMD.eemd
python
def eemd(self, S: np.ndarray, T: Optional[np.ndarray] = None, max_imf: int = -1) -> np.ndarray:
    if T is None:
        T = get_timeline(len(S), S.dtype)

    scale = self.noise_width * np.abs(np.max(S) - np.min(S))
    self._S = S
    self._T = T
    self._N = len(S)
    self._scale = scale
    self.max_imf = max_imf

    if self.parallel:
        pool = Pool(processes=self.processes)
        all_IMFs = pool.map(self._trial_update, range(self.trials))
        pool.close()
    else:
        all_IMFs = map(self._trial_update, range(self.trials))

    self._all_imfs = defaultdict(list)
    for (imfs, trend) in all_IMFs:
        if trend is not None:
            self._all_imfs[-1].append(trend)
        for imf_num, imf in enumerate(imfs):
            self._all_imfs[imf_num].append(imf)

    self._all_imfs = dict(self._all_imfs)
    if -1 in self._all_imfs:
        self._all_imfs[len(self._all_imfs)] = self._all_imfs.pop(-1)

    for imf_num in self._all_imfs.keys():
        self._all_imfs[imf_num] = np.array(self._all_imfs[imf_num])

    self.E_IMF = self.ensemble_mean()
    self.residue = S - np.sum(self.E_IMF, axis=0)

    return self.E_IMF
Performs EEMD on provided signal.

For a large number of iterations defined by the `trials` attr, the method performs
:py:meth:`emd` on a signal with added white noise.

Parameters
----------
S : numpy array
    Input signal on which EEMD is performed.
T : numpy array or None, (default: None)
    If none passed samples are numerated.
max_imf : int, (default: -1)
    Defines up to how many IMFs each decomposition should be performed.
    By default (negative value) it decomposes all IMFs.

Returns
-------
eIMF : numpy array
    Set of ensemble IMFs produced from input signal. In general, these do not have
    to be, and most likely will not be, same as IMFs produced using EMD.
https://github.com/laszukdawid/pyemd/blob/3d8ec292cd2ba8cba327d3e0ad576366a8ead6ff/PyEMD/EEMD.py#L141-L212
from __future__ import print_function import logging import numpy as np from collections import defaultdict from multiprocessing import Pool from typing import Dict, List, Optional, Sequence, Tuple, Union from PyEMD.utils import get_timeline class EEMD: logger = logging.getLogger(__name__) noise_kinds_all = ["normal", "uniform"] def __init__(self, trials: int = 100, noise_width: float = 0.05, ext_EMD = None, parallel: bool = False, **kwargs): self.trials = trials self.noise_width = noise_width self.separate_trends = bool(kwargs.get('separate_trends', False)) self.random = np.random.RandomState() self.noise_kind = kwargs.get('noise_kind', 'normal') self.parallel = parallel self.processes = kwargs.get('processes') if self.processes is not None and not self.parallel: self.logger.warning("Passed value for process has no effect when `parallel` is False.") if ext_EMD is None: from PyEMD import EMD self.EMD = EMD(**kwargs) else: self.EMD = ext_EMD self.E_IMF = None self.residue = None self._all_imfs = {} def __call__(self, S: np.ndarray, T: Optional[np.ndarray] = None, max_imf: int = -1) -> np.ndarray: return self.eemd(S, T=T, max_imf=max_imf) def __getstate__(self) -> Dict: self_dict = self.__dict__.copy() if 'pool' in self_dict: del self_dict['pool'] return self_dict def generate_noise(self, scale: float, size: Union[int, Sequence[int]]) -> np.ndarray: if self.noise_kind == "normal": noise = self.random.normal(loc=0, scale=scale, size=size) elif self.noise_kind == "uniform": noise = self.random.uniform(low=-scale/2, high=scale/2, size=size) else: raise ValueError("Unsupported noise kind. Please assigned `noise_kind` to be one of these: {0}".format( str(self.noise_kinds_all))) return noise def noise_seed(self, seed: int) -> None: self.random.seed(seed)
Apache License 2.0
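A minimal usage sketch for the eemd method above. It assumes the PyEMD package from this record is installed; the synthetic two-tone signal is only there to have something to decompose.

import numpy as np
from PyEMD import EEMD

t = np.linspace(0, 1, 200)
s = np.sin(2 * np.pi * 5 * t) + 0.5 * np.sin(2 * np.pi * 20 * t)

eemd = EEMD(trials=50, noise_width=0.05)
eIMFs = eemd.eemd(s, T=t)          # equivalently eemd(s, T=t) via __call__
print(eIMFs.shape[0], "ensemble IMFs; residue shape", eemd.residue.shape)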
biolink/kgx
kgx/utils/kgx_utils.py
expand
python
def expand(
    curie: str, prefix_maps: Optional[List[dict]] = None, fallback: bool = True
) -> str:
    default_curie_maps = [
        get_jsonld_context("monarch_context"),
        get_jsonld_context("obo_context"),
    ]
    if prefix_maps:
        uri = expand_uri(curie, prefix_maps)
        if uri == curie and fallback:
            uri = expand_uri(curie, default_curie_maps)
    else:
        uri = expand_uri(curie, default_curie_maps)
    return uri
Expand a given CURIE to an URI, based on mappings from `prefix_map`.

This method will return the CURIE as the IRI if there is no mapping found.

Parameters
----------
curie: str
    A CURIE
prefix_maps: Optional[List[dict]]
    A list of prefix maps to use for mapping
fallback: bool
    Determines whether to fallback to default prefix mappings, as determined by
    `prefixcommons.curie_util`, when CURIE prefix is not found in `prefix_maps`.

Returns
-------
str
    A URI corresponding to the CURIE
https://github.com/biolink/kgx/blob/247d113d5b593f078afce1951c63eee2a8cc1248/kgx/utils/kgx_utils.py#L264-L299
import importlib import re import time import uuid from enum import Enum from typing import List, Dict, Set, Optional, Any, Union import stringcase from linkml_runtime.linkml_model.meta import ( TypeDefinitionName, ElementName, SlotDefinition, ClassDefinition, TypeDefinition, Element, ) from bmt import Toolkit from cachetools import LRUCache import pandas as pd import numpy as np from prefixcommons.curie_util import contract_uri from prefixcommons.curie_util import expand_uri from kgx.config import get_logger, get_jsonld_context, get_biolink_model_schema from kgx.graph.base_graph import BaseGraph curie_lookup_service = None cache = None log = get_logger() DEFAULT_NODE_CATEGORY = "biolink:NamedThing" DEFAULT_EDGE_PREDICATE = "biolink:related_to" CORE_NODE_PROPERTIES = {"id", "name"} CORE_EDGE_PROPERTIES = {"id", "subject", "predicate", "object", "type"} LIST_DELIMITER = "|" class GraphEntityType(Enum): GRAPH = "graph" NODE = "node" EDGE = "edge" provenance_slot_types = { "knowledge_source": list, "primary_knowledge_source": str, "original_knowledge_source": str, "aggregator_knowledge_source": list, "supporting_data_source": list, "provided_by": list, } column_types = { "publications": list, "qualifiers": list, "category": list, "synonym": list, "same_as": list, "negated": bool, "xrefs": list, } column_types.update(provenance_slot_types) knowledge_provenance_properties = set(provenance_slot_types.keys()) extension_types = {"csv": ",", "tsv": "\t", "csv:neo4j": ",", "tsv:neo4j": "\t"} archive_read_mode = {"tar": "r", "tar.gz": "r:gz", "tar.bz2": "r:bz2"} archive_write_mode = {"tar": "w", "tar.gz": "w:gz", "tar.bz2": "w:bz2"} archive_format = { "r": "tar", "r:gz": "tar.gz", "r:bz2": "tar.bz2", "w": "tar", "w:gz": "tar.gz", "w:bz2": "tar.bz2", } is_provenance_property_multivalued = { "knowledge_source": True, "primary_knowledge_source": False, "original_knowledge_source": False, "aggregator_knowledge_source": True, "supporting_data_source": True, "provided_by": True, } is_property_multivalued = { "id": False, "subject": False, "object": False, "predicate": False, "description": False, "synonym": True, "in_taxon": False, "same_as": True, "name": False, "has_evidence": False, "category": True, "publications": True, "type": False, "relation": False, } is_property_multivalued.update(is_provenance_property_multivalued) def camelcase_to_sentencecase(s: str) -> str: return stringcase.sentencecase(s).lower() def snakecase_to_sentencecase(s: str) -> str: return stringcase.sentencecase(s).lower() def sentencecase_to_snakecase(s: str) -> str: return stringcase.snakecase(s).lower() def sentencecase_to_camelcase(s: str) -> str: return stringcase.pascalcase(stringcase.snakecase(s)) def format_biolink_category(s: str) -> str: if re.match("biolink:.+", s): return s else: formatted = sentencecase_to_camelcase(s) return f"biolink:{formatted}" def format_biolink_slots(s: str) -> str: if re.match("biolink:.+", s): return s else: formatted = sentencecase_to_snakecase(s) return f"biolink:{formatted}" def contract( uri: str, prefix_maps: Optional[List[Dict]] = None, fallback: bool = True ) -> str: curie = uri default_curie_maps = [ get_jsonld_context("monarch_context"), get_jsonld_context("obo_context"), ] if prefix_maps: curie_list = contract_uri(uri, prefix_maps) if len(curie_list) == 0: if fallback: curie_list = contract_uri(uri, default_curie_maps) if curie_list: curie = curie_list[0] else: curie = curie_list[0] else: curie_list = contract_uri(uri, default_curie_maps) if len(curie_list) > 0: curie = curie_list[0] 
return curie
BSD 3-Clause New or Revised License
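A hedged usage sketch for expand() above. The exact IRIs returned depend on the bundled monarch/obo JSON-LD contexts, so the outputs in the comments are only indicative; the custom prefix map is a made-up example.

uri = expand("GO:0008150")        # resolved via the default monarch/obo contexts
custom = expand(
    "EX:123",
    prefix_maps=[{"EX": "https://example.org/ex/"}],
    fallback=False,
)                                 # roughly "https://example.org/ex/123" if expand_uri honours the map
print(uri, custom)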
nok/sklearn-porter
sklearn_porter/utils/Shell.py
Shell._run
python
def _run(method, cmd, cwd=None, shell=True, universal_newlines=True, stderr=STDOUT):
    if not cmd:
        error_msg = 'Passed empty text or list'
        raise AttributeError(error_msg)
    if isinstance(cmd, six.string_types):
        cmd = str(cmd)
    if shell:
        if isinstance(cmd, list):
            cmd = ' '.join(cmd)
    else:
        if isinstance(cmd, str):
            cmd = cmd.strip().split()
    out = method(cmd, shell=shell, cwd=cwd, stderr=stderr, universal_newlines=universal_newlines)
    if isinstance(out, bytes):
        out = out.decode('utf-8')
    return str(out).strip()
Internal wrapper for `call` and `check_output`.
https://github.com/nok/sklearn-porter/blob/8658c6567e28c570d96ab2e858c510f84b1d94dc/sklearn_porter/utils/Shell.py#L13-L31
import six

from subprocess import call
from subprocess import check_output
from subprocess import STDOUT


class Shell(object):

    @staticmethod
MIT License
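A small usage sketch for Shell._run above, passing subprocess.check_output as the method argument (which is presumably how the class's public helpers drive it; only _run is shown in this record):

from subprocess import check_output

out = Shell._run(check_output, "echo hello", shell=True)
print(out)   # "hello" -- bytes are decoded and whitespace stripped by _run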
chuckus/chromewhip
chromewhip/protocol/emulation.py
Emulation.setDefaultBackgroundColorOverride
python
def setDefaultBackgroundColorOverride(cls, color: Optional['DOM.RGBA'] = None, ):
    return (
        cls.build_send_payload("setDefaultBackgroundColorOverride", {
            "color": color,
        }),
        None
    )
Sets or clears an override of the default background color of the frame. This override is used if the content does not specify one.

:param color: RGBA of the default background color. If not specified, any existing override will be cleared.
:type color: DOM.RGBA
https://github.com/chuckus/chromewhip/blob/7249f64f96df3c6ca0859a3da06ce7ddcebbfded/chromewhip/protocol/emulation.py#L112-L126
import logging from typing import Any, Optional, Union from chromewhip.helpers import PayloadMixin, BaseEvent, ChromeTypeBase log = logging.getLogger(__name__) from chromewhip.protocol import dom as DOM from chromewhip.protocol import page as Page from chromewhip.protocol import runtime as Runtime class ScreenOrientation(ChromeTypeBase): def __init__(self, type: Union['str'], angle: Union['int'], ): self.type = type self.angle = angle VirtualTimePolicy = str class Emulation(PayloadMixin): @classmethod def canEmulate(cls): return ( cls.build_send_payload("canEmulate", { }), cls.convert_payload({ "result": { "class": bool, "optional": False }, }) ) @classmethod def clearDeviceMetricsOverride(cls): return ( cls.build_send_payload("clearDeviceMetricsOverride", { }), None ) @classmethod def clearGeolocationOverride(cls): return ( cls.build_send_payload("clearGeolocationOverride", { }), None ) @classmethod def resetPageScaleFactor(cls): return ( cls.build_send_payload("resetPageScaleFactor", { }), None ) @classmethod def setFocusEmulationEnabled(cls, enabled: Union['bool'], ): return ( cls.build_send_payload("setFocusEmulationEnabled", { "enabled": enabled, }), None ) @classmethod def setCPUThrottlingRate(cls, rate: Union['float'], ): return ( cls.build_send_payload("setCPUThrottlingRate", { "rate": rate, }), None ) @classmethod
MIT License
facelessuser/subclrschm
subclrschm/lib/gui/custom_statusbar.py
CustomStatusBar.__init__
python
def __init__(self, parent, name, fields=None):
    field_array = [-1] if not fields else fields[:]
    super(CustomStatusBar, self).__init__(
        parent,
        id=wx.ID_ANY,
        style=wx.STB_DEFAULT_STYLE,
        name=name
    )
    self.sb_setup(field_array)
Init the CustomStatusBar object.
https://github.com/facelessuser/subclrschm/blob/52cf5bc39bac6e3dd6d44061cd0c005cdc9a41d1/subclrschm/lib/gui/custom_statusbar.py#L259-L269
from __future__ import unicode_literals from collections import OrderedDict import wx import wx.lib.agw.supertooltip from .. import util if wx.VERSION > (2, 9, 4): def monkey_patch(): import inspect import re target_line = re.compile(r'([ ]{8})(maxWidth = max\(bmpWidth\+\(textWidth\+self._spacing\*3\), maxWidth\)\n)') tt_source = inspect.getsourcelines(wx.lib.agw.supertooltip.ToolTipWindowBase.OnPaint)[0] count = 0 found = False for line in tt_source: if not found: m = target_line.match(line) if m: tt_source[count] = m.group(0) found = True count += 1 continue tt_source[count] = line[4:] count += 1 exec(''.join(tt_source)) wx.lib.agw.supertooltip.ToolTipWindowBase.OnPaint = locals()['OnPaint'] monkey_patch() class ContextMenu(wx.Menu): def __init__(self, parent, menu, pos): wx.Menu.__init__(self) self._callbacks = {} for i in menu: menuid = wx.NewId() item = wx.MenuItem(self, menuid, i[0]) self._callbacks[menuid] = i[1] self.Append(item) self.Bind(wx.EVT_MENU, self.on_callback, item) parent.PopupMenu(self, pos) def on_callback(self, event): menuid = event.GetId() self._callbacks[menuid](event) event.Skip() class ToolTip(wx.lib.agw.supertooltip.SuperToolTip): def __init__(self, target, message, header="", style="Office 2007 Blue", start_delay=.1): super(ToolTip, self).__init__(message, header=header) self.SetTarget(target) self.ApplyStyle(style) self.SetStartDelay(start_delay) target.tooltip = self def hide(self): if self._superToolTip: self._superToolTip.Destroy() class TimedStatusExtension(object): def set_timed_status(self, text, index=0): if self.text_timer[index].IsRunning(): self.text_timer[index].Stop() else: self.saved_text = self.GetStatusText(index) self.SetStatusText(text, index) self.text_timer[index].Start(5000, oneShot=True) def sb_time_setup(self, field_count): self.field_count = field_count self.saved_text = [""] * field_count self.text_timer = [wx.Timer(self)] * field_count count = 0 for x in self.text_timer: self.Bind(wx.EVT_TIMER, lambda event, index=count: self.clear_text(event, index), self.text_timer[count]) count += 1 def clear_text(self, event, index): self.SetStatusText(self.saved_text, index) def set_status(self, text, index=0): if self.text_timer[index].IsRunning(): self.text_timer[index].Stop() self.SetStatusText(text, index) class IconTrayExtension(object): fields = [-1] def sb_tray_setup(self): self.SetFieldsCount(len(self.fields) + 1) self.SetStatusText('', 0) self.SetStatusWidths(self.fields + [1]) self.sb_icons = OrderedDict() self.Bind(wx.EVT_SIZE, self.on_sb_size) def remove_icon(self, name): if name in self.sb_icons: self.hide_tooltip(name) self.sb_icons[name].Destroy() del self.sb_icons[name] self.place_icons(resize=True) def hide_tooltip(self, name): if self.sb_icons[name].tooltip: self.sb_icons[name].tooltip.hide() def set_icon( self, name, icon, msg=None, context=None, click_right=None, click_left=None, dclick_right=None, dclick_left=None ): if name in self.sb_icons: self.hide_tooltip(name) self.sb_icons[name].Destroy() bmp = wx.StaticBitmap(self) bmp.SetBitmap(label=icon) self.sb_icons[name] = bmp if msg is not None: ToolTip(self.sb_icons[name], msg) if click_left is not None: self.sb_icons[name].Bind(wx.EVT_LEFT_DOWN, click_left) if context is not None: self.sb_icons[name].Bind(wx.EVT_RIGHT_DOWN, lambda e: self.show_menu(name, context)) elif click_right is not None: self.sb_icons[name].Bind(wx.EVT_RIGHT_DOWN, click_right) if dclick_left is not None: self.sb_icons[name].Bind(wx.EVT_LEFT_DCLICK, dclick_left) if dclick_right is not None: 
self.sb_icons[name].Bind(wx.EVT_RIGHT_DCLICK, dclick_right) self.place_icons(resize=True) def show_menu(self, name, context): self.hide_tooltip(name) ContextMenu(self, context, self.sb_icons[name].GetPosition()) def place_icons(self, resize=False): x_offset = 0 if resize: platform = util.platform() if platform in "osx": self.SetStatusWidths([-1, len(self.sb_icons) * 20 + 10]) elif platform == "windows": if len(self.sb_icons): self.SetStatusWidths([-1, (len(self.sb_icons) - 1) * 20 + 1]) else: self.SetStatusWidths([-1, len(self.sb_icons) * 20 + 1]) else: self.SetStatusWidths([-1, len(self.sb_icons) * 20 + 1]) rect = self.GetFieldRect(len(self.fields)) for v in self.sb_icons.values(): v.SetPosition((rect.x + x_offset, rect.y)) v.Hide() v.Show() x_offset += 20 def on_sb_size(self, event): event.Skip() self.place_icons() class CustomStatusExtension(IconTrayExtension, TimedStatusExtension): def sb_setup(self, fields): self.fields = fields self.sb_tray_setup() self.sb_time_setup(len(self.fields)) class CustomStatusBar(wx.StatusBar, CustomStatusExtension):
MIT License
nlesc/yeap16-ai-3d-printing
deepy3d/util.py
get_closest_factors
python
def get_closest_factors(number):
    a = int(np.sqrt(number))
    while number % a != 0:
        a -= 1
    b = number / a
    if a == 1 or b == 1:
        a, b = get_closest_factors(number + 1)
    return a, b
Find the 2 factors of a number that are closest together.
https://github.com/nlesc/yeap16-ai-3d-printing/blob/4f15c1851d819290dc7a922c9470a76ff458945c/deepy3d/util.py#L40-L53
import numpy as np def block_index(num_blocks, len_list): if num_blocks < 1: ValueError('Number of blocks must be larger or equal to 1') if len_list < num_blocks: ValueError('Length of list must be larger than number of blocks.') lin_list = np.linspace(0, num_blocks, len_list, endpoint=False) return np.floor(lin_list).astype('uint8') def limit_mem(): K.get_session().close() cfg = K.tf.ConfigProto() cfg.gpu_options.allow_growth = True K.set_session(K.tf.Session(config=cfg))
Apache License 2.0
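Two worked calls for get_closest_factors above; note that the second factor comes back as a float because of true division, and that a prime input falls through to number + 1.

print(get_closest_factors(12))   # (3, 4.0)  -- 3 x 4 is the tightest factor pair
print(get_closest_factors(13))   # (2, 7.0)  -- 13 is prime, so it recurses on 14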
tonyfischetti/sake
sakelib/build.py
write_shas_to_shastore
python
def write_shas_to_shastore(sha_dict):
    if sys.version_info[0] < 3:
        fn_open = open
    else:
        fn_open = io.open
    with fn_open(".shastore", "w") as fh:
        fh.write("---\n")
        fh.write('sake version: {}\n'.format(constants.VERSION))
        if sha_dict:
            fh.write(yaml.dump(sha_dict))
        fh.write("...")
Writes a sha1 dictionary stored in memory to the .shastore file
https://github.com/tonyfischetti/sake/blob/818f1b1ad97a0d7bcf2c9e0082affb2865b25f26/sakelib/build.py#L116-L130
from __future__ import unicode_literals from __future__ import print_function import glob import hashlib import io import locale from multiprocessing import Pool import networkx as nx import os.path import shlex from subprocess import Popen, PIPE import sys import yaml from . import acts from . import constants ERROR_FN = sys.stderr.write def check_shastore_version(from_store, settings): sprint = settings["sprint"] error = settings["error"] sprint("checking .shastore version for potential incompatibilities", level="verbose") if not from_store or 'sake version' not in from_store: errmes = ["Since you've used this project last, a new version of ", "sake was installed that introduced backwards incompatible", " changes. Run 'sake clean', and rebuild before continuing\n"] errmes = " ".join(errmes) error(errmes) sys.exit(1) def get_sha(a_file, settings=None): if settings: error = settings["error"] else: error = ERROR_FN try: BLOCKSIZE = 65536 hasher = hashlib.sha1() with io.open(a_file, "rb") as fh: buf = fh.read(BLOCKSIZE) while len(buf) > 0: hasher.update(buf) buf = fh.read(BLOCKSIZE) the_hash = hasher.hexdigest() except IOError: errmes = "File '{}' could not be read! Exiting!".format(a_file) error(errmes) sys.exit(1) except: errmes = "Unspecified error returning sha1 hash. Exiting!" error(errmes) sys.exit(1) return the_hash
MIT License
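What write_shas_to_shastore above leaves on disk, sketched with a placeholder hash (the real values depend on the file hashed and on constants.VERSION):

write_shas_to_shastore({"output.txt": "da39a3ee5e6b4b0d3255bfef95601890afd80709"})
# .shastore now reads roughly:
# ---
# sake version: <constants.VERSION>
# output.txt: da39a3ee5e6b4b0d3255bfef95601890afd80709
# ...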
flow-dev/robustvideomatting
inference.py
auto_downsample_ratio
python
def auto_downsample_ratio(h, w):
    return min(512 / max(h, w), 1)
Automatically find a downsample ratio so that the largest side of the resolution is at most 512 px.
https://github.com/flow-dev/robustvideomatting/blob/b8848d58188fcc1e56edc7c8636aabdae1971284/inference.py#L154-L158
import torch import os from torch.utils.data import DataLoader from torchvision import transforms from typing import Optional, Tuple from tqdm.auto import tqdm from inference_utils import VideoReader, VideoWriter, ImageSequenceReader, ImageSequenceWriter def convert_video(model, input_source: str, input_resize: Optional[Tuple[int, int]] = None, downsample_ratio: Optional[float] = None, output_type: str = 'video', output_composition: Optional[str] = None, output_alpha: Optional[str] = None, output_foreground: Optional[str] = None, output_video_mbps: Optional[float] = None, seq_chunk: int = 1, num_workers: int = 0, progress: bool = True, device: Optional[str] = None, dtype: Optional[torch.dtype] = None): assert downsample_ratio is None or (downsample_ratio > 0 and downsample_ratio <= 1), 'Downsample ratio must be between 0 (exclusive) and 1 (inclusive).' assert any([output_composition, output_alpha, output_foreground]), 'Must provide at least one output.' assert output_type in ['video', 'png_sequence'], 'Only support "video" and "png_sequence" output modes.' assert seq_chunk >= 1, 'Sequence chunk must be >= 1' assert num_workers >= 0, 'Number of workers must be >= 0' assert output_video_mbps == None or output_type == 'video', 'Mbps is not available for png_sequence output.' if input_resize is not None: transform = transforms.Compose([ transforms.Resize(input_resize[::-1]), transforms.ToTensor() ]) else: transform = transforms.ToTensor() if os.path.isfile(input_source): source = VideoReader(input_source, transform) else: source = ImageSequenceReader(input_source, transform) reader = DataLoader(source, batch_size=seq_chunk, pin_memory=True, num_workers=num_workers) if output_type == 'video': frame_rate = source.frame_rate if isinstance(source, VideoReader) else 30 output_video_mbps = 1 if output_video_mbps is None else output_video_mbps if output_composition is not None: writer_com = VideoWriter( path=output_composition, frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000)) if output_alpha is not None: writer_pha = VideoWriter( path=output_alpha, frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000)) if output_foreground is not None: writer_fgr = VideoWriter( path=output_foreground, frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000)) else: if output_composition is not None: writer_com = ImageSequenceWriter(output_composition, 'png') if output_alpha is not None: writer_pha = ImageSequenceWriter(output_alpha, 'png') if output_foreground is not None: writer_fgr = ImageSequenceWriter(output_foreground, 'png') model = model.eval() if device is None or dtype is None: param = next(model.parameters()) dtype = param.dtype device = param.device if (output_composition is not None) and (output_type == 'video'): bgr = torch.tensor([120, 255, 155], device=device, dtype=dtype).div(255).view(1, 1, 3, 1, 1) try: with torch.no_grad(): bar = tqdm(total=len(source), disable=not progress, dynamic_ncols=True) rec = [None] * 4 for src in reader: if downsample_ratio is None: downsample_ratio = auto_downsample_ratio(*src.shape[2:]) src = src.to(device, dtype, non_blocking=True).unsqueeze(0) fgr, pha, *rec = model(src, *rec, downsample_ratio) if output_foreground is not None: writer_fgr.write(fgr[0]) if output_alpha is not None: writer_pha.write(pha[0]) if output_composition is not None: if output_type == 'video': com = fgr * pha + bgr * (1 - pha) else: fgr = fgr * pha.gt(0) com = torch.cat([fgr, pha], dim=-3) writer_com.write(com[0]) bar.update(src.size(1)) finally: if 
output_composition is not None: writer_com.close() if output_alpha is not None: writer_pha.close() if output_foreground is not None: writer_fgr.close()
Apache License 2.0
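Two worked calls for auto_downsample_ratio above:

print(auto_downsample_ratio(1080, 1920))   # 512/1920 ~ 0.267 -> the 1920 px side shrinks to 512 px
print(auto_downsample_ratio(480, 360))     # 1 -> already within 512 px, no downsampling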
avast/retdec-regression-tests-framework
tests/parsers/c_parser/stmts/statement_tests.py
StatementTests.get_return_stmt
python
def get_return_stmt(self, code):
    func = self.insert_into_function_body(code)
    return func.return_stmts[0]
Returns the first return stmt in the given code.
https://github.com/avast/retdec-regression-tests-framework/blob/a8d024475bf76cd6acdee3c9df3a3d38a2ec63df/tests/parsers/c_parser/stmts/statement_tests.py#L52-L55
from unittest import mock from regression_tests.parsers.c_parser.stmts.break_stmt import BreakStmt from regression_tests.parsers.c_parser.stmts.continue_stmt import ContinueStmt from regression_tests.parsers.c_parser.stmts.do_while_loop import DoWhileLoop from regression_tests.parsers.c_parser.stmts.empty_stmt import EmptyStmt from regression_tests.parsers.c_parser.stmts.for_loop import ForLoop from regression_tests.parsers.c_parser.stmts.goto_stmt import GotoStmt from regression_tests.parsers.c_parser.stmts.if_stmt import IfStmt from regression_tests.parsers.c_parser.stmts.return_stmt import ReturnStmt from regression_tests.parsers.c_parser.stmts.statement import Statement from regression_tests.parsers.c_parser.stmts.switch_stmt import SwitchStmt from regression_tests.parsers.c_parser.stmts.while_loop import WhileLoop from tests.parsers.c_parser import WithModuleTests class StatementTests(WithModuleTests): def insert_into_function_body(self, code): return self.get_func(""" void func() { %s } """ % code, 'func') def get_for_loop(self, code): func = self.insert_into_function_body(code) return func.for_loops[0] def get_while_loop(self, code): func = self.insert_into_function_body(code) return func.while_loops[0] def get_do_while_loop(self, code): func = self.insert_into_function_body(code) return func.do_while_loops[0] def get_if_stmt(self, code): func = self.insert_into_function_body(code) return func.if_stmts[0]
MIT License
openshift/kuryr-kubernetes
kuryr_kubernetes/controller/drivers/base.py
PodSubnetsDriver.get_subnets
python
def get_subnets(self, pod, project_id):
    raise NotImplementedError()
Get subnets for Pod.

:param pod: dict containing Kubernetes Pod object
:param project_id: OpenStack project ID
:return: dict containing the mapping 'subnet_id' -> 'network' for all
         the subnets we want to create ports on, where 'network' is an
         `os_vif.network.Network` object containing a single
         `os_vif.subnet.Subnet` object corresponding to the 'subnet_id'
https://github.com/openshift/kuryr-kubernetes/blob/7b2e7f83b91fa711d1a506c451be8f1143cdcd86/kuryr_kubernetes/controller/drivers/base.py#L148-L158
import abc from kuryr.lib._i18n import _ from stevedore import driver as stv_driver from kuryr_kubernetes import config _DRIVER_NAMESPACE_BASE = 'kuryr_kubernetes.controller.drivers' _DRIVER_MANAGERS = {} _MULTI_VIF_DRIVERS = [] class DriverBase(object): @classmethod def get_instance(cls, specific_driver=None, scope='default'): alias = cls.ALIAS if specific_driver: driver_key = '{}:{}:{}'.format(alias, specific_driver, scope) else: driver_key = '{}:_from_cfg:{}'.format(alias, scope) try: manager = _DRIVER_MANAGERS[driver_key] except KeyError: driver_name = (specific_driver or config.CONF.kubernetes[alias + '_driver']) manager = stv_driver.DriverManager( namespace="%s.%s" % (_DRIVER_NAMESPACE_BASE, alias), name=driver_name, invoke_on_load=True) _DRIVER_MANAGERS[driver_key] = manager driver = manager.driver if not isinstance(driver, cls): raise TypeError(_("Invalid %(alias)r driver type: %(driver)s, " "must be a subclass of %(type)s") % { 'alias': alias, 'driver': driver.__class__.__name__, 'type': cls}) return driver def __str__(self): return self.__class__.__name__ class PodProjectDriver(DriverBase, metaclass=abc.ABCMeta): ALIAS = 'pod_project' @abc.abstractmethod def get_project(self, pod): raise NotImplementedError() class ServiceProjectDriver(DriverBase, metaclass=abc.ABCMeta): ALIAS = 'service_project' @abc.abstractmethod def get_project(self, service): raise NotImplementedError() class NamespaceProjectDriver(DriverBase, metaclass=abc.ABCMeta): ALIAS = 'namespace_project' @abc.abstractmethod def get_project(self, namespace): raise NotImplementedError() class PodSubnetsDriver(DriverBase, metaclass=abc.ABCMeta): ALIAS = 'pod_subnets' @abc.abstractmethod
Apache License 2.0
kcyu2014/eval-nas
search_policies/cnn/random_policy/nasbench_weight_sharing_policy.py
NasBenchWeightSharingPolicy.run
python
def run(self):
    train_queue, valid_queue, test_queue, criterion = self.initialize_run()
    args = self.args
    model, optimizer, scheduler = self.initialize_model()
    fitness_dict = {}
    self.optimizer = optimizer
    self.scheduler = scheduler
    # note: the original format string had no placeholder for the method name
    logging.info(">> Begin the search with supernet method: {}".format(args.supernet_train_method))
    for epoch in range(args.epochs):
        scheduler.step()
        lr = scheduler.get_lr()[0]
        train_acc, train_obj = self.train_fn(train_queue, valid_queue, model, criterion, optimizer, lr)
        self.logging_fn(train_acc, train_obj, epoch, 'Train', display_dict={'lr': lr})
        valid_acc, valid_obj = self.validate_model(model, valid_queue, self.model_spec_id, self.model_spec)
        self.logging_fn(valid_acc, valid_obj, epoch, 'Valid')
        if not self.check_should_save(epoch):
            continue
        self.save_duplicate_arch_pool('valid', epoch)
        fitness_dict = self.evaluate(epoch, test_queue, fitnesses_dict=fitness_dict, train_queue=train_queue)
        utils.save_checkpoint(model, optimizer, self.running_stats, self.exp_dir)
        self.save_results(epoch, rank_details=True)
    ep_k = [k for k in self.ranking_per_epoch.keys()][-1]
    best_id = self.ranking_per_epoch[ep_k][-1][1].geno_id
    return best_id, self.search_space.nasbench_model_specs[best_id]
Run the entire weight-sharing training and search procedure. :return: the best genotype id and its NASBench model spec
https://github.com/kcyu2014/eval-nas/blob/385376a3ef96336b54ee7e696af1d02b97aa5c32/search_policies/cnn/random_policy/nasbench_weight_sharing_policy.py#L141-L177
import os import gc import logging import operator import IPython import shutil import numpy as np import torch from functools import partial from collections import namedtuple, OrderedDict, deque import utils from search_policies.cnn.cnn_general_search_policies import CNNSearchPolicy from search_policies.cnn.enas_policy.enas_micro.data.data import RepeatedDataLoader from search_policies.cnn.search_space.nas_bench.nasbench_search_space import NASbenchSearchSpace, NasBenchSearchSpaceLinear, NasBenchSearchSpaceSubsample, NasBenchSearchSpaceICLRInfluenceWS from search_policies.cnn import model as model_module import search_policies.cnn.procedures as procedure_ops from search_policies.cnn.search_space.nas_bench.util import change_model_spec from search_policies.cnn.utils import AverageMeter from visualization.process_data import tensorboard_summarize_list Rank = namedtuple('Rank', 'valid_acc valid_obj geno_id gt_rank') class NasBenchWeightSharingPolicy(CNNSearchPolicy): trained_model_spec_ids = [] eval_result = OrderedDict() model_spec = None model_spec_id = None @property def nasbench_model_specs(self): return self.search_space.nasbench_hashs @property def nasbench_hashs(self): return self.search_space.nasbench_hashs @property def evaluate_model_spec_ids(self): return self.search_space.evaluate_model_spec_ids def evaluate_model_spec_id_pool(self): return self.search_space.evaluate_model_spec_id_pool() def model_spec_by_id(self, mid): return self.search_space.nasbench_model_specs[mid] def random_sampler(self, model, architect, args): rand_spec_id, rand_spec = self.search_space.random_topology() self.model_spec_id = rand_spec_id self.model_spec = rand_spec new_model = change_model_spec(model, rand_spec) self.trained_model_spec_ids.append(rand_spec_id) return new_model def op_sampler(self, model, architect, args): spec = self.model_spec ops = spec.ops avail_ops = self.search_space.available_ops try: op_vs_choice = np.tile(np.arange(len(avail_ops)), (len(ops)-2, 1)) op_vs_choice = np.apply_along_axis(np.random.permutation, 1, op_vs_choice).transpose() for i in range(len(avail_ops)): new_ops = [avail_ops[ind] for ind in op_vs_choice[i]] spec.ops = ['input',] + new_ops + ['output'] yield change_model_spec(model, spec) except ValueError as e: logging.warning(f'Op sampler: received exception {e}, return the original model without any op sampling.') yield model def __init__(self, args, full_dataset=False): super(NasBenchWeightSharingPolicy, self).__init__( args=args, sub_dir_path='{}_SEED_{}'.format(args.supernet_train_method, args.seed) ) self.args = args if args.search_space == 'nasbench': self.model_fn = model_module.NasBenchNetSearch self.search_space = NASbenchSearchSpace(args, full_dataset=full_dataset) elif args.search_space == 'nasbench_linear': self.model_fn = model_module.NasBenchNetSearch self.search_space = NasBenchSearchSpaceLinear(args) elif args.search_space == 'nasbench_subspace': self.model_fn = model_module.NasBenchNetSearch self.search_space = NasBenchSearchSpaceSubsample(args) elif args.search_space == 'nasbench_iclr_wsinfluence': self.model_fn = model_module.NasBenchNetSearch self.search_space = NasBenchSearchSpaceICLRInfluenceWS(args) else: raise NotImplementedError("Other search space not supported at this moment.") self.counter = 0
MIT License
virgesmith/ukcensusapi
ukcensusapi/Nomisweb.py
_get_api_key
python
def _get_api_key(cache_dir):
    filename = cache_dir / "NOMIS_API_KEY"
    if os.path.isfile(str(filename)):
        with open(str(filename), "r") as file:
            content = file.readlines()
            return None if len(content) == 0 else content[0].replace("\n", "")
    return os.environ.get("NOMIS_API_KEY")
Look for key in file NOMIS_API_KEY in cache dir, falling back to env var
https://github.com/virgesmith/ukcensusapi/blob/b78a753375665d1aa05c0d30813d7e533834d015/ukcensusapi/Nomisweb.py#L21-L31
import os import json import hashlib import warnings from pathlib import Path from collections import OrderedDict from urllib import request from urllib.error import HTTPError from urllib.error import URLError from urllib.parse import urlencode from socket import timeout import pandas as pd import ukcensusapi.utils as utils
MIT License
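The lookup order implemented by _get_api_key above, sketched as a call; the cache directory path here is an assumption for illustration.

from pathlib import Path

key = _get_api_key(Path.home() / ".ukcensusapi")
# 1. if <cache_dir>/NOMIS_API_KEY exists, the first line of that file is returned
#    (or None when the file is empty)
# 2. otherwise the NOMIS_API_KEY environment variable is used, or None if unset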
clericpy/torequests
torequests/dummy.py
Loop.wait_all_tasks_done
python
def wait_all_tasks_done(self, timeout=NotSet, delay: float = 0.5, interval: float = 0.1):
    timeout = self._timeout if timeout is NotSet else timeout
    timeout = timeout or float("inf")
    start_time = time_time()
    time_sleep(delay)
    while 1:
        if not self.todo_tasks:
            return self.all_tasks
        if time_time() - start_time > timeout:
            return self.done_tasks
        time_sleep(interval)
Block until all tasks are done. Only to be used while the loop is running in a single non-main thread; this is a plain polling wait, not a smart one.
https://github.com/clericpy/torequests/blob/e57ce331aa850db45c198dc90b9d01e437384b61/torequests/dummy.py#L302-L316
from asyncio import (Future, Queue, Task, TimeoutError, as_completed, gather, get_event_loop, iscoroutine, new_event_loop, sleep, wait, wait_for) from asyncio.futures import _chain_future from concurrent.futures import ALL_COMPLETED from functools import wraps from time import sleep as time_sleep from time import time as time_time from typing import (Callable, Coroutine, Dict, List, Optional, Sequence, Set, Union) from urllib.parse import urlparse from aiohttp import BasicAuth, ClientError, ClientSession, ClientTimeout from ._py3_patch import (NewResponse, NotSet, _ensure_can_be_await, _exhaust_simple_coro, _py36_all_task_patch, logger) from .exceptions import FailureException, ValidationError from .frequency_controller.async_tools import AsyncFrequency as Frequency from .main import Error, NewFuture, Pool, ProcessPool __all__ = "NewTask Loop Asyncme coros Requests Workshop".split(" ") class NewTask(Task): _PENDING = "PENDING" _CANCELLED = "CANCELLED" _FINISHED = "FINISHED" _RESPONSE_ARGS = ("encoding", "request_encoding", "content") def __init__(self, coro, *, loop=None, callback: Union[Callable, Sequence] = None, extra_args=None): assert iscoroutine(coro), repr(coro) super().__init__(coro, loop=loop) self._callback_result = NotSet self.extra_args = extra_args or () self.task_start_time = time_time() self.task_end_time = 0.0 self.task_cost_time = 0.0 if callback: if not isinstance(callback, (list, tuple, set)): callback = [callback] self.add_done_callback(self.set_task_time) for fn in callback: self.add_done_callback(self.wrap_callback(fn)) @staticmethod def wrap_callback(function: Callable) -> Callable: @wraps(function) def wrapped(task): task._callback_result = function(task) return task._callback_result return wrapped @staticmethod def set_task_time(task: 'NewTask'): task.task_end_time = time_time() task.task_cost_time = task.task_end_time - task.task_start_time @property def _done_callbacks(self): return self._callbacks @property def cx(self): return self.callback_result @property def callback_result(self): if self._state == self._PENDING: self._loop.run_until_complete(self) if self._callback_result is NotSet: result = self.result() else: result = self._callback_result return result @property def x(self): if self._state == self._PENDING: self._loop.run_until_complete(self) return self.result() def __getattr__(self, name): return getattr(self.x, name) def __setattr__(self, name, value): if name in self._RESPONSE_ARGS: self.x.__setattr__(name, value) else: object.__setattr__(self, name, value) class Loop: def __init__(self, n: int = None, interval: float = 0, timeout: Optional[float] = None, default_callback: Optional[Callable] = None, loop=None, **kwargs): self._loop = loop self.default_callback = default_callback self.async_running = False self._timeout = timeout self.frequency = Frequency(n, interval) @property def loop(self): if self._loop is None: try: self._loop = get_event_loop() except RuntimeError: self._loop = new_event_loop() elif self._loop.is_closed(): self._loop = new_event_loop() return self._loop def _wrap_coro_function_with_frequency(self, coro_func): @wraps(coro_func) async def new_coro_func(*args, **kwargs): if self.frequency: async with self.frequency: result = await coro_func(*args, **kwargs) return result else: result = await coro_func(*args, **kwargs) return result return new_coro_func def run_in_executor(self, executor=None, func=None, *args): return self.loop.run_in_executor(executor, func, *args) def run_in_thread_pool(self, pool_size=None, func=None, *args): 
executor = Pool(pool_size) return self.loop.run_in_executor(executor, func, *args) def run_in_process_pool(self, pool_size=None, func=None, *args): executor = ProcessPool(pool_size) return self.loop.run_in_executor(executor, func, *args) def run_coroutine_threadsafe(self, coro, loop=None, callback=None): if not iscoroutine(coro): raise TypeError("A await in coroutines. object is required") loop = loop or self.loop future = NewFuture(callback=callback) def callback_func(): try: _chain_future(NewTask(coro, loop=loop), future) except Exception as exc: if future.set_running_or_notify_cancel(): future.set_exception(exc) raise loop.call_soon_threadsafe(callback_func) return future def apply(self, coro_function: Callable, args: Optional[Sequence] = None, kwargs: Optional[dict] = None, callback: Optional[Callable] = None): args = args or () kwargs = kwargs or {} coro = self._wrap_coro_function_with_frequency(coro_function)(*args, **kwargs) return self.submit(coro, callback=callback) def submit(self, coro, callback: Optional[Callable] = None): callback = callback or self.default_callback if self.async_running: return self.run_coroutine_threadsafe(coro, callback=callback) else: return NewTask(coro, loop=self.loop, callback=callback) def submitter(self, f: Callable) -> Callable: f = self._wrap_coro_function_with_frequency(f) @wraps(f) def wrapped(*args, **kwargs): return self.submit(f(*args, **kwargs)) return wrapped @property def x(self): return self.run() async def wait(self, fs, timeout=None, return_when=ALL_COMPLETED): if fs: return await wait(fs, timeout=timeout, return_when=return_when) @property def todo_tasks(self) -> List[Task]: tasks = [ task for task in self.all_tasks if task._state == NewTask._PENDING ] return tasks @property def done_tasks(self) -> List[Task]: tasks = [ task for task in self.all_tasks if task._state != NewTask._PENDING ] return tasks def run(self, tasks: List[Task] = None, timeout=NotSet): timeout = self._timeout if timeout is NotSet else timeout if self.async_running or self.loop.is_running(): return self.wait_all_tasks_done(timeout) else: tasks = [task for task in tasks or self.todo_tasks] return self.loop.run_until_complete( self.wait(tasks, timeout=timeout))
MIT License
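wait_all_tasks_done above is a simple polling loop: sleep once for delay, then re-check the pending tasks every interval seconds until they drain or timeout elapses. A standalone sketch of the same control flow, with the Loop internals replaced by a callable:

import time

def wait_until_drained(pending, timeout=float("inf"), delay=0.5, interval=0.1):
    # `pending` plays the role of `self.todo_tasks`, e.g. lambda: loop.todo_tasks
    start = time.time()
    time.sleep(delay)
    while pending():
        if time.time() - start > timeout:
            return False          # timed out with work still outstanding
        time.sleep(interval)
    return True                   # everything finished within the timeout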
ying-wen/malib_deprecated
malib/utils/tf_utils.py
soft_variables_update
python
def soft_variables_update(
    source_variables, target_variables, tau=1.0, sort_variables_by_name=False, name=None
):
    if tau < 0 or tau > 1:
        raise ValueError("Input `tau` should be in [0, 1].")
    updates = []

    op_name = "soft_variables_update"
    if name is not None:
        op_name = "{}_{}".format(name, op_name)

    if tau == 0.0 or not source_variables or not target_variables:
        return tf.no_op(name=op_name)

    if sort_variables_by_name:
        source_variables = sorted(source_variables, key=lambda x: x.name)
        target_variables = sorted(target_variables, key=lambda x: x.name)

    for (v_s, v_t) in zip(source_variables, target_variables):
        v_t.shape.assert_is_compatible_with(v_s.shape)
        if tau == 1.0:
            update = v_t.assign(v_s)
        else:
            update = v_t.assign((1 - tau) * v_t + tau * v_s)
        updates.append(update)

    return tf.group(*updates, name=op_name)
Performs a soft/hard update of variables from the source to the target.

For each variable v_t in target variables and its corresponding variable v_s
in source variables, a soft update is:
    v_t = (1 - tau) * v_t + tau * v_s
When tau is 1.0 (the default), then it does a hard update:
    v_t = v_s

Args:
    source_variables: list of source variables.
    target_variables: list of target variables.
    tau: A float scalar in [0, 1]. When tau is 1.0 (the default), we do a hard update.
    sort_variables_by_name: A bool, when True would sort the variables by name
        before doing the update.
    name: A string, name.

Returns:
    An operation that updates target variables from source variables.

Raises:
    ValueError: if tau is not in [0, 1].
https://github.com/ying-wen/malib_deprecated/blob/875338b81c4d87064ad31201f461ef742db05f25/malib/utils/tf_utils.py#L7-L48
import tensorflow as tf

EPS = 1e-6
MIT License
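A numeric illustration of the soft-update rule from soft_variables_update above, v_t <- (1 - tau) * v_t + tau * v_s. Running it under TF2 eager execution is an assumption here (the original returns graph-style tf.group ops); in eager mode the assignment happens immediately:

import tensorflow as tf

source = [tf.Variable([1.0, 2.0])]
target = [tf.Variable([0.0, 0.0])]

soft_variables_update(source, target, tau=0.1)
print(target[0].numpy())   # ~[0.1, 0.2]; calling it again moves the target to ~[0.19, 0.38], and so on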
dedsecinside/awesome-scripts
Machine Learning & AI/Linear_Regression_with_Gradient_Descent.py
r2_alpha
python
def r2_alpha(r2, alphas):
    plt.plot(alphas, r2)
    plt.title('R^2 vs. Learning Rate')
    print(max(r2))
    print(np.linspace(0.001, 1, 10)[r2.index(max(r2))])
Plot R^2 against the learning rate and print the best R^2 together with the learning rate that produced it.

Args:
    r2 (list): R^2 value obtained for each learning rate.
    alphas (array): learning rates that were tried.
https://github.com/dedsecinside/awesome-scripts/blob/856835e5ff5f8a6af2d74bb25800c620feb712e3/Machine Learning & AI/Linear_Regression_with_Gradient_Descent.py#L150-L165
import random import matplotlib.pyplot as plt import numpy as np def sse(n,a0,a1,x,y): s=0 mean=np.mean(y) for i in range(n): s+=(a0+a1*x[i]-mean)**2 return s/(2*n) def cost(n,a0,a1,x,y,ch,p=2): s=0; if ch=='sum-of-squares': for i in range(n): s+=(a0+a1*x[i]-y[i])**2 return s/(2*n) if ch=='l-p norm': for i in range(n): s+=abs(a0+a1*x[i]-y[i])**p return s**(1/p) def dela0(n,a0,a1,x,y): s=0; for i in range(n): s+=(a0+a1*x[i]-y[i]) return s/n def dela1(n,a0,a1,x,y): s=0; for i in range(n): s+=(a0+a1*x[i]-y[i])*x[i] return s/n def predict(x, y, alphas=np.linspace(0.001,1,10), it=1000): mx=max(x) my=max(y) for i in range(len(x)): x[i]/=mx y[i]/=my coeff, r2, cos = grad_desc(x, y, alphas, it) coeff, x, y = rescale(x, y, coeff, mx, my) r2_alpha(r2, alphas) plot_predict(x, y, coeff, r2, cos, it) a0=coeff[r2.index(max(r2))][0] a1=coeff[r2.index(max(r2))][1] pred=a0+x*a1 return pred def grad_desc(x, y, alphas, it): r2=[] coeff=[] for alpha in alphas: cos=[] a0=random.random() a1=random.random() for i in range(it): temp0=a0-alpha*dela0(len(x),a0,a1,x,y) temp1=a1-alpha*dela1(len(x),a0,a1,x,y) a0=temp0 a1=temp1 cos.append(cost(len(x),a0,a1,x,y,ch='sum-of-squares')) r2.append(1-(cos[-1]/(cos[-1]+sse(len(x),a0,a1,x,y)))) coeff.append([a0,a1]) return coeff, r2, cos
MIT License
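An end-to-end sketch for the gradient-descent helpers in the record above, calling predict() on synthetic data. The slope, intercept and noise level are arbitrary, and predict() relies on rescale/plot_predict helpers from the same file that are not shown in the record.

import numpy as np

x = np.linspace(1, 10, 50)
y = 3.0 * x + 2.0 + np.random.normal(0, 0.5, size=50)

pred = predict(x, y, alphas=np.linspace(0.001, 1, 10), it=1000)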
ing-bank/skorecard
skorecard/features_bucket_mapping.py
merge_features_bucket_mapping
python
def merge_features_bucket_mapping(a: FeaturesBucketMapping, b: FeaturesBucketMapping) -> FeaturesBucketMapping:
    assert isinstance(a, FeaturesBucketMapping)
    assert isinstance(b, FeaturesBucketMapping)

    cols_in_both = [col for col in a.columns if col in b.columns]
    cols_in_a = [col for col in a.columns if col not in b.columns]
    cols_in_b = [col for col in b.columns if col not in a.columns]

    features_bucket_mapping = FeaturesBucketMapping()

    for col in cols_in_both:
        c = merge_bucket_mapping(a.get(col), b.get(col))
        features_bucket_mapping.append(c)
    for col in cols_in_a:
        features_bucket_mapping.append(a.get(col))
    for col in cols_in_b:
        features_bucket_mapping.append(b.get(col))

    return features_bucket_mapping
Merge two sets of sequential FeaturesBucketMapping. Features that appear in only one of the two mappings are added as-is.
https://github.com/ing-bank/skorecard/blob/8ab8d38db9385aab049a7a8bef4d5f235d3f46ce/skorecard/features_bucket_mapping.py#L161-L186
import yaml import dataclasses from skorecard.bucket_mapping import BucketMapping, merge_bucket_mapping class FeaturesBucketMapping: def __init__(self, maps=[]): self.maps = {} if isinstance(maps, list): for bucketmap in maps: self.append(bucketmap) if isinstance(maps, dict): for _, bucketmap in maps.items(): if not isinstance(bucketmap, BucketMapping): bucketmap = BucketMapping(**bucketmap) self.append(bucketmap) def __repr__(self): class_name = self.__class__.__name__ maps = list(self.maps.values()) return f"{class_name}({maps})" def __len__(self): return len(self.maps) def __eq__(self, other): return self.maps == other.maps def __getitem__(self, key): return self.maps[key] def __setitem__(self, key, value): self.maps[key] = value def get(self, col: str): return self.maps[col] def append(self, bucketmap: BucketMapping) -> None: assert isinstance(bucketmap, BucketMapping) self.maps[bucketmap.feature_name] = bucketmap def load_yml(self) -> None: raise NotImplementedError("todo") def save_yml(self, file) -> None: if isinstance(file, str): file = open(file, "w") yaml.safe_dump(self.as_dict(), file) def load_dict(self, obj): assert isinstance(obj, dict) self.maps = {} for feature, bucketmap in obj.items(): self.append(BucketMapping(**bucketmap)) def as_dict(self): return {k: dataclasses.asdict(v) for k, v in self.maps.items()} @property def columns(self): return list(self.as_dict().keys())
MIT License
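A hedged usage sketch for merge_features_bucket_mapping above. prebucketer_map and bucketer_map are placeholders for FeaturesBucketMapping instances produced by two sequential bucketing steps; save_yml comes from the FeaturesBucketMapping class shown in the record.

merged = merge_features_bucket_mapping(
    prebucketer_map,   # mapping from the first (pre-)bucketing step
    bucketer_map,      # mapping from the step fitted on its output
)
merged.save_yml("buckets.yml")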
awslabs/aws-data-api
vendor/tornado/template.py
BaseLoader.__init__
python
def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None, whitespace=None):
    self.autoescape = autoescape
    self.namespace = namespace or {}
    self.whitespace = whitespace
    self.templates = {}
    self.lock = threading.RLock()
Construct a template loader.

:arg str autoescape: The name of a function in the template
    namespace, such as "xhtml_escape", or ``None`` to disable
    autoescaping by default.
:arg dict namespace: A dictionary to be added to the default template
    namespace, or ``None``.
:arg str whitespace: A string specifying default behavior for
    whitespace in templates; see `filter_whitespace` for options.
    Default is "single" for files ending in ".html" and ".js" and
    "all" for other files.

.. versionchanged:: 4.3
   Added ``whitespace`` parameter.
https://github.com/awslabs/aws-data-api/blob/81f6ad1fd89935fcec600ced2b404f37d87254fe/vendor/tornado/template.py#L385-L411
from __future__ import absolute_import, division, print_function import datetime import linecache import os.path import posixpath import re import threading from tornado import escape from tornado.log import app_log from tornado.util import ObjectDict, exec_in, unicode_type, PY3 if PY3: from io import StringIO else: from cStringIO import StringIO _DEFAULT_AUTOESCAPE = "xhtml_escape" _UNSET = object() def filter_whitespace(mode, text): if mode == 'all': return text elif mode == 'single': text = re.sub(r"([\t ]+)", " ", text) text = re.sub(r"(\s*\n\s*)", "\n", text) return text elif mode == 'oneline': return re.sub(r"(\s+)", " ", text) else: raise Exception("invalid whitespace mode %s" % mode) class Template(object): def __init__(self, template_string, name="<string>", loader=None, compress_whitespace=_UNSET, autoescape=_UNSET, whitespace=None): self.name = escape.native_str(name) if compress_whitespace is not _UNSET: if whitespace is not None: raise Exception("cannot set both whitespace and compress_whitespace") whitespace = "single" if compress_whitespace else "all" if whitespace is None: if loader and loader.whitespace: whitespace = loader.whitespace else: if name.endswith(".html") or name.endswith(".js"): whitespace = "single" else: whitespace = "all" filter_whitespace(whitespace, '') if autoescape is not _UNSET: self.autoescape = autoescape elif loader: self.autoescape = loader.autoescape else: self.autoescape = _DEFAULT_AUTOESCAPE self.namespace = loader.namespace if loader else {} reader = _TemplateReader(name, escape.native_str(template_string), whitespace) self.file = _File(self, _parse(reader, self)) self.code = self._generate_python(loader) self.loader = loader try: self.compiled = compile( escape.to_unicode(self.code), "%s.generated.py" % self.name.replace('.', '_'), "exec", dont_inherit=True) except Exception: formatted_code = _format_code(self.code).rstrip() app_log.error("%s code:\n%s", self.name, formatted_code) raise def generate(self, **kwargs): namespace = { "escape": escape.xhtml_escape, "xhtml_escape": escape.xhtml_escape, "url_escape": escape.url_escape, "json_encode": escape.json_encode, "squeeze": escape.squeeze, "linkify": escape.linkify, "datetime": datetime, "_tt_utf8": escape.utf8, "_tt_string_types": (unicode_type, bytes), "__name__": self.name.replace('.', '_'), "__loader__": ObjectDict(get_source=lambda name: self.code), } namespace.update(self.namespace) namespace.update(kwargs) exec_in(self.compiled, namespace) execute = namespace["_tt_execute"] linecache.clearcache() return execute() def _generate_python(self, loader): buffer = StringIO() try: named_blocks = {} ancestors = self._get_ancestors(loader) ancestors.reverse() for ancestor in ancestors: ancestor.find_named_blocks(loader, named_blocks) writer = _CodeWriter(buffer, named_blocks, loader, ancestors[0].template) ancestors[0].generate(writer) return buffer.getvalue() finally: buffer.close() def _get_ancestors(self, loader): ancestors = [self.file] for chunk in self.file.body.chunks: if isinstance(chunk, _ExtendsBlock): if not loader: raise ParseError("{% extends %} block found, but no " "template loader") template = loader.load(chunk.name, self.name) ancestors.extend(template._get_ancestors(loader)) return ancestors class BaseLoader(object):
Apache License 2.0
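For illustration, a minimal sketch of the whitespace behaviour documented in the record above, assuming Tornado 4.3+ is installed; the template string itself is invented:

from tornado.template import Template

raw = "<ul>\n    <li>{{ item }}</li>\n</ul>"
# "all" keeps the template's whitespace verbatim; "single" collapses runs of
# spaces and newlines, which is the default for ".html" and ".js" templates.
print(Template(raw, whitespace="all").generate(item="x").decode())
print(Template(raw, whitespace="single").generate(item="x").decode())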
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_1/models/volume_response.py
VolumeResponse.__init__
python
def __init__(
    self,
    items=None,
):
    if items is not None:
        self.items = items
Keyword args:
    items (list[Volume]): Returns a list of all items after filtering. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_1/models/volume_response.py#L43-L52
import pprint import re import six import typing from ....properties import Property if typing.TYPE_CHECKING: from pypureclient.flasharray.FA_2_1 import models class VolumeResponse(object): swagger_types = { 'items': 'list[Volume]' } attribute_map = { 'items': 'items' } required_args = { }
BSD 2-Clause Simplified License
docusign/docusign-python-client
docusign_esign/models/login_account.py
LoginAccount.is_default
python
def is_default(self):
    return self._is_default
Gets the is_default of this LoginAccount.  # noqa: E501

This value is true if this is the default account for the user, otherwise false is returned.  # noqa: E501

:return: The is_default of this LoginAccount.  # noqa: E501
:rtype: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/login_account.py#L187-L195
import pprint import re import six from docusign_esign.client.configuration import Configuration class LoginAccount(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'account_id': 'str', 'account_id_guid': 'str', 'base_url': 'str', 'email': 'str', 'is_default': 'str', 'login_account_settings': 'list[NameValue]', 'login_user_settings': 'list[NameValue]', 'name': 'str', 'site_description': 'str', 'user_id': 'str', 'user_name': 'str' } attribute_map = { 'account_id': 'accountId', 'account_id_guid': 'accountIdGuid', 'base_url': 'baseUrl', 'email': 'email', 'is_default': 'isDefault', 'login_account_settings': 'loginAccountSettings', 'login_user_settings': 'loginUserSettings', 'name': 'name', 'site_description': 'siteDescription', 'user_id': 'userId', 'user_name': 'userName' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._account_id = None self._account_id_guid = None self._base_url = None self._email = None self._is_default = None self._login_account_settings = None self._login_user_settings = None self._name = None self._site_description = None self._user_id = None self._user_name = None self.discriminator = None setattr(self, "_{}".format('account_id'), kwargs.get('account_id', None)) setattr(self, "_{}".format('account_id_guid'), kwargs.get('account_id_guid', None)) setattr(self, "_{}".format('base_url'), kwargs.get('base_url', None)) setattr(self, "_{}".format('email'), kwargs.get('email', None)) setattr(self, "_{}".format('is_default'), kwargs.get('is_default', None)) setattr(self, "_{}".format('login_account_settings'), kwargs.get('login_account_settings', None)) setattr(self, "_{}".format('login_user_settings'), kwargs.get('login_user_settings', None)) setattr(self, "_{}".format('name'), kwargs.get('name', None)) setattr(self, "_{}".format('site_description'), kwargs.get('site_description', None)) setattr(self, "_{}".format('user_id'), kwargs.get('user_id', None)) setattr(self, "_{}".format('user_name'), kwargs.get('user_name', None)) @property def account_id(self): return self._account_id @account_id.setter def account_id(self, account_id): self._account_id = account_id @property def account_id_guid(self): return self._account_id_guid @account_id_guid.setter def account_id_guid(self, account_id_guid): self._account_id_guid = account_id_guid @property def base_url(self): return self._base_url @base_url.setter def base_url(self, base_url): self._base_url = base_url @property def email(self): return self._email @email.setter def email(self, email): self._email = email @property
MIT License
rikonor/vanguard-api
seleniumapis/browser/browser.py
Browser.find_element_by_any
python
def find_element_by_any(self, search_term):
    return self.find_element_by_id(search_term) or self.find_element_by_name(search_term)
Find an element by a search term.

Try ID first, then fall back to Name.
https://github.com/rikonor/vanguard-api/blob/5462b2327cacad68bedb945dc323d534ebbdfeee/seleniumapis/browser/browser.py#L37-L43
from selenium import webdriver from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from selenium.common.exceptions import * from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC class Browser(object): def __init__(self): self.remote = "http://selenium:4444/wd/hub" self.browser_type = DesiredCapabilities.CHROME self.browser = None def start(self): self.driver = webdriver.Remote( command_executor=self.remote, desired_capabilities=self.browser_type) def get(self, path): self.driver.get(path) def close(self): self.driver.close() @property def inputs(self): return [] @property def title(self): return self.driver.title def contains_text(self, text): return text in self.driver.page_source
MIT License
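A hedged usage sketch for the ID-then-Name fallback above; the page URL and field name are placeholders, and it assumes a Selenium hub is reachable at the address configured inside Browser:

browser = Browser()
browser.start()                        # connects to the remote Selenium hub
browser.get("https://example.com/login")
# Tries an element with id="username" first, then one with name="username".
field = browser.find_element_by_any("username")
browser.close()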
tamasgal/km3pipe
km3pipe/io/daq.py
DAQPump.seek_to_frame
python
def seek_to_frame(self, index):
    pointer_position = self.frame_positions[index]
    self.blob_file.seek(pointer_position, 0)
Move file pointer to the frame with given index.
https://github.com/tamasgal/km3pipe/blob/c10fa39f72f3a8e384025712344378012fae5115/km3pipe/io/daq.py#L231-L234
from collections import namedtuple from io import BytesIO import json import math import struct from struct import unpack import time import pprint from urllib.request import urlopen, URLError import numpy as np from thepipe import Module, Blob from km3pipe.dataclasses import Table from km3pipe.sys import ignored from km3pipe.logger import get_logger, get_printer __author__ = "Tamas Gal" __copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration." __credits__ = [] __license__ = "MIT" __maintainer__ = "Tamas Gal" __email__ = "tgal@km3net.de" __status__ = "Development" log = get_logger(__name__) DATA_TYPES = { 101: "DAQSuperFrame", 201: "DAQSummaryFrame", 1001: "DAQTimeslice", 1002: "DAQTimeslice", 1003: "DAQTimeslice", 1004: "DAQTimeslice", 1005: "DAQTimeslice", 2001: "DAQSummaryslice", 10001: "DAQEvent", } MINIMAL_RATE_HZ = 2.0e3 MAXIMAL_RATE_HZ = 2.0e6 class TimesliceParser(Module): def configure(self): self.legacy = self.get("legacy", default=False) def _get_raw_data(self, blob): if "CHPrefix" in blob: if not str(blob["CHPrefix"].tag).startswith("IO_TS"): log.info("Not an IO_TS* blob") return blob return BytesIO(blob["CHData"]) if "FileIO" in blob: return blob["FileIO"] if "RawBytes" in blob: return BytesIO(blob["RawBytes"]) def process(self, blob): data = self._get_raw_data(blob) if data is None: return blob try: ts_info, ts_frameinfos, ts_hits = self._parse_timeslice(data) except struct.error: log.error("Could not parse Timeslice") log.error(blob.keys()) else: blob["TSHits"] = ts_hits blob["TimesliceInfo"] = ts_info blob["TimesliceFrameInfos"] = ts_frameinfos return blob def _parse_timeslice(self, data): tsl_size, datatype = unpack("<ii", data.read(8)) if not self.legacy: version = unpack("<h", data.read(2))[0] if version != 1: raise ValueError( "Unsupported DAQTimeslice version ({}) or legacy DAQ. 
" "Make sure Jpp v13+ is used or pass 'legacy=True' " "to {}.".format(version, self.__class__.__name__) ) det_id, run, sqnr = unpack("<iii", data.read(12)) timestamp, ns_ticks, n_frames = unpack("<iii", data.read(12)) ts_info = Table.from_template( { "frame_index": sqnr, "slice_id": 0, "timestamp": timestamp, "nanoseconds": ns_ticks * 16, "n_frames": n_frames, }, "TimesliceInfo", ) ts_frameinfos = {} _dom_ids = [] _channel_ids = [] _times = [] _tots = [] for _ in range(n_frames): frame_size, datatype = unpack("<ii", data.read(8)) det_id, run, sqnr = unpack("<iii", data.read(12)) timestamp, ns_ticks, dom_id = unpack("<iii", data.read(12)) dataqueue_status = unpack("<i", data.read(4))[0] dom_status = unpack("<iiii", data.read(4 * 4)) n_hits = unpack("<i", data.read(4))[0] ts_frameinfos[dom_id] = Table.from_template( { "det_id": det_id, "run_id": run, "sqnr": sqnr, "timestamp": timestamp, "nanoseconds": ns_ticks * 16, "dom_id": dom_id, "dataqueue_status": dataqueue_status, "dom_status": dom_status, "n_hits": n_hits, }, "TimesliceFrameInfo", ) for j in range(n_hits): hit = unpack("!BlB", data.read(6)) _dom_ids.append(dom_id) _channel_ids.append(hit[0]) _times.append(hit[1]) _tots.append(hit[2]) ts_hits = Table( { "channel_id": np.array(_channel_ids), "dom_id": np.array(_dom_ids), "time": np.array(_times), "tot": np.array(_tots), }, name="TimesliceHits", h5loc="/timeslice_hits", split_h5=True, ) return ts_info, ts_frameinfos, ts_hits class RePump(Module): def configure(self): self.filename = self.require("filename") self.fobj = open(self.filename, "rb") def process(self, blob): try: length, data_type = unpack("<ii", self.fobj.read(8)) self.fobj.seek(-8, 1) except struct.error: raise StopIteration data = self.fobj.read(length) blob["RawBytes"] = data return blob def finish(self): self.fobj.close() class DAQPump(Module): def configure(self): self.filename = self.require("filename") self.legacy = self.get("legacy", default=False) self.frame_positions = [] self.index = 0 self.blob_file = self.open_file(self.filename) self.determine_frame_positions() def next_blob(self): blob_file = self.blob_file try: preamble = DAQPreamble(file_obj=blob_file) except struct.error: raise StopIteration try: data_type = DATA_TYPES[preamble.data_type] except KeyError: log.error("Unknown datatype: {0}".format(preamble.data_type)) data_type = "Unknown" blob = Blob() blob[data_type] = None blob["DAQPreamble"] = preamble if data_type == "DAQSummaryslice": daq_frame = DAQSummaryslice(blob_file, legacy=self.legacy) blob[data_type] = daq_frame blob["DAQHeader"] = daq_frame.header elif data_type == "DAQEvent": daq_frame = DAQEvent(blob_file, legacy=self.legacy) blob[data_type] = daq_frame blob["DAQHeader"] = daq_frame.header else: log.warning( "Skipping DAQ frame with data type code '{0}'.".format( preamble.data_type ) ) blob_file.seek(preamble.length - DAQPreamble.size, 1) return blob
MIT License
vitrioil/speech-separation
src/loader/data.py
Signal.augment_audio
python
def augment_audio(self, augmenter: Callable, *args, **kwargs):
    self.audio = augmenter(self.audio, *args, **kwargs)
Change the audio via the augmenter method.
https://github.com/vitrioil/speech-separation/blob/65a532d36cf0725d622f18ef058cf5a537c01070/src/loader/data.py#L65-L69
import os import cv2 import librosa import numpy as np from pathlib import Path from typing import Callable, Tuple, List EMBED_DIR = [Path("../data/train/embed")] SPEC_DIR = [Path("../data/train/spec")] def get_frames(video): frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) frame_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) frame_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) buffer_video = np.empty((frame_count, frame_height, frame_width, 3), np.dtype('uint8')) frame = 0 ret = True while (frame < frame_count and ret): ret, f = video.read() buffer_video[frame] = cv2.cvtColor(f, cv2.COLOR_BGR2RGB) frame += 1 video.release() return buffer_video class Signal: def __init__(self, video_path: str, audio_path: str, audio_ext=".mp3", sr=16_000, video_start_length=0, load_spec=True): self.video_path = Path(video_path) self.audio_path = Path(audio_path) self.video_start_length = video_start_length self.embed_path = None self.embed_saved = False self.embed = None self.load_spec = load_spec self._is_spec = False self.spec_path = Path(*self.audio_path.parts[:-2], "spec", self.audio_path.stem + ".npy") self._load(sr=sr) self._check_video_embed() self._convert_video() def _load(self, sr: int): self.audio = None if self.load_spec and self.spec_path.is_file(): self.audio = np.load(self.spec_path) self._is_spec = True if self.audio is None: self.audio, sr = librosa.load(self.audio_path.as_posix(), sr=sr) self._is_spec = False self.video = cv2.VideoCapture(self.video_path.as_posix())
MIT License
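A small usage sketch for the augmenter hook above; the file paths are placeholders and the gain transform is only an illustration:

import numpy as np

signal = Signal("video/clip.mp4", "audio/clip.mp3", load_spec=False)

# Any callable taking the raw waveform (a numpy array) and returning a new one
# can be plugged in; extra positional and keyword args are forwarded unchanged.
signal.augment_audio(lambda audio, gain: np.clip(audio * gain, -1.0, 1.0), gain=1.5)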
jakecover/distest
distest/TestInterface/_reply.py
assert_reply_contains
python
async def assert_reply_contains(self, contents, substring):
    response = await self.wait_for_reply(contents)
    return await self.assert_message_contains(response, substring)
Send a message and wait for a response. If the response does not contain the given substring, fail the test.

:param str contents: The content of the trigger message. (A command)
:param str substring: The string to test against.
:returns: The reply.
:rtype: discord.Message
:raises: ResponseDidNotMatchError
https://github.com/jakecover/distest/blob/8810c884546a37a67881ddf3fbeed03b6eccebe5/distest/TestInterface/_reply.py#L22-L33
from asyncio import sleep from inspect import signature, _ParameterKind from typing import Dict from discord import Embed, Message async def assert_reply_equals(self, contents, matches): response = await self.wait_for_reply(contents) return await self.assert_message_equals(response, matches)
MIT License
thingsboard/python_tb_rest_client
tb_rest_client/models/models_pe/report_config.py
ReportConfig.to_dict
python
def to_dict(self):
    result = {}
    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    if issubclass(ReportConfig, dict):
        for key, value in self.items():
            result[key] = value
    return result
Returns the model properties as a dict
https://github.com/thingsboard/python_tb_rest_client/blob/87c6a3703974fc8a86e4c72c444168ee2b758ecb/tb_rest_client/models/models_pe/report_config.py#L300-L325
import pprint import re import six class ReportConfig(object): swagger_types = { 'base_url': 'str', 'dashboard_id': 'str', 'name_pattern': 'str', 'state': 'str', 'timewindow': 'str', 'timezone': 'str', 'type': 'str', 'use_current_user_credentials': 'bool', 'use_dashboard_timewindow': 'bool', 'user_id': 'str' } attribute_map = { 'base_url': 'baseUrl', 'dashboard_id': 'dashboardId', 'name_pattern': 'namePattern', 'state': 'state', 'timewindow': 'timewindow', 'timezone': 'timezone', 'type': 'type', 'use_current_user_credentials': 'useCurrentUserCredentials', 'use_dashboard_timewindow': 'useDashboardTimewindow', 'user_id': 'userId' } def __init__(self, base_url=None, dashboard_id=None, name_pattern=None, state=None, timewindow=None, timezone=None, type=None, use_current_user_credentials=None, use_dashboard_timewindow=None, user_id=None): self._base_url = None self._dashboard_id = None self._name_pattern = None self._state = None self._timewindow = None self._timezone = None self._type = None self._use_current_user_credentials = None self._use_dashboard_timewindow = None self._user_id = None self.discriminator = None if base_url is not None: self.base_url = base_url if dashboard_id is not None: self.dashboard_id = dashboard_id if name_pattern is not None: self.name_pattern = name_pattern if state is not None: self.state = state if timewindow is not None: self.timewindow = timewindow if timezone is not None: self.timezone = timezone if type is not None: self.type = type if use_current_user_credentials is not None: self.use_current_user_credentials = use_current_user_credentials if use_dashboard_timewindow is not None: self.use_dashboard_timewindow = use_dashboard_timewindow if user_id is not None: self.user_id = user_id @property def base_url(self): return self._base_url @base_url.setter def base_url(self, base_url): self._base_url = base_url @property def dashboard_id(self): return self._dashboard_id @dashboard_id.setter def dashboard_id(self, dashboard_id): self._dashboard_id = dashboard_id @property def name_pattern(self): return self._name_pattern @name_pattern.setter def name_pattern(self, name_pattern): self._name_pattern = name_pattern @property def state(self): return self._state @state.setter def state(self, state): self._state = state @property def timewindow(self): return self._timewindow @timewindow.setter def timewindow(self, timewindow): self._timewindow = timewindow @property def timezone(self): return self._timezone @timezone.setter def timezone(self, timezone): self._timezone = timezone @property def type(self): return self._type @type.setter def type(self, type): self._type = type @property def use_current_user_credentials(self): return self._use_current_user_credentials @use_current_user_credentials.setter def use_current_user_credentials(self, use_current_user_credentials): self._use_current_user_credentials = use_current_user_credentials @property def use_dashboard_timewindow(self): return self._use_dashboard_timewindow @use_dashboard_timewindow.setter def use_dashboard_timewindow(self, use_dashboard_timewindow): self._use_dashboard_timewindow = use_dashboard_timewindow @property def user_id(self): return self._user_id @user_id.setter def user_id(self, user_id): self._user_id = user_id
Apache License 2.0
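A brief sketch of the serialisation helper above; the field values are invented:

cfg = ReportConfig(base_url="https://tb.example.com", type="pdf", timezone="UTC")
# Unset properties stay None in the resulting dict; nested models would be
# serialised recursively through their own to_dict().
print(cfg.to_dict()["type"])      # 'pdf'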
xilinx/pyxir
python/pyxir/contrib/dpuv1/dpuv1_op_support.py
mean_op_support
python
def mean_op_support(X, bXs, tXs):
    axes = X.attrs['axes']
    keepdims = X.attrs['keepdims']
    return len(axes) == 2 and keepdims
Check whether we can execute the provided Mean operator on the dpuv1 target
https://github.com/xilinx/pyxir/blob/bef661d6d77adcdbd2cf4163f2cf3a1d31d40406/python/pyxir/contrib/dpuv1/dpuv1_op_support.py#L212-L220
import math import pyxir import logging logger = logging.getLogger('pyxir') @pyxir.register_op_support_check('dpuv1', 'BatchNorm') def batchnorm_op_support(X, bXs, tXs): axis = X.attrs['axis'] channels = X.shapes[axis] return channels >= 1 and channels <= 4096 @pyxir.register_op_support_check('dpuv1', 'BiasAdd') def biasadd_op_support(X, bXs, tXs): axis = X.attrs['axis'] channels = X.shapes[axis] return channels >= 1 and channels <= 4096 @pyxir.register_op_support_check('dpuv1', 'Cast') def cast_op_support(X, bXs, tXs): dtype = X.attrs['dtype'] return dtype == 'float32' @pyxir.register_op_support_check('dpuv1', 'Concat') def concat_op_support(X, bXs, tXs): axis = X.attrs['axis'] channels = X.shapes[axis] return channels >= 1 and channels <= 4096 @pyxir.register_op_support_check('dpuv1', 'Convolution') def conv2d_op_support(X, bXs, tXs): data_layout = X.attrs['data_layout'] kernel_h, kernel_w = X.attrs['kernel_size'] stride_h, stride_w = X.attrs['strides'] dilation_h, dilation_w = X.attrs['dilation'] padding_h, padding_w = X.attrs['padding'][data_layout.index('H')], X.attrs['padding'][data_layout.index('H')] padding_h_top, padding_h_bot = padding_h padding_w_left, padding_w_right = padding_w ch_in, ch_out = X.attrs['channels'] groups = X.attrs['groups'] return groups == 1 and kernel_h >= 1 and kernel_h <= 15 and kernel_w >= 1 and kernel_w <= 15 and stride_h in [1, 2, 4, 8] and stride_w in [1, 2, 4, 8] and ch_in >= 1 and ch_in <= 4096 and ch_out >= 1 and ch_out <= 4096 and dilation_h in [1, 2, 4] and dilation_w in [1, 2, 4] @pyxir.register_op_support_check('dpuv1', 'Conv2DTranspose') def conv2d_transpose_op_support(X, bXs, tXs): data_layout = X.attrs['data_layout'] kernel_h, kernel_w = X.attrs['kernel_size'] stride_h, stride_w = X.attrs['strides'] dilation_h, dilation_w = X.attrs['dilation'] padding_h, padding_w = X.attrs['padding'][data_layout.index('H')], X.attrs['padding'][data_layout.index('W')] padding_h_top, padding_h_bot = padding_h padding_w_left, padding_w_right = padding_w padding = X.attrs['padding'] ch_in, ch_out = X.attrs['channels'] groups = X.attrs['groups'] return groups == 1 and kernel_h >= 1 and kernel_h <= 15 and kernel_w >= 1 and kernel_w <= 15 and stride_h in [1, 2, 4, 8] and stride_w in [1, 2, 4, 8] and ch_in >= 1 and ch_in <= 4096 and ch_out >= 1 and ch_out <= 4096 and dilation_h in [1, 2, 4] and dilation_w in [1, 2, 4] @pyxir.register_op_support_check('dpuv1', 'DPU') def dpu_op_support(X, bXs, tXs): return True @pyxir.register_op_support_check('dpuv1', 'Eltwise') def eltwise_op_support(X, bXs, tXs): return True @pyxir.register_op_support_check('dpuv1', 'Pad') def pooling_op_support(X, bXs, tXs): padding = X.attrs['padding'] return True @pyxir.register_op_support_check('dpuv1', 'Pooling') def pooling_op_support(X, bXs, tXs): data_layout = X.attrs['data_layout'] kernel_h, kernel_w = X.attrs['kernel_size'] stride_h, stride_w = X.attrs['strides'] padding_h, padding_w = X.attrs['padding'][data_layout.index('H')], X.attrs['padding'][data_layout.index('H')] padding_h_top, padding_h_bot = padding_h padding_w_left, padding_w_right = padding_w channels = X.shapes[data_layout.index('C')] return kernel_h >= 1 and kernel_h <= 15 and kernel_w >= 1 and kernel_w <= 15 and stride_h in [1, 2, 4, 8] and stride_w in [1, 2, 4, 8] and channels >= 1 and channels <= 4096 @pyxir.register_op_support_check('dpuv1', 'Mean')
Apache License 2.0
kriaga/health-checker
HealthChecker/venv/Lib/site-packages/nltk/data.py
normalize_resource_name
python
def normalize_resource_name(resource_name, allow_relative=True, relative_path=None):
    is_dir = bool(re.search(r'[\\/.]$', resource_name)) or resource_name.endswith(os.path.sep)
    if sys.platform.startswith('win'):
        resource_name = resource_name.lstrip('/')
    else:
        resource_name = re.sub(r'^/+', '/', resource_name)
    if allow_relative:
        resource_name = os.path.normpath(resource_name)
    else:
        if relative_path is None:
            relative_path = os.curdir
        resource_name = os.path.abspath(
            os.path.join(relative_path, resource_name))
    resource_name = resource_name.replace('\\', '/').replace(os.path.sep, '/')
    if sys.platform.startswith('win') and os.path.isabs(resource_name):
        resource_name = '/' + resource_name
    if is_dir and not resource_name.endswith('/'):
        resource_name += '/'
    return resource_name
:type resource_name: str or unicode
:param resource_name: The name of the resource to search for.
    Resource names are posix-style relative path names, such as
    ``corpora/brown``. Directory names will automatically be converted
    to a platform-appropriate path separator. Directory trailing slashes
    are preserved

>>> windows = sys.platform.startswith('win')
>>> normalize_resource_name('.', True)
'./'
>>> normalize_resource_name('./', True)
'./'
>>> windows or normalize_resource_name('dir/file', False, '/') == '/dir/file'
True
>>> not windows or normalize_resource_name('C:/file', False, '/') == '/C:/file'
True
>>> windows or normalize_resource_name('/dir/file', False, '/') == '/dir/file'
True
>>> windows or normalize_resource_name('../dir/file', False, '/') == '/dir/file'
True
>>> not windows or normalize_resource_name('/dir/file', True, '/') == 'dir/file'
True
>>> windows or normalize_resource_name('/dir/file', True, '/') == '/dir/file'
True
https://github.com/kriaga/health-checker/blob/3d9ce933f131bcbb897103b0f509cc45393cae4a/HealthChecker/venv/Lib/site-packages/nltk/data.py#L210-L254
from __future__ import print_function, unicode_literals from __future__ import division from abc import ABCMeta, abstractmethod from six import add_metaclass import functools import textwrap import io import os import re import sys import zipfile import codecs from gzip import GzipFile, READ as GZ_READ, WRITE as GZ_WRITE try: textwrap_indent = functools.partial(textwrap.indent, prefix=' ') except AttributeError: textwrap_fill = functools.partial(textwrap.fill, initial_indent=' ', subsequent_indent=' ', replace_whitespace=False) def textwrap_indent(text): return '\n'.join(textwrap_fill(line) for line in text.splitlines()) try: from zlib import Z_SYNC_FLUSH as FLUSH except ImportError: from zlib import Z_FINISH as FLUSH try: import cPickle as pickle except ImportError: import pickle from six import string_types, text_type from six.moves.urllib.request import urlopen, url2pathname import nltk from nltk.compat import py3_data, add_py3_data, BytesIO path = [] _paths_from_env = os.environ.get('NLTK_DATA', str('')).split(os.pathsep) path += [d for d in _paths_from_env if d] if 'APPENGINE_RUNTIME' not in os.environ and os.path.expanduser('~/') != '~/': path.append(os.path.expanduser(str('~/nltk_data'))) if sys.platform.startswith('win'): path += [ str(r'C:\nltk_data'), str(r'D:\nltk_data'), str(r'E:\nltk_data'), os.path.join(sys.prefix, str('nltk_data')), os.path.join(sys.prefix, str('share'), str('nltk_data')), os.path.join(sys.prefix, str('lib'), str('nltk_data')), os.path.join( os.environ.get(str('APPDATA'), str('C:\\')), str('nltk_data')) ] else: path += [ str('/usr/share/nltk_data'), str('/usr/local/share/nltk_data'), str('/usr/lib/nltk_data'), str('/usr/local/lib/nltk_data'), os.path.join(sys.prefix, str('nltk_data')), os.path.join(sys.prefix, str('share'), str('nltk_data')), os.path.join(sys.prefix, str('lib'), str('nltk_data')) ] def gzip_open_unicode(filename, mode="rb", compresslevel=9, encoding='utf-8', fileobj=None, errors=None, newline=None): if fileobj is None: fileobj = GzipFile(filename, mode, compresslevel, fileobj) return io.TextIOWrapper(fileobj, encoding, errors, newline) def split_resource_url(resource_url): protocol, path_ = resource_url.split(':', 1) if protocol == 'nltk': pass elif protocol == 'file': if path_.startswith('/'): path_ = '/' + path_.lstrip('/') else: path_ = re.sub(r'^/{0,2}', '', path_) return protocol, path_ def normalize_resource_url(resource_url): try: protocol, name = split_resource_url(resource_url) except ValueError: protocol = 'nltk' name = resource_url if protocol == 'nltk' and os.path.isabs(name): protocol = 'file://' name = normalize_resource_name(name, False, None) elif protocol == 'file': protocol = 'file://' name = normalize_resource_name(name, False, None) elif protocol == 'nltk': protocol = 'nltk:' name = normalize_resource_name(name, True) else: protocol += '://' return ''.join([protocol, name])
MIT License
bigmlcom/bigmler
bigmler/resourcesapi/batch_anomaly_scores.py
create_batch_anomaly_score
python
def create_batch_anomaly_score(anomaly, test_dataset, batch_anomaly_score_args,
                               args, api=None, session_file=None,
                               path=None, log=None):
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch anomaly score.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    batch_anomaly_score = api.create_batch_anomaly_score(
        anomaly, test_dataset, batch_anomaly_score_args, retries=None)
    log_created_resources(
        "batch_anomaly_score", path,
        bigml.api.get_batch_anomaly_score_id(batch_anomaly_score), mode='a')
    batch_anomaly_score_id = check_resource_error(
        batch_anomaly_score, "Failed to create batch prediction: ")
    try:
        batch_anomaly_score = check_resource(batch_anomaly_score,
                                             api.get_batch_anomaly_score,
                                             raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished batch anomaly score: %s"
                 % str(exception))
    message = dated("Batch anomaly score created: %s\n"
                    % get_url(batch_anomaly_score))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % batch_anomaly_score_id, log_file=log)
    if args.reports:
        report(args.reports, path, batch_anomaly_score)
    return batch_anomaly_score
Creates remote batch anomaly score
https://github.com/bigmlcom/bigmler/blob/91973ca1e752954302bf26bb22aa6874dc34ce69/bigmler/resourcesapi/batch_anomaly_scores.py#L74-L106
import sys import bigml.api from bigmler.utils import (dated, get_url, log_message, check_resource, check_resource_error, log_created_resources) from bigmler.reports import report from bigmler.resourcesapi.common import set_basic_batch_args, map_fields, update_json_args from bigmler.resourcesapi.common import FULL_FORMAT def set_batch_anomaly_score_args(args, fields=None, dataset_fields=None): batch_anomaly_score_args = set_basic_batch_args(args, args.name) if args.fields_map_ and fields is not None: if dataset_fields is None: dataset_fields = fields batch_anomaly_score_args.update({ "fields_map": map_fields(args.fields_map_, fields, dataset_fields)}) if args.prediction_info == FULL_FORMAT: batch_anomaly_score_args.update(all_fields=True) if args.prediction_fields: batch_anomaly_score_args.update(all_fields=False) prediction_fields = [] for field in args.prediction_fields.split(args.args_separator): field = field.strip() if not field in dataset_fields.fields: try: field = dataset_fields.field_id(field) except ValueError as exc: sys.exit(exc) prediction_fields.append(field) batch_anomaly_score_args.update(output_fields=prediction_fields) if 'batch_anomaly_score' in args.json_args: update_json_args( batch_anomaly_score_args, args.json_args.get('batch_anomaly_score'), fields) return batch_anomaly_score_args
Apache License 2.0
loudnate/openaps-predict
openapscontrib/predict/predict.py
ceil_datetime_at_minute_interval
python
def ceil_datetime_at_minute_interval(timestamp, minute):
    nsecs = timestamp.minute * 60 + timestamp.second + timestamp.microsecond * 1e-6
    seconds = minute * 60
    delta = (nsecs // seconds) * seconds + seconds - nsecs
    if delta < seconds:
        return timestamp + datetime.timedelta(seconds=delta)
    else:
        return timestamp
From http://stackoverflow.com/questions/13071384/python-ceil-a-datetime-to-next-quarter-of-an-hour

:param timestamp:
:type timestamp: datetime.datetime
:param minute:
:type minute: int
:return:
:rtype: datetime.datetime
https://github.com/loudnate/openaps-predict/blob/a2da82148318d86b0935a1fca596477d84dd8247/openapscontrib/predict/predict.py#L50-L71
from collections import defaultdict import datetime from dateutil.parser import parse from functools32 import lru_cache import math from numpy import arange from scipy.stats import linregress from models import Unit class Schedule(object): def __init__(self, entries): self.entries = entries @lru_cache() def at(self, time): result = {} for entry in self.entries: if parse(entry['start']).time() > time: break result = entry return result def floor_datetime_at_minute_interval(timestamp, minute): return timestamp - datetime.timedelta( minutes=timestamp.minute % minute, seconds=timestamp.second, microseconds=timestamp.microsecond )
MIT License
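A short worked example of the rounding behaviour above; the import path is assumed from the record's function_path and the timestamps are invented:

import datetime
from openapscontrib.predict.predict import ceil_datetime_at_minute_interval

t = datetime.datetime(2015, 6, 1, 12, 7, 30)
print(ceil_datetime_at_minute_interval(t, 5))    # 2015-06-01 12:10:00
print(ceil_datetime_at_minute_interval(t, 15))   # 2015-06-01 12:15:00

# A timestamp already on the boundary is returned unchanged:
print(ceil_datetime_at_minute_interval(datetime.datetime(2015, 6, 1, 12, 15), 15))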
edx-unsupported/edx-load-tests
loadtests/student_notes/locustfile.py
BaseNotesTask._create_many_notes
python
def _create_many_notes(self, num_notes):
    for _ in xrange(num_notes):
        self._create_note()
Create many notes within the course for the current user.
https://github.com/edx-unsupported/edx-load-tests/blob/1a6dc891d2fb72575f354521988a531489f30032/loadtests/student_notes/locustfile.py#L202-L207
import os import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) from contextlib import contextmanager from copy import copy import json from locust import HttpLocust, task, TaskSet import logging import random from helpers import settings settings.init(__name__, required_data=[ 'courses', 'NOTES_HOST', 'NUM_NOTES', 'NUM_WORDS', 'NUM_TAGS', 'NUM_SEARCH_TERMS', 'LOCUST_TASK_SET', 'LOCUST_MIN_WAIT', 'LOCUST_MAX_WAIT', ]) from helpers.mixins import EnrollmentTaskSetMixin from helpers.edx_app import EdxAppTasks HIGHLIGHT_TAG = 'span' HIGHLIGHT_CLASS = 'note-highlight' DATA_DIRECTORY = os.path.join(os.path.dirname(__file__), 'notes_data/') with open(os.path.join(DATA_DIRECTORY, 'basic_words.txt')) as f: NOTES_TEXT = [word for line in f for word in line.split()] log = logging.getLogger(__name__) def pick_some(sequence, num_items): return random.sample(sequence, random.randint(1, num_items)) class BaseNotesTask(EdxAppTasks, EnrollmentTaskSetMixin): def __init__(self, *args, **kwargs): super(BaseNotesTask, self).__init__(*args, **kwargs) self._notes = {} self.api_client = copy(self.client) self.api_client.auth = None def on_start(self): self.auto_auth() self.enroll(self.course_id) @property def annotator_auth_token(self): return self.client.get( '/courses/{course_id}/edxnotes/token/'.format(course_id=self.course_id), headers={'content-type': 'text/plain'}, ).content @contextmanager def get_posted_student_note(self, warning_message): try: yield self._notes[random.choice(self._notes.keys())] except IndexError: log.debug(warning_message) yield self._create_note() def _request_from_notes_service(self, method, path, params_or_body=None, **kwargs): method = method.lower() kwargs.update({ 'headers': { 'x-annotator-auth-token': self.annotator_auth_token, 'X-CSRFToken': self.client.cookies.get('csrftoken', ''), 'content-type': 'application/json', }, }) if params_or_body is not None: if method == 'get': kwargs.update({'params': params_or_body}) elif method in ['post', 'patch', 'put', 'delete']: kwargs.update({'data': json.dumps(params_or_body)}) return getattr(self.api_client, method)( settings.data['NOTES_HOST'] + path, **kwargs ) def get(self, path, params=None, **kwargs): return self._request_from_notes_service('get', path, params, **kwargs) def post(self, path, body=None, **kwargs): return self._request_from_notes_service('post', path, body, **kwargs) def put(self, path, body=None, **kwargs): return self._request_from_notes_service('put', path, body, **kwargs) def delete(self, path, params=None, **kwargs): return self._request_from_notes_service('delete', path, params, **kwargs) def _create_note(self): data = { 'user': self._anonymous_user_id, 'course_id': self.course_id, 'text': ' '.join(pick_some( NOTES_TEXT, settings.data['NUM_WORDS'], )), 'tags': pick_some( NOTES_TEXT, settings.data['NUM_TAGS'], ), 'quote': ' '.join(pick_some(NOTES_TEXT, 5)), 'usage_id': self.course_data.html_usage_id, 'ranges': [ { 'start': '/div[1]/p[1]', 'end': '/div[1]/p[1]', 'startOffset': 0, 'endOffset': 6 } ], } note = json.loads(self.post('/api/v1/annotations/', data).content) self._notes[note['id']] = note return note
Apache License 2.0
bobotig/thermalprinter
thermalprinter/thermalprinter.py
ThermalPrinter.upside_down
python
def upside_down(self, state=False):
    state = bool(state)
    if state is not self._upside_down:
        self._upside_down = state
        self.send_command(Command.ESC, 123, int(state))
Turns on/off upside-down printing mode.
https://github.com/bobotig/thermalprinter/blob/4cf697049d6c4fbad31ba8ea6842e0a4bc1b35ad/thermalprinter/thermalprinter.py#L624-L630
from atexit import register from time import sleep from serial import Serial from .constants import (BarCodePosition, CharSet, Chinese, CodePage, CodePageConverted, Command) from .exceptions import (ThermalPrinterValueError, ThermalPrinterCommunicationError) from .validate import (validate_barcode, validate_barcode_position, validate_charset, validate_chinese_format, validate_codepage) class ThermalPrinter(Serial): __lines = 0 __feeds = 0 def __init__(self, port='/dev/ttyAMA0', baudrate=19200, **kwargs): self.heat_time = int(kwargs.get('heat_time', 80)) if not 0 <= self.heat_time <= 255: raise ThermalPrinterValueError( 'heat_time should be between 0 and 255 (default: 80).') self.heat_interval = int(kwargs.get('heat_interval', 12)) if not 0 <= self.heat_interval <= 255: raise ThermalPrinterValueError( 'heat_interval should be between 0 and 255 (default: 12).') self.most_heated_point = int(kwargs.get('most_heated_point', 3)) if not 0 <= self.most_heated_point <= 255: raise ThermalPrinterValueError( 'most_heated_point should be between 0 and 255 (default: 3).') self._baudrate = baudrate self._byte_time = 11.0 / float(self._baudrate) self._dot_feed_time = 0.0025 self._dot_print_time = 0.033 super().__init__(port=port, baudrate=self._baudrate) sleep(0.5) register(self._on_exit) self.send_command(Command.ESC, 55, self.most_heated_point, self.heat_time, self.heat_interval) self.reset() def __enter__(self): return self def _on_exit(self): self.close() def __repr__(self): settings = ( 'heat_interval=' + str(self.heat_interval), 'heat_time=' + str(self.heat_time), 'most_heated_point=' + str(self.most_heated_point), ) states = [] for var in vars(self): if not var.startswith('_'): continue try: attr = getattr(self, var[1:]) except AttributeError: continue else: if not callable(attr): continue states.append('{}={}'.format(var[1:], getattr(self, var))) return '{name}<id=0x{id:x}, {settings}>({states})'.format( name=type(self).__name__, id=id(self), settings=', '.join(sorted(settings)), states=', '.join(sorted(states))) @property def is_online(self): return self.__is_online @property def is_sleeping(self): return self.__is_sleeping @property def lines(self): return self.__lines @property def feeds(self): return self.__feeds @property def max_column(self): return self.__max_column def out(self, data, line_feed=True, **kwargs): if data is None: return for style, value in kwargs.items(): try: getattr(self, style)(value) except TypeError: pass self.write(self.to_bytes(data)) if line_feed: self.write(b'\n') self.__lines += 1 if self._size != 'S': self.__lines += 1 sleep(2 * self._dot_feed_time * self._char_height) for style, value in kwargs.items(): try: getattr(self, style)() except TypeError: pass def send_command(self, *args): for data in args: if isinstance(data, Command): data = data.value self.write(bytes([data])) sleep(len(args) * self._byte_time) def to_bytes(self, data): if isinstance(data, (bool, int, float, complex)): data = str(data) elif isinstance(data, bytes): return data elif isinstance(data, bytearray): return bytes(data) elif isinstance(data, memoryview): return data.tobytes() encoding = 'utf-8' if self._chinese else self._codepage.name try: return bytes(data, encoding, errors='replace') except LookupError: encoding = CodePageConverted[self._codepage.name].value return bytes(data, encoding, errors='replace') def barcode(self, data, barcode_type): validate_barcode(data, barcode_type) code = barcode_type.value[0] self.send_command(Command.GS, 107, code, len(data)) for char in list(data): char 
= bytes([ord(char)]) self.write(char) sleep( (self._barcode_height + self._line_spacing) * self._dot_print_time) self.__lines += int(self._barcode_height / self._line_spacing) + 1 def barcode_height(self, height=162): if not isinstance(height, int) or not 1 <= height <= 255: raise ThermalPrinterValueError( 'height should be between 1 and 255 (default: 162).') if height != self._barcode_height: self._barcode_height = height self.send_command(Command.GS, 104, height) def barcode_left_margin(self, margin=0): if not isinstance(margin, int) or not 0 <= margin <= 255: raise ThermalPrinterValueError( 'margin should be between 0 and 255 (default: 0).') if margin != self._barcode_left_margin: self._barcode_left_margin = margin self.send_command(Command.GS, 120, margin) def barcode_position(self, position=BarCodePosition.HIDDEN): validate_barcode_position(position) if position is not self._barcode_position: self._barcode_position = position self.send_command(Command.GS, 72, position.value) def barcode_width(self, width=3): if not isinstance(width, int) or not 2 <= width <= 6: raise ThermalPrinterValueError( 'width should be between 2 and 6 (default: 3).') if width != self._barcode_width: self._barcode_width = width self.send_command(Command.GS, 119, width) def bold(self, state=False): state = bool(state) if state is not self._bold: self._bold = state self.send_command(Command.ESC, 69, int(state)) def charset(self, charset=CharSet.USA): validate_charset(charset) if charset is not self._charset: self._charset = charset self.send_command(Command.ESC, 82, charset.value) def char_spacing(self, spacing=0): if not isinstance(spacing, int) or not 0 <= spacing <= 255: raise ThermalPrinterValueError( 'spacing should be between 0 and 255 (default: 0).') if spacing != self._char_spacing: self._char_spacing = spacing self.send_command(Command.ESC, 32, spacing) def chinese(self, state=False): state = bool(state) if state is not self._chinese: self._chinese = state self.send_command(Command.FS, 38 if state else 46) def chinese_format(self, fmt=Chinese.GBK): validate_chinese_format(fmt) if fmt is not self._chinese_format: self._chinese_format = fmt self.send_command(Command.ESC, 57, fmt.value) def codepage(self, codepage=CodePage.CP437): validate_codepage(codepage) if not self._chinese and codepage is not self._codepage: self._codepage = codepage value, _ = codepage.value self.send_command(Command.ESC, 116, value) sleep(0.05) def double_height(self, state=False): state = bool(state) if state is not self._double_height: self._double_height = state self._char_height = 48 if state else 24 self.send_command(Command.ESC, 33, 16 if state else 0) def double_width(self, state=False): state = bool(state) if state is not self._double_width: self._double_width = state self.__max_column = 16 if state else 32 self.send_command(Command.ESC, 14 if state else 20, 1) def feed(self, number=1): if not isinstance(number, int) or not 0 <= number <= 255: raise ThermalPrinterValueError( 'number should be between 0 and 255 (default: 1).') self.send_command(Command.ESC, 100, number) sleep(number * self._dot_feed_time * self._char_height) self.__feeds += number def flush(self, clear=False): self.send_command(Command.ESC, 64) self.reset_output_buffer() sleep(0.05) if clear: self.reset_input_buffer() def image(self, image): if not hasattr(image, 'im'): raise ThermalPrinterValueError('image should be a PIL Image.') if image.mode != '1': image = image.convert('1') width = min(image.size[0], 384) height = image.size[1] row_bytes = int((width + 
7) / 8) row_bytes_clipped = min(row_bytes, 48) bitmap = bytearray(row_bytes * height) pixels = image.load() for col in range(height): offset = col * row_bytes row = 0 for pad in range(row_bytes): sum_ = 0 bit = 128 while bit > 0: if row >= width: break if pixels[row, col] == 0: sum_ |= bit row += 1 bit >>= 1 bitmap[offset + pad] = sum_ idx = 0 for row_start in range(0, height, 255): chunk_height = min(height - row_start, 255) self.send_command(Command.DC2, 42, chunk_height, row_bytes_clipped) for _ in range(chunk_height): for _ in range(row_bytes_clipped): self.write(bytes([bitmap[idx]])) idx += 1 sleep(row_bytes_clipped * self._byte_time) idx += row_bytes - row_bytes_clipped self.__lines += height // self._line_spacing + 1 def inverse(self, state=False): state = bool(state) if state is not self._inverse: self._inverse = state self.send_command(Command.GS, 66, int(state)) def justify(self, value='L'): if not isinstance(value, str) or value not in 'LCRlcr': err = 'value should be one of L (left, default), C (center)' err += ' or R (right).' raise ThermalPrinterValueError(err) value = value.upper() if value != self._justify: self._justify = value if value == 'C': pos = 1 elif value == 'R': pos = 2 else: pos = 0 self.send_command(Command.ESC, 97, pos) def left_margin(self, margin=0): if not isinstance(margin, int) or not 0 <= margin <= 47: raise ThermalPrinterValueError( 'margin should be between 0 and 47 (default: 0).') if margin != self._left_margin: self._left_margin = margin self.send_command(Command.ESC, 66, margin) def line_spacing(self, spacing=30): if not isinstance(spacing, int) or not 0 <= spacing <= 255: raise ThermalPrinterValueError( 'spacing should be between 0 and 255 (default: 30).') if spacing != self._line_spacing: self._line_spacing = spacing self.send_command(Command.ESC, 51, spacing) def offline(self): if self.is_online: self.__is_online = False self.send_command(Command.ESC, 61, 0) def online(self): if not self.is_online: self.__is_online = True self.send_command(Command.ESC, 61, 1) def reset(self): self.send_command(Command.ESC, 64) self.__max_column = 32 self.__is_online = True self.__is_sleeping = False self._barcode_height = 162 self._barcode_left_margin = 0 self._barcode_position = BarCodePosition.HIDDEN self._barcode_width = 3 self._bold = False self._charset = CharSet.USA self._char_spacing = 0 self._char_height = 24 self._chinese = False self._chinese_format = Chinese.GBK self._codepage = CodePage.CP437 self._double_height = False self._double_width = False self._inverse = False self._justify = 'L' self._left_margin = 0 self._line_spacing = 30 self._rotate = False self._size = 'S' self._strike = False self._underline = 0 self._upside_down = False def rotate(self, state=False): state = bool(state) if state is not self._rotate: self._rotate = state self.send_command(Command.ESC, 86, int(state)) def size(self, value='S'): if not isinstance(value, str) or value not in 'SMLsml': err = 'value should be one of S (small, default), M (medium)' err += ' or L (large).' 
raise ThermalPrinterValueError(err) value = value.upper() if value != self._size: self._size = value if value == 'L': size, self._char_height, self.__max_column = 0x11, 48, 16 elif value == 'M': size, self._char_height, self.__max_column = 0x01, 48, 32 else: size, self._char_height, self.__max_column = 0x00, 24, 32 self.send_command(Command.GS, 33, size) def sleep(self, seconds=1): if self.is_sleeping: return if not isinstance(seconds, int) or seconds < 0: raise ThermalPrinterValueError( 'seconds should be null or positive (default: 0).') if seconds: self.__is_sleeping = True self.send_command(Command.ESC, 56, seconds, seconds >> 8) def status(self, raise_on_error=True): ret = {'movement': True, 'paper': True, 'temp': True, 'voltage': True} self.send_command(Command.ESC, 118, 0) sleep(0.05) if self.in_waiting: stat = ord(self.read(1)) ret['movement'] = stat & 0b00000001 == 1 ret['paper'] = stat & 0b00000100 == 0 ret['voltage'] = stat & 0b00001000 == 0 ret['temp'] = stat & 0b01000000 == 0 elif raise_on_error: raise ThermalPrinterCommunicationError() return ret def strike(self, state=False): state = bool(state) if state is not self._strike: self._strike = state self.send_command(Command.ESC, 71, int(state)) def test(self): self.send_command(Command.DC2, 84) sleep(self._dot_print_time * 24 * 26 + self._dot_feed_time * (8 * 26 + 32)) def underline(self, weight=0): if not isinstance(weight, int) or not 0 <= weight <= 2: raise ThermalPrinterValueError( 'weight should be between 0 and 2 (default: 0).') if weight != self._underline: self._underline = weight self.send_command(Command.ESC, 45, weight)
MIT License
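A hedged sketch of the style toggle above; it assumes a compatible printer is attached at the given serial port:

printer = ThermalPrinter(port='/dev/ttyAMA0')
printer.out('Right side up')
# out() accepts style names as keyword arguments and resets them after the
# line, so only this one line is flipped.
printer.out('Upside down', upside_down=True)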
flask-admin/flask-admin
flask_admin/contrib/appengine/form.py
AdminModelConverter.convert_GeoPtProperty
python
def convert_GeoPtProperty(self, model, prop, kwargs):
    return GeoPtPropertyField(**kwargs)
Returns a form field for a ``ndb.GeoPtProperty``.
https://github.com/flask-admin/flask-admin/blob/e39f786374ce0e60db93f583efacb4672de0025c/flask_admin/contrib/appengine/form.py#L8-L10
from wtforms_appengine.ndb import ModelConverter from .fields import GeoPtPropertyField from flask_admin.model.form import converts class AdminModelConverter(ModelConverter): @converts('GeoPt')
BSD 3-Clause New or Revised License
chrklemm/sesmg
program_files/create_objects.py
Sinks.create_sink
python
def create_sink(self, de: dict, timeseries_args: dict):
    self.nodes_sinks.append(
        solph.Sink(label=de['label'],
                   inputs={
                       self.busd[de['input']]: solph.Flow(**timeseries_args)}))
Creates an oemof sink with a fixed or unfixed timeseries.

:param de: dictionary containing all information for the creation of an oemof sink. At least the following key-value-pairs have to be included:

    - 'label'
    - 'input'
:type de: dict
:param timeseries_args: dictionary containing either the 'fix' attribute or the 'min' and 'max' attributes of a sink
:type timeseries_args: dict

Christian Klemm - christian.klemm@fh-muenster.de
https://github.com/chrklemm/sesmg/blob/382ffd600b98d3cc6df53abed0cb3526187cb1cf/program_files/create_objects.py#L693-L718
from oemof import solph import logging import os import pandas as pd from feedinlib import * import demandlib.bdew as bdew import datetime import numpy def buses(nodes_data: dict, nodes: list) -> dict: busd = {} for i, b in nodes_data['buses'].iterrows(): if b['active']: bus = solph.Bus(label=b['label']) nodes.append(bus) busd[b['label']] = bus logging.info(' ' + 'Bus created: ' + b['label']) if b['excess']: inputs = { busd[b['label']]: solph.Flow(variable_costs=b['excess costs'], emission_factor=b[ 'excess constraint costs'])} nodes.append( solph.Sink( label=b['label'] + '_excess', inputs=inputs)) if b['shortage']: outputs = { busd[b['label']]: solph.Flow( variable_costs=b['shortage costs'], emission_factor=b[ 'shortage constraint costs'])} nodes.append( solph.Source( label=b['label'] + '_shortage', outputs=outputs)) return busd class Sources: def create_source(self, so: dict, timeseries_args: dict, output=None): if output is None: output = self.busd[so['output']] if str(so['input']) in ['0', 'None', 'none', 'nan']: minimum = so['min. investment capacity'] maximum = so['max. investment capacity'] existing = so['existing capacity'] else: minimum = so['min. investment capacity'] * so['Conversion Factor'] maximum = so['max. investment capacity'] * so['Conversion Factor'] existing = so['existing capacity'] * so['Conversion Factor'] self.nodes_sources.append( solph.Source( label=so['label'], outputs={output: solph.Flow( investment=solph.Investment( ep_costs=so[ 'periodical costs'], periodical_constraint_costs=so[ 'periodical constraint costs'], minimum=minimum, maximum=maximum, existing=existing, nonconvex=True if so['non-convex investment'] == 1 else False, offset=so[ 'fix investment costs']), **timeseries_args, variable_costs=so['variable costs'], emission_factor=so[ 'variable constraint costs'] )} )) def commodity_source(self, so: dict): self.create_source(so, {'min': 0, 'max': 1}, self.busd[so['output']]) logging.info(' ' + 'Commodity Source created: ' + so['label']) def timeseries_source(self, so: dict, time_series): if so['fixed'] == 1: args = {'fix': time_series[so['label'] + '.fix'].tolist()} elif so['fixed'] == 0: args = {'min': time_series[so['label'] + '.min'].tolist(), 'max': time_series[so['label'] + '.max'].tolist()} else: raise SystemError(so['label'] + " Error in fixed attribute") self.create_source(so, args, self.busd[so['output']]) logging.info(' ' + 'Timeseries Source created: ' + so['label']) def pv_source(self, so: dict, my_weather_pandas_dataframe): parameter_set = { 'azimuth': so['Azimuth'], 'tilt': so['Surface Tilt'], 'module_name': so['Modul Model'], 'inverter_name': so['Inverter Model'], 'albedo': so['Albedo']} pv_module = powerplants.Photovoltaic(**parameter_set) my_weather_pandas_dataframe['ghi'] = (my_weather_pandas_dataframe.dirhi + my_weather_pandas_dataframe.dhi) name_dc = {'temperature': 'temp_air', 'windspeed': 'v_wind'} my_weather_pandas_dataframe.rename(columns=name_dc) feedin = pv_module.feedin( weather=my_weather_pandas_dataframe, location=(so['Latitude'], so['Longitude']), scaling='peak_power') for i in range(len(feedin)): if feedin[i] < 0: feedin[i] = 0 if feedin[i] > 1: feedin[i] = 1 feedin = feedin.fillna(0) if so['fixed'] == 1: args = {'fix': feedin} elif so['fixed'] == 0: args = {'min': 0, 'max': feedin} else: raise SystemError(so['label'] + " Error in fixed attribute") self.create_source(so, args, self.busd[so['output']]) logging.info(' ' + 'Source created: ' + so['label']) def windpower_source(self, so: dict, weather_df_wind): turbine_data = { 
'turbine_type': so['Turbine Model'], 'hub_height': so['Hub Height']} wind_turbine = WindPowerPlant(**turbine_data) data_height = {'pressure': 0, 'temperature': 2, 'wind_speed': 10, 'roughness_length': 0} weather_df_wind = weather_df_wind[['windspeed', 'temperature', 'z0', 'pressure']] weather_df_wind.columns = [['wind_speed', 'temperature', 'roughness_length', 'pressure'], [data_height['wind_speed'], data_height['temperature'], data_height['roughness_length'], data_height['pressure']]] feedin_wind_scaled = wind_turbine.feedin( weather=weather_df_wind, scaling='nominal_power') if so['fixed'] == 1: args = {'fix': feedin_wind_scaled} elif so['fixed'] == 0: args = {'min': 0, 'max': feedin_wind_scaled} else: raise SystemError(so['label'] + " Error in fixed attribute") self.create_source(so, args, self.busd[so['output']]) logging.info(' ' + 'Source created: ' + so['label']) def solar_heat_source(self, so, data): from oemof.thermal.solar_thermal_collector import flat_plate_precalc from oemof.thermal.concentrating_solar_power import csp_precalc import numpy col_bus = solph.Bus(label=so['label'] + '_bus') self.nodes_sources.append(col_bus) self.busd[so['label'] + '_bus'] = col_bus output = col_bus data.index.name = 'Datum' data = data.asfreq('h') data['ghi'] = (data["dirhi"] + data["dhi"]) if so['technology'] == 'solar_thermal_flat_plate': precalc_results = flat_plate_precalc( lat=so['Latitude'], long=so['Longitude'], collector_tilt=so['Surface Tilt'], collector_azimuth=so['Azimuth'], eta_0=so['ETA 0'], a_1=so['A1'], a_2=so['A2'], temp_collector_inlet= so['Temperature Inlet'], delta_temp_n= so['Temperature Difference'], irradiance_global=(data['ghi']), irradiance_diffuse=(data['dhi']), temp_amb=data['temperature']) collectors_heat = precalc_results.collectors_heat/1000 irradiance = precalc_results.col_ira/1000 elif so['technology'] == 'concentrated_solar_power': precalc_results = csp_precalc( lat=so['Latitude'], long=so['Longitude'], collector_tilt=so['Surface Tilt'], collector_azimuth=so['Azimuth'], cleanliness = so['Cleanliness'], a_1=so['A1'], a_2 = so['A2'], eta_0=so['ETA 0'], c_1 = so['C1'], c_2 = so['C2'], temp_collector_inlet = so['Temperature Inlet'], temp_collector_outlet = so['Temperature Inlet'] + so['Temperature Difference'], temp_amb=data['temperature'], E_dir_hor=data['dirhi']) collectors_heat = precalc_results.collector_heat/1000 irradiance = precalc_results.collector_irradiance/1000 if so['fixed'] == 1: args = {'fix': collectors_heat} elif so['fixed'] == 0: args = {'min': 0, 'max': collectors_heat} else: raise SystemError(so['label'] + " Error in fixed attribute") self.create_source(so, args, output) self.nodes_sources.append(solph.Transformer( label=so['label'] + '_collector', inputs={self.busd[so['label'] + '_bus']: solph.Flow(variable_costs=0), self.busd[so['input']]: solph.Flow(variable_costs=0)}, outputs={self.busd[so['output']]: solph.Flow(variable_costs=0)}, conversion_factors={ self.busd[so['label'] + '_bus']: 1, self.busd[so['input']]: so['Electric Consumption'] * (1 - so['Peripheral Losses']), self.busd[so['output']]: 1 - so['Peripheral Losses'] })) logging.info(' ' + 'Source created: ' + so['label'] + ", Max Heat power output per year and m²: {:2.2f}". 
format(numpy.sum(collectors_heat)) + ' kWh/(m²a)' + ", Irradiance on collector per year and m²: " "{:2.2f}".format(numpy.sum(irradiance)) + ' kWh/(m²a)') def __init__(self, nodes_data: dict, nodes: list, busd: dict, time_series, weather_data): self.nodes_sources = [] self.busd = busd.copy() for i, so in nodes_data['sources'].iterrows(): if so['active']: if so['technology'] == 'other': self.commodity_source(so) elif so['technology'] == 'photovoltaic': self.pv_source(so, weather_data) elif so['technology'] == 'windpower': self.windpower_source(so, weather_data) elif so['technology'] == 'timeseries': self.timeseries_source(so, time_series) elif so['technology'] in ['solar_thermal_flat_plate', 'concentrated_solar_power']: self.solar_heat_source(so, weather_data) for i in range(len(self.nodes_sources)): nodes.append(self.nodes_sources[i]) class Sinks: busd = None nodes_sinks = []
MIT License
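A hedged sketch of the arguments described above; the bus label, the demand profile, and the `sinks` object are hypothetical, and it assumes the surrounding Sinks instance has already populated self.busd:

# Hypothetical entries covering the keys the docstring requires.
de = {
    'label': 'electricity_demand',
    'input': 'electricity_bus',     # must be a key of the busd dictionary
}
# Fixed profile: one value per time step of the energy system's index.
timeseries_args = {'fix': [0.4, 0.7, 1.0, 0.6], 'nominal_value': 50}

sinks.create_sink(de, timeseries_args)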
facebookresearch/nevergrad
nevergrad/parametrization/core.py
as_parameter
python
def as_parameter(param: tp.Any) -> Parameter:
    if isinstance(param, Parameter):
        return param
    else:
        return Constant(param)
Returns a Parameter from anything: either the input if it is already a parameter, or a Constant if not.

This is convenient for iterating over Parameter and other objects alike.
https://github.com/facebookresearch/nevergrad/blob/1981997603e361b1fd5b5e2aeb8173c4eae6aef0/nevergrad/parametrization/core.py#L469-L477
import uuid import warnings import numpy as np import nevergrad.common.typing as tp from nevergrad.common import errors from . import utils from ._layering import ValueProperty as ValueProperty from ._layering import Layered as Layered from ._layering import Level as Level P = tp.TypeVar("P", bound="Parameter") class Parameter(Layered): _LAYER_LEVEL = Level.ROOT value: ValueProperty[tp.Any, tp.Any] = ValueProperty() def __init__(self) -> None: super().__init__() self._subobjects = utils.Subobjects( self, base=Parameter, attribute="__dict__" ) self.parents_uids: tp.List[str] = [] self.heritage: tp.Dict[tp.Hashable, tp.Any] = {"lineage": self.uid} self.loss: tp.Optional[float] = None self._losses: tp.Optional[np.ndarray] = None self._dimension: tp.Optional[int] = None self._random_state: tp.Optional[np.random.RandomState] = None self._generation = 0 self._constraint_checkers: tp.List[tp.Callable[[tp.Any], tp.Union[bool, float]]] = [] self._name: tp.Optional[str] = None self._frozen = False self._meta: tp.Dict[tp.Hashable, tp.Any] = {} self.function = utils.FunctionInfo() @property def descriptors(self) -> utils.DeprecatedDescriptors: return utils.DeprecatedDescriptors(self) @property def losses(self) -> np.ndarray: if self._losses is not None: return self._losses if self.loss is not None: return np.array([self.loss], dtype=float) raise RuntimeError("No loss was provided") @property def args(self) -> tp.Tuple[tp.Any, ...]: return (self.value,) @property def kwargs(self) -> tp.Dict[str, tp.Any]: return {} @property def dimension(self) -> int: if self._dimension is None: try: self._dimension = self.get_standardized_data(reference=self).size except errors.UnsupportedParameterOperationError: self._dimension = 0 return self._dimension def mutate(self) -> None: self._check_frozen() self._subobjects.apply("mutate") self._layers[-1]._layered_mutate() def _layered_mutate(self) -> None: self.set_standardized_data(self.random_state.normal(size=self.dimension)) def sample(self: P) -> P: self.random_state child = self._layers[-1]._layered_sample() if not isinstance(child, Parameter) and not isinstance(child, type(self)): raise errors.NevergradRuntimeError("Unexpected sample return type") child._set_parenthood(None) return child def recombine(self: P, *others: P) -> None: if not others: return self._check_frozen() assert all(isinstance(o, self.__class__) for o in others) self._subobjects.apply("recombine", *others) self._layers[-1]._layered_recombine(*others) def get_standardized_data(self: P, *, reference: P) -> np.ndarray: assert reference is None or isinstance( reference, self.__class__ ), f"Expected {type(self)} but got {type(reference)} as reference" return self._internal_get_standardized_data(self if reference is None else reference) def _internal_get_standardized_data(self: P, reference: P) -> np.ndarray: raise errors.UnsupportedParameterOperationError( f"Export to standardized data space is not implemented for {self.name}" ) def set_standardized_data(self: P, data: tp.ArrayLike, *, reference: tp.Optional[P] = None) -> P: sent_reference = self if reference is None else reference assert isinstance( sent_reference, self.__class__ ), f"Expected {type(self)} but got {type(sent_reference)} as reference" self._check_frozen() del self.value self._internal_set_standardized_data(np.array(data, copy=False), reference=sent_reference) return self def _internal_set_standardized_data( self: P, data: np.ndarray, reference: P ) -> None: if data.size: raise errors.UnsupportedParameterOperationError( f"Import from 
standardized data space is not implemented for {self.name}" ) @property def generation(self) -> int: return self._generation def get_value_hash(self) -> tp.Hashable: val = self.value if isinstance(val, (str, bytes, float, int)): return val elif isinstance(val, np.ndarray): return val.tobytes() else: raise errors.UnsupportedParameterOperationError( f"Value hash is not supported for object {self.name}" ) def __repr__(self) -> str: strings = [self.name] if not callable(self.value): strings.append(str(self.value)) return ":".join(strings) def satisfies_constraints(self) -> bool: inside = self._subobjects.apply("satisfies_constraints") if not all(inside.values()): return False if not self._constraint_checkers: return True val = self.value return all(utils.float_penalty(func(val)) <= 0 for func in self._constraint_checkers) def register_cheap_constraint( self, func: tp.Union[tp.Callable[[tp.Any], bool], tp.Callable[[tp.Any], float]], as_layer: bool = False, ) -> None: if getattr(func, "__name__", "not lambda") == "<lambda>": warnings.warn("Lambda as constraint is not advised because it may not be picklable.") if not as_layer: self._constraint_checkers.append(func) else: from nevergrad.ops.constraints import Constraint import nevergrad as ng compat_func = ( func if not isinstance(self, ng.p.Instrumentation) else utils._ConstraintCompatibilityFunction(func) ) self.add_layer(Constraint(compat_func)) @property def random_state(self) -> np.random.RandomState: if self._random_state is None: seed = np.random.randint(2 ** 32, dtype=np.uint32) self._set_random_state(np.random.RandomState(seed)) assert self._random_state is not None return self._random_state @random_state.setter def random_state(self, random_state: tp.Optional[np.random.RandomState]) -> None: self._set_random_state(random_state) def _set_random_state(self, random_state: tp.Optional[np.random.RandomState]) -> None: self._random_state = random_state self._subobjects.apply("_set_random_state", random_state) def spawn_child(self: P, new_value: tp.Optional[tp.Any] = None) -> P: self.random_state child = self.copy() child._set_parenthood(self) if new_value is not None: child.value = new_value return child def copy(self: P) -> P: child = super().copy() child.uid = uuid.uuid4().hex child._frozen = False child._subobjects = self._subobjects.new(child) child._meta = {} child.parents_uids = list(self.parents_uids) child.heritage = dict(self.heritage) child.loss = None child._losses = None child._constraint_checkers = list(self._constraint_checkers) if self is not self._layers[0]: raise errors.NevergradRuntimeError("Something has gone horribly wrong with the layers") attribute = self._subobjects.attribute container = getattr(child, attribute) if attribute != "__dict__": container = dict(container) if isinstance(container, dict) else list(container) setattr(child, attribute, container) for key, val in self._subobjects.items(): container[key] = val.copy() del child.value return child def _set_parenthood(self, parent: tp.Optional["Parameter"]) -> None: if parent is None: self._generation = 0 self.heritage = dict(lineage=self.uid) self.parents_uids = [] else: self._generation = parent.generation + 1 self.parents_uids = [parent.uid] self._subobjects.apply("_set_parenthood", parent) def freeze(self) -> None: self._frozen = True self._subobjects.apply("freeze") def _check_frozen(self) -> None: if self._frozen and not isinstance( self, Constant ): raise RuntimeError( f"Cannot modify frozen Parameter {self.name}, please spawn a child and modify it instead" 
"(optimizers freeze the parametrization and all asked and told candidates to avoid border effects)" ) self.random_state self._subobjects.apply("_check_frozen") class Constant(Parameter): def __init__(self, value: tp.Any) -> None: super().__init__() if isinstance(value, Parameter) and not isinstance(self, MultiobjectiveReference): raise TypeError("Only non-parameters can be wrapped in a Constant") self._value = value def _get_name(self) -> str: return str(self._value) def get_value_hash(self) -> tp.Hashable: try: return super().get_value_hash() except errors.UnsupportedParameterOperationError: return "#non-hashable-constant#" def _layered_get_value(self) -> tp.Any: return self._value def _layered_set_value(self, value: tp.Any) -> None: different = False if isinstance(value, np.ndarray): if not np.equal(value, self._value).all(): different = True elif not (value == self._value or value is self._value): different = True if different: raise ValueError( f'Constant value can only be updated to the same value (in this case "{self._value}")' ) def _layered_sample(self: P) -> P: return self def get_standardized_data( self: P, *, reference: tp.Optional[P] = None ) -> np.ndarray: return np.array([]) def spawn_child(self: P, new_value: tp.Optional[tp.Any] = None) -> P: if new_value is not None: self.value = new_value return self def recombine(self: P, *others: P) -> None: pass def mutate(self) -> None: pass
MIT License
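A minimal usage sketch (not part of the record above) exercising the standardized-data round trip defined in the nevergrad `Parameter` context through `ng.p.Scalar`, a concrete `Parameter` subclass; the init value is arbitrary and this is an illustration under those assumptions, not code from the repository.

import nevergrad as ng

parent = ng.p.Scalar(init=0.0)          # concrete Parameter subclass
child = parent.spawn_child()            # inherits lineage from parent
child.mutate()                          # delegates to the top layer's _layered_mutate

# Round trip through the standardized data space described above.
data = child.get_standardized_data(reference=parent)
clone = parent.spawn_child()
clone.set_standardized_data(data, reference=parent)
print(child.value, clone.value)         # expected to print the same value twice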
cainmagi/mdnt
data/deprecated/h5py.py
H5SupSaver.dump
python
def dump(self, keyword, data):
    if self.f is None:
        raise OSError('Should not dump data before opening a file.')
    self.f.create_dataset(keyword, data=data, **self.__kwargs)
    if self.logver > 0:
        print('Dump {0} into the file. The data shape is {1}.'.format(keyword, data.shape))
Dump the dataset with a keyword into the file.

Arguments:
    keyword: the keyword of the dumped dataset.
    data: dataset, should be a numpy array.
https://github.com/cainmagi/mdnt/blob/4affd8a83698ce6786c04dddacdcf7415f8c5f90/data/deprecated/h5py.py#L73-L84
import h5py import numpy as np import tensorflow as tf import os REMOVE_DEPRECATION = False def depcatedInfo(): try: raise DeprecationWarning('This library has been deprecated.') except Exception as e: if not REMOVE_DEPRECATION: raise e class H5SupSaver: def __init__(self, fileName): self.f = None depcatedInfo() self.logver = 0 self.__kwargs = dict() self.open(fileName) self.config(dtype='f') def config(self, **kwargs): logver = kwargs.pop('logver', None) if logver is not None: self.logver = logver self.__kwargs.update(kwargs) if self.logver > 0: print('Current configuration is:', self.__kwargs)
MIT License
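A hedged usage sketch for the `H5SupSaver.dump` method above; the import path `mdnt.data.deprecated.h5py`, the existence of the `open()` method called by the constructor, and the file/dataset names are assumptions inferred from the record's function_path and context.

import numpy as np
import mdnt.data.deprecated.h5py as dp   # assumed module path

dp.REMOVE_DEPRECATION = True             # otherwise the constructor raises DeprecationWarning
saver = dp.H5SupSaver('train_set.h5')    # __init__ opens the file via self.open()
saver.config(logver=1, dtype='f')        # extra kwargs are forwarded to create_dataset
saver.dump('images', np.random.rand(100, 28, 28))  # stored as HDF5 dataset 'images'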
tektoncd/experimental
sdk/python/tekton_pipeline/models/v1beta1_pipeline_list.py
V1beta1PipelineList.api_version
python
def api_version(self, api_version):
    self._api_version = api_version
Sets the api_version of this V1beta1PipelineList.

APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

:param api_version: The api_version of this V1beta1PipelineList.  # noqa: E501
:type: str
https://github.com/tektoncd/experimental/blob/0ba4e7a2b9d45ed4accaecbb34dac006d665796a/sdk/python/tekton_pipeline/models/v1beta1_pipeline_list.py#L95-L104
import pprint import re import six from tekton_pipeline.configuration import Configuration class V1beta1PipelineList(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'items': 'list[V1beta1Pipeline]', 'kind': 'str', 'metadata': 'V1ListMeta' } attribute_map = { 'api_version': 'apiVersion', 'items': 'items', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._items = None self._kind = None self._metadata = None self.discriminator = None if api_version is not None: self.api_version = api_version self.items = items if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): return self._api_version @api_version.setter
Apache License 2.0
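Illustrative only: a short sketch of the generated setter above in use; the import path follows the record's function_path and the apiVersion string is a placeholder.

from tekton_pipeline.models.v1beta1_pipeline_list import V1beta1PipelineList

plist = V1beta1PipelineList(items=[])      # items is assigned unconditionally in __init__
plist.api_version = 'tekton.dev/v1beta1'   # goes through the @api_version.setter above
print(plist.api_version)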
qkaiser/cottontail
cottontail/rabbitmq_management.py
RabbitMQManagementClient.get_request
python
def get_request(self, path):
    response = requests.get(
        "{}://{}:{}/api/{}".format(self._scheme, self._host, self._port, path),
        auth=(self._username, self._password),
        verify=False,
        timeout=5
    )
    if response.status_code == 200:
        return response.json()
    elif response.status_code == 401 or response.status_code == 403:
        raise UnauthorizedAccessException(
            "Authorization error: can't access /api/{}".format(path))
    elif response.status_code == 404:
        return None
    else:
        raise Exception("An error occured")
Wrapper for GET requests to the API.

Args:
    path (str): REST path appended to /api

Returns:
    HTTP response JSON object, or None if the resource does not exist.

Raises:
    UnauthorizedAccessException
https://github.com/qkaiser/cottontail/blob/b7f5222959cf6229ef33d7369e5e8881e6181727/cottontail/rabbitmq_management.py#L60-L87
try: from urllib.parse import quote except ImportError: from urllib import quote import requests from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) class UnauthorizedAccessException(Exception): pass class RabbitMQManagementClient(object): def __init__(self, host, port=15672, username="guest", password="guest", ssl=False): self._host = host self._port = port self._username = username self._password = password self._scheme = "https" if ssl else "http"
BSD 3-Clause New or Revised License
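A hedged usage sketch for `RabbitMQManagementClient.get_request`; the import path follows the record's function_path, and the host, credentials and the 'overview' path are placeholders for whatever management endpoint is actually being queried.

from cottontail.rabbitmq_management import (
    RabbitMQManagementClient,
    UnauthorizedAccessException,
)

client = RabbitMQManagementClient('192.168.1.10', port=15672,
                                  username='guest', password='guest')
try:
    overview = client.get_request('overview')   # GET <scheme>://host:port/api/overview
    if overview is not None:                    # get_request returns None on 404
        print(overview.get('rabbitmq_version'))
except UnauthorizedAccessException as exc:
    print('Credentials rejected: {}'.format(exc))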
nsls-ii/pyxrf
pyxrf/core/tests/test_quant_analysis.py
_create_file_with_ref_standards
python
def _create_file_with_ref_standards(*, wd):
    sd = _standard_data_sample[0]
    file_path = os.path.join(wd, ".pyxrf", "quantitative_standards.yaml")
    standard_data = []
    for n in range(2):
        sd_copy = copy.deepcopy(sd)
        sd_copy["serial"] += f"{n}"
        sd_copy["name"] = f"Test reference standard #{sd_copy['serial']}"
        d = sd_copy["compounds"]
        total_density = 0
        for c in d:
            d[c] += np.random.rand() * 2 - 1
            total_density += d[c]
        sd_copy["density"] = total_density
        standard_data.append(sd_copy)
    save_xrf_standard_yaml_file(file_path=file_path, standard_data=standard_data)
    img_list = []
    eline_lists = [("Ni_K", "Ga_K", "S_K"), ("S_K", "Cr_K", "Ni_K")]
    for elist in eline_lists:
        img_list.append(gen_xrf_map_dict(elines=elist))
    return file_path, img_list
Create a file with user-defined standards based on ``_standard_data_sample[0]``. The file contains the descriptions of 2 standards with identical sets of elements/compounds with slightly different densities.

The created file is placed at the standard default location ``<wd>/.pyxrf/quantitative_standards.yaml``. The working directory (typically ``~``) is set to a temporary directory for use with PyTest.

It is expected that the dataset will be processed using an incident energy value of 12.0 keV.
https://github.com/nsls-ii/pyxrf/blob/0aa4e175f541edfaa8f71daf54b54a07e4ab2b04/pyxrf/core/tests/test_quant_analysis.py#L727-L767
import os import pytest import jsonschema import copy import numpy as np import numpy.testing as npt import time as ttime import re from pyxrf.core.utils import convert_time_from_nexus_string from pyxrf.core.xrf_utils import validate_element_str, generate_eline_list, split_compound_mass from pyxrf.core.quant_analysis import ( save_xrf_standard_yaml_file, load_xrf_standard_yaml_file, _xrf_standard_schema, load_included_xrf_standard_yaml_file, compute_standard_element_densities, _xrf_quant_fluor_schema, save_xrf_quant_fluor_json_file, load_xrf_quant_fluor_json_file, get_quant_fluor_data_dict, fill_quant_fluor_data_dict, prune_quant_fluor_data_dict, set_quant_fluor_data_dict_optional, set_quant_fluor_data_dict_time, ParamQuantEstimation, ParamQuantitativeAnalysis, ) _standard_data_sample = [ { "name": "Test Micromatter 411570", "serial": "411570", "description": "InSx 22.4 (In=16.0 S=6.4) / SrF2 20.6 / Cr 19.8 / Ni 19.2 / GaAs 19.1 (Ga=8.3 As=10.8)", "compounds": {"In": 16.0, "S": 6.4, "SrF2": 20.6, "Cr": 19.8, "Ni": 19.2, "Ga": 8.3, "As": 10.8}, "density": 101.1, }, { "name": "Test Micromatter 411640", "serial": "411640", "description": "CeF3 21.1 / Au 20.6", "compounds": {"CeF3": 21.1, "Au": 20.6} }, ] @pytest.mark.parametrize("standard_data", [ _standard_data_sample, [] ]) def test_save_xrf_standard_yaml_file1(tmp_path, standard_data): yaml_path = ["yaml", "param", "file"] file_name = "standard.yaml" yaml_path = os.path.join(tmp_path, *yaml_path, file_name) save_xrf_standard_yaml_file(yaml_path, standard_data) data_loaded = load_xrf_standard_yaml_file(yaml_path) assert data_loaded == standard_data, "Loaded data is not equal to the original data" def test_save_xrf_standard_yaml_file2(tmp_path): yaml_path = ["yaml", "param", "file"] file_name = "standard.yaml" yaml_path = os.path.join(tmp_path, *yaml_path, file_name) standard_data = [] save_xrf_standard_yaml_file(yaml_path, standard_data) with pytest.raises(IOError, match=f"File '{re.escape(yaml_path)}' already exists"): save_xrf_standard_yaml_file(yaml_path, standard_data) save_xrf_standard_yaml_file(yaml_path, standard_data, overwrite_existing=True) def test_save_xrf_standard_yaml_file3(tmp_path): yaml_path = ["yaml", "param", "file"] file_name = "standard.yaml" yaml_path = os.path.join(tmp_path, *yaml_path, file_name) standard_data = copy.deepcopy(_standard_data_sample) standard_data[0]["density"] = 50.0 save_xrf_standard_yaml_file(yaml_path, standard_data) with pytest.raises(RuntimeError, match="Sum of areal densities does not match total density"): load_xrf_standard_yaml_file(yaml_path) def test_load_xrf_standard_yaml_file1(tmp_path): file_name = "standard.yaml" yaml_path = os.path.join(tmp_path, file_name) with pytest.raises(IOError, match=f"File '{re.escape(yaml_path)}' does not exist"): load_xrf_standard_yaml_file(yaml_path) yaml_path = ["yaml", "param", "file"] yaml_path = os.path.join(tmp_path, *yaml_path, file_name) with pytest.raises(IOError, match=f"File '{re.escape(yaml_path)}' does not exist"): load_xrf_standard_yaml_file(yaml_path) def test_load_xrf_standard_yaml_file2(tmp_path): yaml_path = ["yaml", "param", "file"] file_name = "standard.yaml" yaml_path = os.path.join(tmp_path, *yaml_path, file_name) standard_data = copy.deepcopy(_standard_data_sample) for v in standard_data: v["name"] = 50.36 save_xrf_standard_yaml_file(yaml_path, standard_data) with pytest.raises(jsonschema.ValidationError): load_xrf_standard_yaml_file(yaml_path) load_xrf_standard_yaml_file(yaml_path, schema=None) schema = 
copy.deepcopy(_xrf_standard_schema) schema["properties"]["name"] = {"type": ["string", "number"]} standard_data_out = load_xrf_standard_yaml_file(yaml_path, schema=schema) assert standard_data_out == standard_data, "Loaded data is different from the original" def test_load_included_xrf_standard_yaml_file(): data = load_included_xrf_standard_yaml_file() assert len(data) > 1, "Standard YAML file can not be read" def test_compute_standard_element_densities(): standard_data = _standard_data_sample for data in standard_data: if "density" in data: total_density = data["density"] else: total_density = np.sum(list(data["compounds"].values())) element_densities = compute_standard_element_densities(data["compounds"]) assert all( [validate_element_str(_) for _ in element_densities.keys()] ), f"Some of the elements in the list {list(element_densities.keys())} have invalid representation" npt.assert_almost_equal( np.sum(list(element_densities.values())), total_density, err_msg="The sum of element densities and the total sum don't match", ) _xrf_standard_fluor_sample = { "name": "Hypothetical sample #41157", "serial": "41157", "description": "Name of hypothetical sample", "element_lines": { "In": {"density": 16.0, "fluorescence": 1.5453452}, "S_K": {"density": 6.4, "fluorescence": 2.0344345}, "Sr_L": {"density": 20.6, "fluorescence": 0.93452365}, "Au_M": {"density": 10.4, "fluorescence": 0.734234}, "Cr_Ka": {"density": 19.8, "fluorescence": 0.7435234}, "Ni_Kb": {"density": 19.2, "fluorescence": 0.7435234}, "Ga_Ka1": {"density": 8.3, "fluorescence": 0.7435234}, "Mg_Ka2": {"density": 9.6, "fluorescence": None}, }, "incident_energy": 10.5, "detector_channel": "sum", "scaler_name": "i0", "distance_to_sample": 50.6, "creation_time_local": "2020-01-10T11:50:39+00:00", "source_scan_id": 92276, "source_scan_uid": "f07e3065-ab92-4b20-a702-ef61ed164dbc", } def _get_data_and_json_path(tmp_path): json_path = ["json", "param", "file"] file_name = "standard.yaml" json_path = os.path.join(tmp_path, *json_path, file_name) data = _xrf_standard_fluor_sample return data, json_path def test_save_xrf_quant_fluor_json_file1(tmp_path): data, json_path = _get_data_and_json_path(tmp_path) save_xrf_quant_fluor_json_file(json_path, data) data_loaded = load_xrf_quant_fluor_json_file(json_path) assert data_loaded == data, "Loaded data is not equal to the original data" def test_save_xrf_quant_fluor_json_file2(tmp_path): data, json_path = _get_data_and_json_path(tmp_path) save_xrf_quant_fluor_json_file(json_path, data) with pytest.raises(IOError, match=f"File '{re.escape(json_path)}' already exists"): save_xrf_quant_fluor_json_file(json_path, data) save_xrf_quant_fluor_json_file(json_path, data, overwrite_existing=True) def test_save_xrf_quant_fluor_json_file3(tmp_path): data, json_path = _get_data_and_json_path(tmp_path) data = copy.deepcopy(data) data["incident_energy"] = "incident_energy" with pytest.raises(jsonschema.ValidationError): save_xrf_quant_fluor_json_file(json_path, data) def test_save_xrf_quant_fluor_json_file4(tmp_path): data, json_path = _get_data_and_json_path(tmp_path) data = copy.deepcopy(data) data["detector_channel"] = None data["scaler_name"] = None data["distance_to_sample"] = None save_xrf_quant_fluor_json_file(json_path, data) data_loaded = load_xrf_quant_fluor_json_file(json_path) assert data_loaded == data, "Loaded data is not equal to the original data" def test_load_xrf_quant_fluor_json_file1(tmp_path): _, json_path = _get_data_and_json_path(tmp_path) with pytest.raises(IOError, match=f"File 
'{re.escape(json_path)}' does not exist"): load_xrf_quant_fluor_json_file(json_path) def test_load_xrf_quant_fluor_json_file2(tmp_path): data, json_path = _get_data_and_json_path(tmp_path) save_xrf_quant_fluor_json_file(json_path, data) schema = copy.deepcopy(_xrf_quant_fluor_schema) schema["properties"]["scaler_name"] = {"type": "number"} with pytest.raises(jsonschema.ValidationError): load_xrf_quant_fluor_json_file(json_path, schema=schema) def test_get_quant_fluor_data_dict(): for standard_data in _standard_data_sample: quant_fluor_data_dict = get_quant_fluor_data_dict(standard_data, incident_energy=12.0) jsonschema.validate(instance=quant_fluor_data_dict, schema=_xrf_quant_fluor_schema) assert quant_fluor_data_dict["name"] == standard_data["name"], "Dictionary element 'name' is incorrect" assert ( quant_fluor_data_dict["serial"] == standard_data["serial"] ), "Dictionary element 'serial' is incorrect" assert ( quant_fluor_data_dict["description"] == standard_data["description"] ), "Dictionary element 'description' is incorrect" eline_set = set() mass_sum_expected = 0 for cmpd, mass in standard_data["compounds"].items(): em_dict = split_compound_mass(cmpd, mass) for el, ms in em_dict.items(): elines = generate_eline_list([el], incident_energy=12.0) n_elines = len(elines) if n_elines: mass_sum_expected += n_elines * ms eline_set.update(elines) eline_out_list = list(quant_fluor_data_dict["element_lines"].keys()) assert len(eline_out_list) == len(eline_set), "The number of emission lines is not as expected" assert ( set(eline_out_list) == eline_set ), "Generated object contains emission lines that are different from expected" mass_sum = sum([_["density"] for _ in quant_fluor_data_dict["element_lines"].values()]) assert ( mass_sum == mass_sum_expected ), "The total mass (density) of the components is different from expected" def gen_xrf_map_dict(nx=10, ny=5, elines=["S_K", "Au_M", "Fe_K"]): img = {} for e in elines: map = np.random.rand(ny, nx) * np.random.rand() * 10 img[e] = map map_sclr = np.ones(shape=(ny, nx), dtype=float) * np.random.rand() * 2 img["sclr"] = map_sclr return img @pytest.mark.parametrize("map_dims", [ {"nx": 10, "ny": 5}, {"nx": 1, "ny": 5}, {"nx": 2, "ny": 5}, {"nx": 3, "ny": 5}, {"nx": 10, "ny": 1}, {"nx": 10, "ny": 2}, {"nx": 10, "ny": 3}, ]) def test_fill_quant_fluor_data_dict(map_dims): fluor_standard = copy.deepcopy(_xrf_standard_fluor_sample) nx = map_dims["nx"] ny = map_dims["ny"] img = gen_xrf_map_dict(nx=nx, ny=ny) if nx < 3: nx_min, nx_max = 0, nx else: nx_min, nx_max = 1, -1 if ny < 3: ny_min, ny_max = 0, nx else: ny_min, ny_max = 1, -1 map_S_K_fluor = np.mean(img["S_K"][ny_min:ny_max, nx_min:nx_max]) map_Au_M_fluor = np.mean(img["Au_M"][ny_min:ny_max, nx_min:nx_max]) v_sclr = np.mean(img["sclr"][ny_min:ny_max, nx_min:nx_max]) fill_quant_fluor_data_dict(fluor_standard, xrf_map_dict=img, scaler_name="sclr") npt.assert_almost_equal( fluor_standard["element_lines"]["S_K"]["fluorescence"], map_S_K_fluor / v_sclr, err_msg="Fluorescence of 'S_K' is estimated incorrectly", ) npt.assert_almost_equal( fluor_standard["element_lines"]["Au_M"]["fluorescence"], map_Au_M_fluor / v_sclr, err_msg="Fluorescence of 'Au_M' is estimated incorrectly", ) for eline, param in fluor_standard["element_lines"].items(): assert (eline in img) or ( param["fluorescence"] is None ), f"Fluorescence line {eline} is not present in the dataset and it was not reset to None" fill_quant_fluor_data_dict(fluor_standard, xrf_map_dict=img, scaler_name="abc") npt.assert_almost_equal( 
fluor_standard["element_lines"]["S_K"]["fluorescence"], map_S_K_fluor, err_msg="Fluorescence of 'S_K' is estimated incorrectly", ) fill_quant_fluor_data_dict(fluor_standard, xrf_map_dict=img, scaler_name=None) npt.assert_almost_equal( fluor_standard["element_lines"]["S_K"]["fluorescence"], map_S_K_fluor, err_msg="Fluorescence of 'S_K' is estimated incorrectly", ) def test_prune_quant_fluor_data_dict(): fluor_standard = copy.deepcopy(_xrf_standard_fluor_sample) elines_not_none = [] for eline, info in fluor_standard["element_lines"].items(): set_to_none = np.random.rand() < 0.5 if set_to_none: info["fluorescence"] = None if info["fluorescence"] is not None: elines_not_none.append(eline) fluor_standard_pruned = prune_quant_fluor_data_dict(fluor_standard) for eline, info in fluor_standard_pruned["element_lines"].items(): assert eline in elines_not_none, f"Emission line {eline} should have been removed from the dictionary" assert info["fluorescence"] is not None, f"Emission line {eline} has 'fluorescence' set to None" def test_set_quant_fluor_data_dict_optional_1(): fluor_standard = copy.deepcopy(_xrf_standard_fluor_sample) scan_id = 45378 scan_uid = "abc-12345" set_quant_fluor_data_dict_optional(fluor_standard, scan_id=scan_id, scan_uid=scan_uid) assert fluor_standard["source_scan_id"] == scan_id, "Scan ID is set incorrectly" assert fluor_standard["source_scan_uid"] == scan_uid, "Scan UID is set incorrectly" t = fluor_standard["creation_time_local"] assert t is not None, "Time is not set" t = convert_time_from_nexus_string(t) t = ttime.mktime(t) t_current = ttime.mktime(ttime.localtime()) assert abs(t_current - t) < 5.0, "Time is set incorrectly" scan_id2 = 45346 scan_id2_str = f"{scan_id2}" set_quant_fluor_data_dict_optional(fluor_standard, scan_id=scan_id2_str) assert fluor_standard["source_scan_id"] == scan_id2, "Scan ID is set incorrectly" def test_set_quant_fluor_data_dict_optional_2(): fluor_standard = copy.deepcopy(_xrf_standard_fluor_sample) with pytest.raises(RuntimeError, match="Parameter 'scan_id' must be integer or a string representing integer"): set_quant_fluor_data_dict_optional(fluor_standard, scan_id="abc_34g") with pytest.raises(RuntimeError, match="Parameter 'scan_id' must be integer or a string representing integer"): set_quant_fluor_data_dict_optional(fluor_standard, scan_id=[1, 5, 14]) with pytest.raises(RuntimeError, match="Parameter 'scan_uid' must be a string representing scan UID"): set_quant_fluor_data_dict_optional(fluor_standard, scan_uid=[1, 5, 14]) def test_set_quant_fluor_data_dict_time(): fluor_standard = copy.deepcopy(_xrf_standard_fluor_sample) set_quant_fluor_data_dict_time(fluor_standard) t = fluor_standard["creation_time_local"] assert t is not None, "Time is not set" t = convert_time_from_nexus_string(t) t = ttime.mktime(t) t_current = ttime.mktime(ttime.localtime()) assert abs(t_current - t) < 5.0, "Time is set incorrectly" def test_ParamQuantEstimation_1(tmp_path): home_dir = tmp_path config_dir = ".pyxrf" standards_fln = "quantitative_standards.yaml" pqe = ParamQuantEstimation(home_dir=home_dir) file_path = os.path.join(home_dir, config_dir, standards_fln) assert os.path.isfile( file_path ), f"Empty file for user-defined reference standards '{file_path}' was not created" pqe.load_standards() assert pqe.standards_built_in is not None, "Failed to load built-in standards" assert pqe.standards_custom is not None, "Failed to load user-defined standards" pqe.clear_standards() assert pqe.standards_built_in is None, "Failed to clear loaded built-in standards" 
assert pqe.standards_custom is None, "Failed to clear loaded user-defined standards" def test_ParamQuantEstimation_2(tmp_path): standard_data = _standard_data_sample home_dir = tmp_path config_dir = ".pyxrf" standards_fln = "quantitative_standards.yaml" file_path = os.path.join(home_dir, config_dir, standards_fln) save_xrf_standard_yaml_file(file_path, standard_data) pqe = ParamQuantEstimation(home_dir=home_dir) pqe.load_standards() assert pqe.standards_built_in is not None, "Failed to load built-in standards" assert len(pqe.standards_built_in) > 0, "The number of loaded built-in standards is ZERO" assert pqe.standards_custom is not None, "Failed to load user-defined standards" assert len(pqe.standards_custom) > 0, "The number of loaded user-defined standards is ZERO" for st in pqe.standards_custom: serial = st["serial"] assert pqe._find_standard_custom(st), f"Standard {serial} was not found in user-defined list" assert not pqe._find_standard_built_in(st), f"Standard {serial} was found in built-in list" assert pqe.find_standard(st), f"Standard {serial} was not found" assert pqe.find_standard(st["name"], key="name"), f"Failed to find standard {serial} by name" assert pqe.find_standard(st["serial"], key="serial"), f"Failed to find standard {serial} by serial number" assert pqe.is_standard_custom(st), f"Standard {serial} was not identified as user-defined" pqe.set_selected_standard(st) assert pqe.standard_selected == st, f"Can't select standard {serial}" for st in pqe.standards_built_in: serial = st["serial"] assert not pqe._find_standard_custom(st), f"Standard {serial} was found in user-defined list" assert pqe._find_standard_built_in(st), f"Standard {serial} was not found in built-in list" assert pqe.find_standard(st), f"Standard {serial} was not found" assert pqe.find_standard(st["name"], key="name"), f"Failed to find standard {serial} by name" assert pqe.find_standard(st["serial"], key="serial"), f"Failed to find standard {serial} by serial number" assert not pqe.is_standard_custom(st), f"Standard {serial} was identified as user-defined" pqe.set_selected_standard(st) assert pqe.standard_selected == st, f"Can't select standard {serial}" st = {"serial": "09876", "name": "Some name"} st_selected = pqe.set_selected_standard(st) assert st_selected == pqe.standard_selected, "Return value of 'set_selected_standard' is incorrect" assert st_selected == pqe.standards_custom[0], "Incorrect standard is selected" pqe.set_selected_standard() assert st_selected == pqe.standards_custom[0], "Incorrect standard is selected" pqe.standards_custom = None pqe.set_selected_standard(st) assert pqe.standard_selected == pqe.standards_built_in[0], "Incorrect standard is selected" pqe.standards_built_in = None pqe.set_selected_standard(st) assert pqe.standard_selected is None, "Incorrect standard is selected" def test_ParamQuantEstimation_3(tmp_path): standard_data = _standard_data_sample home_dir = tmp_path config_dir = ".pyxrf" standards_fln = "quantitative_standards.yaml" file_path = os.path.join(home_dir, config_dir, standards_fln) save_xrf_standard_yaml_file(file_path, standard_data) pqe = ParamQuantEstimation(home_dir=home_dir) pqe.load_standards() incident_energy = 12.0 img = gen_xrf_map_dict() pqe.set_selected_standard() pqe.gen_fluorescence_data_dict(incident_energy=incident_energy) scaler_name = "sclr" pqe.fill_fluorescence_data_dict(xrf_map_dict=img, scaler_name=scaler_name) qfdd = get_quant_fluor_data_dict(standard_data[0], incident_energy) fill_quant_fluor_data_dict(qfdd, xrf_map_dict=img, 
scaler_name=scaler_name) assert ( pqe.fluorescence_data_dict == qfdd ), "The filled fluorescence data dictionary does not match the expected" pview, msg_warnings = pqe.get_fluorescence_data_dict_text_preview() assert len(msg_warnings), "Warning is not found in preview" pview, msg_warnings = pqe.get_fluorescence_data_dict_text_preview(enable_warnings=False) assert not len(msg_warnings), "Warnings are disabled, but still generated" pqe.set_detector_channel_in_data_dict(detector_channel="sum") assert pqe.fluorescence_data_dict["detector_channel"] == "sum", "Detector channel was not set correctly" distance_to_sample = 2.5 pqe.set_distance_to_sample_in_data_dict(distance_to_sample=distance_to_sample) assert ( pqe.fluorescence_data_dict["distance_to_sample"] == distance_to_sample ), "Distance-to-sample was not set correctly" scan_id = 65476 scan_uid = "abcdef-12345678" qfdd = copy.deepcopy(pqe.fluorescence_data_dict) pqe.set_optional_parameters(scan_id=scan_id, scan_uid=scan_uid) set_quant_fluor_data_dict_optional(qfdd, scan_id=scan_id, scan_uid=scan_uid) qfdd["creation_time_local"] = pqe.fluorescence_data_dict["creation_time_local"] assert pqe.fluorescence_data_dict == qfdd, "Optional parameters are not set correctly" pview, msg_warnings = pqe.get_fluorescence_data_dict_text_preview() assert not len(msg_warnings), "Preview is expected to contain no warnings" fln_suggested = pqe.get_suggested_json_fln() assert ( f"_{pqe.fluorescence_data_dict['serial']}." in fln_suggested ), f"Serial of the reference is not found in the suggested file name {fln_suggested}" file_path = os.path.join(tmp_path, fln_suggested) pqe.save_fluorescence_data_dict(file_path=file_path) qfdd = load_xrf_quant_fluor_json_file(file_path=file_path) assert qfdd == prune_quant_fluor_data_dict( pqe.fluorescence_data_dict ), "Error occurred while saving and loading calibration data dictionary. Dictionaries don't match"
BSD 3-Clause New or Revised License
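A hypothetical test for the helper above, written as if it lived in the same test module (so `os`, `load_xrf_standard_yaml_file` and the helper itself are already in scope); it asserts only what the helper's docstring and body promise.

def test_create_file_with_ref_standards(tmp_path):
    file_path, img_list = _create_file_with_ref_standards(wd=tmp_path)

    assert os.path.isfile(file_path)            # YAML written under <wd>/.pyxrf
    standards = load_xrf_standard_yaml_file(file_path)
    assert len(standards) == 2                  # two slightly different standards
    assert len(img_list) == 2                   # one map dict per emission-line set
    for img in img_list:
        assert "sclr" in img                    # scaler map added by gen_xrf_map_dict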
pyconll/pyconll
pyconll/tree/_treebuilder.py
TreeBuilder._assert_initialization_status
python
def _assert_initialization_status(self) -> None:
    if self.root is None:
        raise ValueError(
            'The TreeBuilder has not created a root for the Tree yet')
Asserts the initialization invariant on the root of this builder.

Raises:
    ValueError: If the TreeBuilder's root has not been initialized.
https://github.com/pyconll/pyconll/blob/a69b1bfb884aab7b449e19a7cc8850dcf7e985c0/pyconll/tree/_treebuilder.py#L203-L212
from typing import Any, Generic, TypeVar from pyconll.tree.tree import Tree T = TypeVar('T') class TreeBuilder(Generic[T]): def __init__(self) -> None: self.root: Any = None self.current: Any = None self.constructed: bool = False def create_root(self, data: T) -> None: self.root = Tree(data) self.current = self.root def move_to_parent(self) -> None: self._assert_initialization_status() if self.current is self.root: raise ValueError('Currently at root. Cannot move to parent') self.current = self.current.parent def move_to_child(self, i: int) -> None: self._assert_initialization_status() try: self.current = self.current[i] except IndexError as e: raise IndexError( '{}-th child is out of range. There are {} children on this node' .format(i, len(self.current))) from e def move_to_root(self) -> None: self._assert_initialization_status() self.current = self.root def set_data(self, data: T) -> None: self._assert_initialization_status() self._copy_if_necessary() self.current._data = data def remove_child(self, i: int) -> None: self._assert_initialization_status() self._copy_if_necessary() try: del self.current._children[i] except IndexError as e: raise IndexError( '{}-th child is out of range. There are {} children on this node' .format(i, len(self.current))) from e def add_child(self, data: T, move: bool = False) -> None: self._assert_initialization_status() self._copy_if_necessary() child: Tree[T] = Tree(data) child._parent = self.current self.current._children.append(child) if move: l = len(self.current) self.move_to_child(l - 1) def build(self) -> Tree[T]: self._assert_initialization_status() self.constructed = True return self.root def _copy_if_necessary(self) -> None: if self.constructed: self._copy() self.constructed = False def _copy(self) -> None: new_root: Tree[T] = Tree(self.root.data) new_current = None if self.current is self.root: new_current = new_root queue = [(new_root, self.root._children)] while queue: new_parent, children = queue.pop() new_children = [] for child in children: new_child: Tree[T] = Tree(child.data) new_child._parent = new_parent new_children.append(new_child) queue.append((new_child, child._children)) if self.current is child: new_current = new_child new_parent._children = new_children self.root = new_root self.current = new_current
MIT License
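A short usage sketch built only from the TreeBuilder API visible in the context above; the node labels are placeholders. Every mutating call goes through `_assert_initialization_status`, so touching the builder before `create_root` raises the documented ValueError.

from pyconll.tree._treebuilder import TreeBuilder

builder = TreeBuilder()
try:
    builder.add_child('child')          # no root yet, so the assertion fires
except ValueError as err:
    print(err)                          # "The TreeBuilder has not created a root ..."

builder.create_root('root')
builder.add_child('left')
builder.add_child('right', move=True)   # move to the newly added child
builder.move_to_root()
tree = builder.build()
print(tree.data, len(tree))             # 'root' and its two children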
merll/docker-map
dockermap/map/client.py
MappingDockerClient.restart
python
def restart(self, container, instances=None, map_name=None, **kwargs):
    return self.run_actions('restart', container, instances=instances, map_name=map_name, **kwargs)
Restarts instances for a container configuration.

:param container: Container name.
:type container: unicode | str
:param instances: Instance names to restart. If not specified, will restart all instances as specified in the configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to the main container restart.
:return: Return values of restarted containers.
:rtype: list[dockermap.map.runner.ActionOutput]
https://github.com/merll/docker-map/blob/54e325595fc0b6b9d154dacc790a222f957895da/dockermap/map/client.py#L284-L300
from __future__ import unicode_literals import logging import sys from ..exceptions import PartialResultsError from .action import simple, script, update from .config.client import ClientConfiguration from .config.main import ContainerMap from .config.utils import get_map_config_ids from .exceptions import ActionException, ActionRunnerException from .policy.base import BasePolicy from .runner.base import DockerClientRunner from .state.base import (SingleStateGenerator, DependencyStateGenerator, DependentStateGenerator, ImageDependencyStateGenerator) from .state.update import UpdateStateGenerator log = logging.getLogger(__name__) def _set_forced_update_ids(kwargs, maps, default_map_name, default_instances): value = kwargs.pop('force_update', None) if not value: return config_ids = get_map_config_ids(value, maps, default_map_name, default_instances) if config_ids: kwargs['force_update'] = set(config_ids) class MappingDockerClient(object): configuration_class = ClientConfiguration policy_class = BasePolicy generators = { 'create': (DependencyStateGenerator, simple.CreateActionGenerator), 'start': (DependencyStateGenerator, simple.StartActionGenerator), 'restart': (SingleStateGenerator, simple.RestartActionGenerator), 'stop': (DependentStateGenerator, simple.StopActionGenerator), 'remove': (DependentStateGenerator, simple.RemoveActionGenerator), 'startup': (DependencyStateGenerator, simple.StartupActionGenerator), 'shutdown': (DependentStateGenerator, simple.ShutdownActionGenerator), 'update': (UpdateStateGenerator, update.UpdateActionGenerator), 'script': (DependencyStateGenerator, script.ScriptActionGenerator), 'signal': (SingleStateGenerator, simple.SignalActionGenerator), 'pull_images': (ImageDependencyStateGenerator, simple.ImagePullActionGenerator), } runner_class = DockerClientRunner def __init__(self, container_maps=None, docker_client=None, clients=None, map_defaults=None, option_defaults=None): if container_maps: if isinstance(container_maps, ContainerMap): self._default_map = container_maps.name self._maps = {container_maps.name: container_maps} elif isinstance(container_maps, (list, tuple)): self._default_map = None self._maps = {c_map.name: c_map for c_map in container_maps} elif isinstance(container_maps, dict): self._default_map = None self._maps = container_maps else: raise ValueError("Unexpected type of 'container_maps' argument: {0}".format(type(container_maps).__name__)) else: self._default_map = None self._maps = {} if clients and isinstance(clients, (list, tuple)): self._clients = dict(clients) else: self._clients = clients or {} if docker_client is not None: if isinstance(docker_client, ClientConfiguration): default_client = docker_client else: default_client = self.configuration_class.from_client(docker_client) self._clients[self.policy_class.default_client_name] = default_client self._map_defaults = map_defaults or {} self._option_defaults = option_defaults or {} self._policy = None def get_policy(self): if not self._policy: self._policy = self.policy_class(self._maps, self._clients, self._map_defaults, self._option_defaults) return self._policy def get_state_generator(self, action_name, policy, kwargs): state_generator_cls = self.generators[action_name][0] state_generator = state_generator_cls(policy, kwargs) return state_generator def get_action_generator(self, action_name, policy, kwargs): action_generator_cls = self.generators[action_name][1] action_generator = action_generator_cls(policy, kwargs) return action_generator def get_runner(self, policy, kwargs): return 
self.runner_class(policy, kwargs) def get_states(self, action_name, config_name, instances=None, map_name=None, **kwargs): policy = self.get_policy() _set_forced_update_ids(kwargs, policy.container_maps, map_name or self._default_map, instances) state_generator = self.get_state_generator(action_name, policy, kwargs) log.debug("Remaining kwargs passed to client actions: %s", kwargs) config_ids = get_map_config_ids(config_name, policy.container_maps, map_name or self._default_map, instances) log.debug("Generating states for configurations: %s", config_ids) return state_generator.get_states(config_ids) def get_actions(self, action_name, config_name, instances=None, map_name=None, **kwargs): policy = self.get_policy() action_generator = self.get_action_generator(action_name, policy, kwargs) for state in self.get_states(action_name, config_name, instances=instances, map_name=map_name, **kwargs): log.debug("Evaluating state: %s.", state) actions = action_generator.get_state_actions(state, **kwargs) if actions: log.debug("Running actions: %s", actions) yield actions else: log.debug("No actions returned.") def run_actions(self, action_name, config_name, instances=None, map_name=None, **kwargs): policy = self.get_policy() results = [] runner = self.get_runner(policy, kwargs) for action_list in self.get_actions(action_name, config_name, instances, map_name, **kwargs): try: for res in runner.run_actions(action_list): results.append(res) except ActionException as ae: raise ActionRunnerException.from_action_exception(ae, results) except: exc_info = sys.exc_info() raise PartialResultsError(exc_info, results) return results def create(self, container, instances=None, map_name=None, **kwargs): return self.run_actions('create', container, instances=instances, map_name=map_name, **kwargs) def start(self, container, instances=None, map_name=None, **kwargs): return self.run_actions('start', container, instances=instances, map_name=map_name, **kwargs)
MIT License
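A hedged sketch (not from this repository) of how `restart` might be invoked; the `dockermap.api` import path, the `DockerClientWrapper` socket URL, and the map/configuration/instance names are assumptions in the style of the docker-map quickstart.

from dockermap.api import ContainerMap, DockerClientWrapper, MappingDockerClient

container_map = ContainerMap('example_map', {
    'web': {
        'image': 'nginx:latest',
        'instances': ['a', 'b'],
    },
})
map_client = MappingDockerClient(container_map,
                                 DockerClientWrapper('unix://var/run/docker.sock'))

# Restart only instance 'a'; omitting ``instances`` would restart every
# instance declared for the 'web' configuration.
results = map_client.restart('web', instances=['a'], map_name='example_map')
for action_output in results:           # list[dockermap.map.runner.ActionOutput]
    print(action_output)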
pyannote/pyannote-database
pyannote/database/protocol/speaker_recognition.py
SpeakerRecognitionProtocol.train
python
def train(self, yield_name=False):
    generator = self.trn_iter()
    for name, item in generator:
        if yield_name:
            yield name, self.preprocess(item)
        else:
            yield self.preprocess(item)
Iterate over the training set

This will yield dictionaries with the following keys:

* database: str
  unique database identifier
* uri: str
  unique recording identifier
* channel: int
  index of resource channel to use
* speaker: str
  unique speaker identifier

as well as keys coming from the provided preprocessors.

Usage
-----
>>> for item in protocol.train():
...     uri = item['uri']
...     channel = item['channel']
...     speaker = item['speaker']
https://github.com/pyannote/pyannote-database/blob/7391b48e70f087dd963776d37257321bed1e313a/pyannote/database/protocol/speaker_recognition.py#L95-L125
from .protocol import Protocol class SpeakerRecognitionProtocol(Protocol): def trn_iter(self): raise NotImplementedError( "Custom speaker recognition protocol " 'should implement "trn_iter".' ) def trn_enroll_iter(self): raise NotImplementedError( "Custom speaker recognition protocol " 'should implement "trn_enroll_iter".' ) def trn_test_iter(self): raise NotImplementedError( "Custom speaker recognition protocol " 'should implement "trn_test_iter".' ) def trn_keys(self): raise NotImplementedError( "Custom speaker recognition protocol " 'should implement "trn_keys".' ) def dev_enroll_iter(self): raise NotImplementedError( "Custom speaker recognition protocol " 'should implement "dev_enroll_iter".' ) def dev_test_iter(self): raise NotImplementedError( "Custom speaker recognition protocol " 'should implement "dev_test_iter".' ) def dev_keys(self): raise NotImplementedError( "Custom speaker recognition protocol " 'should implement "dev_keys".' ) def tst_enroll_iter(self): raise NotImplementedError( "Custom speaker recognition protocol " 'should implement "tst_enroll_iter".' ) def tst_test_iter(self): raise NotImplementedError( "Custom speaker recognition protocol " 'should implement "tst_test_iter".' ) def tst_keys(self): raise NotImplementedError( "Custom speaker recognition protocol " 'should implement tst_keys".' )
MIT License
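A minimal sketch of how `train` consumes the `trn_iter` hook, assuming the Protocol base class can be instantiated without arguments and that `preprocess` is a pass-through when no preprocessors are configured; the `ToyProtocol` class, file names and speaker labels are made up.

from pyannote.database.protocol.speaker_recognition import SpeakerRecognitionProtocol

class ToyProtocol(SpeakerRecognitionProtocol):
    def trn_iter(self):
        # (name, item) pairs; the item keys mirror the docstring above
        yield 'file1', {'database': 'Toy', 'uri': 'file1', 'channel': 1, 'speaker': 'spk_A'}
        yield 'file2', {'database': 'Toy', 'uri': 'file2', 'channel': 1, 'speaker': 'spk_B'}

protocol = ToyProtocol()
for item in protocol.train():
    print(item['uri'], item['speaker'])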
chaffelson/whoville
whoville/cloudbreak/models/rds_config_response.py
RDSConfigResponse.cluster_names
python
def cluster_names(self, cluster_names):
    self._cluster_names = cluster_names
Sets the cluster_names of this RDSConfigResponse.

List of clusters which use the config.

:param cluster_names: The cluster_names of this RDSConfigResponse.
:type: list[str]
https://github.com/chaffelson/whoville/blob/f71fda629c9fd50d0a482120165ea5abcc754522/whoville/cloudbreak/models/rds_config_response.py#L284-L293
from pprint import pformat from six import iteritems import re class RDSConfigResponse(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'name': 'str', 'connection_url': 'str', 'type': 'str', 'connector_jar_url': 'str', 'id': 'int', 'creation_date': 'int', 'public_in_account': 'bool', 'cluster_names': 'list[str]', 'stack_version': 'str', 'database_engine': 'str', 'connection_driver': 'str', 'database_engine_display_name': 'str', 'workspace': 'WorkspaceResourceResponse' } attribute_map = { 'name': 'name', 'connection_url': 'connectionURL', 'type': 'type', 'connector_jar_url': 'connectorJarUrl', 'id': 'id', 'creation_date': 'creationDate', 'public_in_account': 'publicInAccount', 'cluster_names': 'clusterNames', 'stack_version': 'stackVersion', 'database_engine': 'databaseEngine', 'connection_driver': 'connectionDriver', 'database_engine_display_name': 'databaseEngineDisplayName', 'workspace': 'workspace' } def __init__(self, name=None, connection_url=None, type=None, connector_jar_url=None, id=None, creation_date=None, public_in_account=False, cluster_names=None, stack_version=None, database_engine=None, connection_driver=None, database_engine_display_name=None, workspace=None): self._name = None self._connection_url = None self._type = None self._connector_jar_url = None self._id = None self._creation_date = None self._public_in_account = None self._cluster_names = None self._stack_version = None self._database_engine = None self._connection_driver = None self._database_engine_display_name = None self._workspace = None self.name = name self.connection_url = connection_url self.type = type if connector_jar_url is not None: self.connector_jar_url = connector_jar_url if id is not None: self.id = id if creation_date is not None: self.creation_date = creation_date if public_in_account is not None: self.public_in_account = public_in_account if cluster_names is not None: self.cluster_names = cluster_names if stack_version is not None: self.stack_version = stack_version self.database_engine = database_engine self.connection_driver = connection_driver self.database_engine_display_name = database_engine_display_name if workspace is not None: self.workspace = workspace @property def name(self): return self._name @name.setter def name(self, name): if name is None: raise ValueError("Invalid value for `name`, must not be `None`") self._name = name @property def connection_url(self): return self._connection_url @connection_url.setter def connection_url(self, connection_url): if connection_url is None: raise ValueError("Invalid value for `connection_url`, must not be `None`") self._connection_url = connection_url @property def type(self): return self._type @type.setter def type(self, type): if type is None: raise ValueError("Invalid value for `type`, must not be `None`") self._type = type @property def connector_jar_url(self): return self._connector_jar_url @connector_jar_url.setter def connector_jar_url(self, connector_jar_url): self._connector_jar_url = connector_jar_url @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def creation_date(self): return self._creation_date @creation_date.setter def creation_date(self, creation_date): self._creation_date = creation_date @property def public_in_account(self): return self._public_in_account @public_in_account.setter def public_in_account(self, 
public_in_account): self._public_in_account = public_in_account @property def cluster_names(self): return self._cluster_names @cluster_names.setter
Apache License 2.0
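Illustrative only and hedged: constructing the generated model and assigning through the `cluster_names` setter above; the import path and all field values are placeholders.

from whoville.cloudbreak.models.rds_config_response import RDSConfigResponse

rds = RDSConfigResponse(name='hive-metastore',
                        connection_url='jdbc:postgresql://db:5432/hive',
                        type='HIVE',
                        database_engine='POSTGRES',
                        connection_driver='org.postgresql.Driver',
                        database_engine_display_name='PostgreSQL')
rds.cluster_names = ['cluster-a', 'cluster-b']   # plain assignment via the setter above
print(rds.cluster_names)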
smartelect/smartelect
civil_registry/tests/factories.py
CitizenFactory._setup_next_sequence
python
def _setup_next_sequence(cls):
    return 1
Set up an initial sequence value for Sequence attributes.

Returns:
    int: the first available ID to use for instances of this factory.

Note:
    If we don't override this, then DjangoModelFactory bases the initial value on the max PK of the corresponding model, which in my case is absolutely huge and breaks the way we come up with a valid national ID, above.
https://github.com/smartelect/smartelect/blob/d6d35f2fa8f60e756ad5247f8f0a5f05830e92f8/civil_registry/tests/factories.py#L52-L63
from datetime import date import random import string import factory import factory.fuzzy from civil_registry.models import Citizen, TempCitizen from libya_elections.constants import MALE def get_nid(stub): from register.tests.factories import get_unused_gender_appropriate_national_id return get_unused_gender_appropriate_national_id(stub) def get_unused_civil_registry_id(stub): civil_registry_id = random.randint(1, 999999999) while Citizen.objects.filter(pk=civil_registry_id).exists(): civil_registry_id = random.randint(1, 999999999) return civil_registry_id class CitizenFactory(factory.DjangoModelFactory): class Meta: model = Citizen civil_registry_id = factory.LazyAttribute(get_unused_civil_registry_id) national_id = factory.LazyAttribute(get_nid) birth_date = date(1913, 2, 3) gender = MALE fbr_number = factory.fuzzy.FuzzyText(prefix='se', chars=string.digits) first_name = factory.fuzzy.FuzzyText() father_name = factory.fuzzy.FuzzyText() grandfather_name = factory.fuzzy.FuzzyText() mother_name = factory.fuzzy.FuzzyText() family_name = factory.fuzzy.FuzzyText() missing = None @classmethod
Apache License 2.0
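A hypothetical pytest-django style test (this repository's actual test harness may differ) showing the factory in use; the test name and the `django_db` marker are assumptions, while the ID bound comes from `get_unused_civil_registry_id` in the context above.

import pytest
from civil_registry.tests.factories import CitizenFactory

@pytest.mark.django_db
def test_citizen_factory_starts_sequence_at_one():
    citizen = CitizenFactory()                       # persists a Citizen row
    assert citizen.civil_registry_id < 10 ** 9       # helper draws IDs below a billion
    assert citizen.national_id is not None           # gender-appropriate NID generated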
http-apis/hydra-python-agent
hydra_agent/tests/test_redis.py
Tests.collection_endpoints
python
def collection_endpoints(self):
    print("testing collection endpoints with db=0 ...")
    query = ('GRAPH.QUERY', 'apigraph', "MATCH (p:collection) RETURN p")
    redis_db = redis.StrictRedis(host='localhost', port=6379, db=0)
    redis_reply = [
        [[b'p.id', b'p.operations', b'p.type'],
         [b'vocab:EntryPoint/HttpApiLogCollection', b"['GET', 'PUT']", b'HttpApiLogCollection'],
         [b'vocab:EntryPoint/AnomalyCollection', b"['GET', 'PUT']", b'AnomalyCollection'],
         [b'vocab:EntryPoint/CommandCollection', b"['GET', 'PUT']", b'CommandCollection'],
         [b'vocab:EntryPoint/ControllerLogCollection', b"['GET', 'PUT']", b'ControllerLogCollection'],
         [b'vocab:EntryPoint/DatastreamCollection', b"['GET', 'PUT']", b'DatastreamCollection'],
         [b'vocab:EntryPoint/MessageCollection', b"['GET', 'PUT']", b'MessageCollection'],
         [b'vocab:EntryPoint/DroneLogCollection', b"['GET', 'PUT']", b'DroneLogCollection'],
         [b'vocab:EntryPoint/DroneCollection', b"['GET', 'PUT']", b'DroneCollection']],
        [b'Query internal execution time: 0.089501 milliseconds']]
    redis_db_execute_command_query = MagicMock(return_value=redis_reply)
    property_list = redis_reply[0][0]
    if (b"p.id" in property_list and b"p.operations" in property_list
            and b"p.type" in property_list):
        return True
    else:
        return False
Test the data stored in the collection endpoints.

`redis_reply` is the data that would be returned from redis_db_0 when `query` is executed.
https://github.com/http-apis/hydra-python-agent/blob/e2bcd51f3cbf0700cd44e5e392e3c21af2cbd2a3/hydra_agent/tests/test_redis.py#L26-L43
import unittest import redis from unittest.mock import MagicMock class Tests: def entry_point(self): print("testing entrypoint with db=0 ...") query = ('GRAPH.QUERY','apigraph', "MATCH (p:id) RETURN p") redis_db = redis.StrictRedis(host='localhost', port=6379, db=0) redis_reply = [[[b'p.url', b'p.id', b'p.supportedOperation'], [b'http://localhost:8080/api', b'vocab:Entrypoint', b'GET']], [b'Query internal execution time: 0.071272 milliseconds']] redis_db_execute_command_query = MagicMock(return_value = redis_reply) property_list = redis_reply[0][0] if (b"p.id" in property_list and b"p.url" in property_list and b"p.supportedOperation" in property_list): return True else: return False
MIT License
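A hypothetical driver for the checks above; the import path `hydra_agent.tests.test_redis` follows the record's function_path, and no live Redis server is needed because `StrictRedis` only connects lazily while the reply itself is mocked.

from hydra_agent.tests.test_redis import Tests

checks = Tests()
assert checks.entry_point() is True            # entrypoint properties present in the reply
assert checks.collection_endpoints() is True   # collection properties present in the reply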
ollo69/ha_tuya_custom
custom_components/tuya_custom/__init__.py
TuyaDevice.object_id
python
def object_id(self):
    return self._tuya.object_id()
Return Tuya device id.
https://github.com/ollo69/ha_tuya_custom/blob/66b8722afc9c771318b8c67865907e5ac0aac602/custom_components/tuya_custom/__init__.py#L380-L382
import asyncio from datetime import timedelta import logging from .tuyaha.tuyaapi import ( DEFAULTREGION, TuyaApi, TuyaAPIException, TuyaAPIRateLimitException, TuyaFrequentlyInvokeException, TuyaNetException, TuyaServerException, ) import voluptuous as vol from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry from homeassistant.const import ( CONF_PASSWORD, CONF_PLATFORM, CONF_REGION, CONF_USERNAME, ) from homeassistant.core import HomeAssistant, callback from homeassistant.exceptions import ConfigEntryNotReady import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, ) from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_track_time_interval from .const import ( CONF_COUNTRYCODE, CONF_DISCOVERY_INTERVAL, CONF_QUERY_DEVICE, CONF_QUERY_INTERVAL, DEFAULT_DISCOVERY_INTERVAL, DEFAULT_QUERY_INTERVAL, DOMAIN, SIGNAL_CONFIG_ENTITY, SIGNAL_DELETE_ENTITY, SIGNAL_UPDATE_ENTITY, TUYA_DATA, TUYA_DEVICES_CONF, TUYA_DISCOVERY_NEW, TUYA_PLATFORMS, TUYA_TYPE_NOT_QUERY, ) _LOGGER = logging.getLogger(__name__) ATTR_TUYA_DEV_ID = "tuya_device_id" ENTRY_IS_SETUP = "tuya_entry_is_setup" SERVICE_FORCE_UPDATE = "force_update" SERVICE_PULL_DEVICES = "pull_devices" TUYA_TYPE_TO_HA = { "climate": "climate", "cover": "cover", "fan": "fan", "light": "light", "scene": "scene", "switch": "switch", } TUYA_TRACKER = "tuya_tracker" TUYA_DEVICE_CONF_SCHEMA = { vol.Optional(TUYA_DEVICES_CONF): vol.All( cv.ensure_list, [ vol.Schema( { vol.Required("device_name"): cv.string, vol.Optional("unit_of_measurement"): cv.temperature_unit, vol.Optional("temp_divider", default=0): cv.positive_int, vol.Optional("curr_temp_divider", default=0): cv.positive_int, vol.Optional("ext_temp_sensor"): cv.string, vol.Optional("support_color"): cv.boolean, vol.Optional("brightness_range_mode", default=0): cv.positive_int, vol.Optional("min_kelvin"): cv.positive_int, vol.Optional("max_kelvin"): cv.positive_int, vol.Optional("max_tuya_temp"): cv.positive_int, } ) ], ) } CONFIG_SCHEMA = vol.Schema( vol.All( cv.deprecated(DOMAIN), { DOMAIN: vol.Schema( { vol.Optional(CONF_USERNAME): cv.string, vol.Optional(CONF_COUNTRYCODE): cv.string, vol.Optional(CONF_PASSWORD): cv.string, vol.Optional(CONF_PLATFORM, default="tuya"): cv.string, } ).extend(TUYA_DEVICE_CONF_SCHEMA) }, ), extra=vol.ALLOW_EXTRA, ) def _update_discovery_interval(hass, interval): tuya = hass.data[DOMAIN].get(TUYA_DATA) if not tuya: return try: tuya.discovery_interval = interval _LOGGER.info("Tuya discovery device poll interval set to %s seconds", interval) except ValueError as ex: _LOGGER.warning(ex) def _update_query_interval(hass, interval): tuya = hass.data[DOMAIN].get(TUYA_DATA) if not tuya: return try: tuya.query_interval = interval _LOGGER.info("Tuya query device poll interval set to %s seconds", interval) except ValueError as ex: _LOGGER.warning(ex) async def async_setup(hass, config): conf = config.get(DOMAIN) if conf is not None: devices_config = conf.pop(TUYA_DEVICES_CONF, {}) if devices_config: _LOGGER.warning( "Options from configuration.yaml is not supported anymore. 
Use integration options" ) user = conf.get(CONF_USERNAME) pwd = conf.get(CONF_PASSWORD) country = conf.get(CONF_COUNTRYCODE) if user and pwd and country: hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data=conf ) ) return True async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry): tuya = TuyaApi() username = entry.data[CONF_USERNAME] password = entry.data[CONF_PASSWORD] country_code = entry.data[CONF_COUNTRYCODE] platform = entry.data[CONF_PLATFORM] region = entry.data.get(CONF_REGION, DEFAULTREGION) try: await hass.async_add_executor_job( tuya.init, username, password, country_code, platform, region ) except ( TuyaNetException, TuyaServerException, TuyaFrequentlyInvokeException, ) as exc: raise ConfigEntryNotReady() from exc except TuyaAPIRateLimitException as exc: _LOGGER.error("Tuya login rate limited") raise ConfigEntryNotReady() from exc except TuyaAPIException as exc: _LOGGER.error( "Connection error during integration setup. Error: %s", exc, ) return False hass.data[DOMAIN] = { TUYA_DATA: tuya, TUYA_DEVICES_CONF: entry.options.copy(), TUYA_TRACKER: None, ENTRY_IS_SETUP: set(), "entities": {}, "pending": {}, "listener": entry.add_update_listener(update_listener), } _update_discovery_interval( hass, entry.options.get(CONF_DISCOVERY_INTERVAL, DEFAULT_DISCOVERY_INTERVAL) ) _update_query_interval( hass, entry.options.get(CONF_QUERY_INTERVAL, DEFAULT_QUERY_INTERVAL) ) async def async_load_devices(device_list): device_type_list = {} for device in device_list: dev_type = device.device_type() if ( dev_type in TUYA_TYPE_TO_HA and device.object_id() not in hass.data[DOMAIN]["entities"] ): ha_type = TUYA_TYPE_TO_HA[dev_type] if ha_type not in device_type_list: device_type_list[ha_type] = [] device_type_list[ha_type].append(device.object_id()) hass.data[DOMAIN]["entities"][device.object_id()] = None for ha_type, dev_ids in device_type_list.items(): config_entries_key = f"{ha_type}.tuya" if config_entries_key not in hass.data[DOMAIN][ENTRY_IS_SETUP]: hass.data[DOMAIN]["pending"][ha_type] = dev_ids hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, ha_type) ) hass.data[DOMAIN][ENTRY_IS_SETUP].add(config_entries_key) else: async_dispatcher_send(hass, TUYA_DISCOVERY_NEW.format(ha_type), dev_ids) await async_load_devices(tuya.get_all_devices()) def _get_updated_devices(): try: tuya.poll_devices_update() except TuyaFrequentlyInvokeException as exc: _LOGGER.error(exc) return tuya.get_all_devices() async def async_poll_devices_update(event_time): _LOGGER.debug("Pull devices from Tuya") device_list = await hass.async_add_executor_job(_get_updated_devices) await async_load_devices(device_list) newlist_ids = [] for device in device_list: newlist_ids.append(device.object_id()) for dev_id in list(hass.data[DOMAIN]["entities"]): if dev_id not in newlist_ids: async_dispatcher_send(hass, SIGNAL_DELETE_ENTITY, dev_id) hass.data[DOMAIN]["entities"].pop(dev_id) hass.data[DOMAIN][TUYA_TRACKER] = async_track_time_interval( hass, async_poll_devices_update, timedelta(minutes=2) ) hass.services.async_register( DOMAIN, SERVICE_PULL_DEVICES, async_poll_devices_update ) async def async_force_update(call): async_dispatcher_send(hass, SIGNAL_UPDATE_ENTITY) hass.services.async_register(DOMAIN, SERVICE_FORCE_UPDATE, async_force_update) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry): unload_ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload( entry, 
component.split(".", 1)[0] ) for component in hass.data[DOMAIN][ENTRY_IS_SETUP] ] ) ) if unload_ok: hass.data[DOMAIN]["listener"]() hass.data[DOMAIN][TUYA_TRACKER]() hass.services.async_remove(DOMAIN, SERVICE_FORCE_UPDATE) hass.services.async_remove(DOMAIN, SERVICE_PULL_DEVICES) hass.data.pop(DOMAIN) return unload_ok async def update_listener(hass: HomeAssistant, entry: ConfigEntry): hass.data[DOMAIN][TUYA_DEVICES_CONF] = entry.options.copy() _update_discovery_interval( hass, entry.options.get(CONF_DISCOVERY_INTERVAL, DEFAULT_DISCOVERY_INTERVAL) ) _update_query_interval( hass, entry.options.get(CONF_QUERY_INTERVAL, DEFAULT_QUERY_INTERVAL) ) async_dispatcher_send(hass, SIGNAL_CONFIG_ENTITY) async def cleanup_device_registry(hass: HomeAssistant, device_id): device_registry = await hass.helpers.device_registry.async_get_registry() entity_registry = await hass.helpers.entity_registry.async_get_registry() if device_id and not hass.helpers.entity_registry.async_entries_for_device( entity_registry, device_id, include_disabled_entities=True ): device_registry.async_remove_device(device_id) class TuyaDevice(Entity): _dev_can_query_count = 0 def __init__(self, tuya, platform): self._tuya = tuya self._tuya_platform = platform def _device_can_query(self): dev_type = self._tuya.device_type() return dev_type not in TUYA_TYPE_NOT_QUERY def _inc_device_count(self): if not self._device_can_query(): return TuyaDevice._dev_can_query_count += 1 def _dec_device_count(self): if not self._device_can_query(): return TuyaDevice._dev_can_query_count -= 1 def _get_device_config(self): devices_config = self.hass.data[DOMAIN].get(TUYA_DEVICES_CONF) if not devices_config: return {} dev_conf = devices_config.get(self.object_id, {}) if dev_conf: _LOGGER.debug( "Configuration for deviceID %s: %s", self.object_id, str(dev_conf) ) return dev_conf async def async_added_to_hass(self): self.hass.data[DOMAIN]["entities"][self.object_id] = self.entity_id self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_DELETE_ENTITY, self._delete_callback ) ) self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_UPDATE_ENTITY, self._update_callback ) ) self._inc_device_count() async def async_will_remove_from_hass(self): self._dec_device_count() @property
Apache License 2.0
erigones/esdc-ce
api/serializers.py
BaseSerializer.get_field_key
python
def get_field_key(self, field_name):
    return field_name
Return the key that should be used for a given field.
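The hook is trivial by default, but it is the place a serializer subclass can rename output keys. Below is a minimal, self-contained sketch of that pattern; TinySerializer, CamelCaseSerializer and the field names are hypothetical stand-ins, not the actual esdc-ce serializer machinery.

# Sketch of the key-mapping hook pattern (hypothetical classes, not the
# real esdc-ce serializers; shown only to illustrate why the hook exists).
class TinySerializer(object):
    def get_field_key(self, field_name):
        # Default behaviour: the output key is simply the field name.
        return field_name

    def to_native(self, data):
        return {self.get_field_key(name): value for name, value in data.items()}


class CamelCaseSerializer(TinySerializer):
    def get_field_key(self, field_name):
        # Override the hook to expose snake_case fields as camelCase keys.
        head, *tail = field_name.split('_')
        return head + ''.join(part.capitalize() for part in tail)


print(CamelCaseSerializer().to_native({'ip_address': '10.0.0.1', 'node_name': 'cn01'}))
# {'ipAddress': '10.0.0.1', 'nodeName': 'cn01'}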
https://github.com/erigones/esdc-ce/blob/f83a62d0d430e3c8f9aac23d958583b0efce4312/api/serializers.py#L356-L360
from __future__ import unicode_literals import copy import datetime import inspect import types from collections import OrderedDict from decimal import Decimal from django.apps import apps from django.core.paginator import Page from django.db import models from django.forms import widgets from django.utils import six from django.utils.functional import cached_property from django.utils.encoding import force_text, python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ from django.core.exceptions import ValidationError, ObjectDoesNotExist as DjangoObjectDoesNotExist from django.contrib.contenttypes.fields import GenericForeignKey from api.relations import * from api.fields import * from api.fields import is_simple_callable, get_component def _resolve_model(obj): if isinstance(obj, six.string_types) and len(obj.split('.')) == 2: app_name, model_name = obj.split('.') return apps.get_model(app_name, model_name) elif inspect.isclass(obj) and issubclass(obj, models.Model): return obj else: raise ValueError("{0} is not a Django model".format(obj)) def pretty_name(name): if not name: return '' return name.replace('_', ' ').capitalize() def field_value(source, value): for component in source.split('.'): value = get_component(value, component) if value is None: break return value @python_2_unicode_compatible class ErrorList(list): def __str__(self): return str([force_text(i) for i in self]) def __repr__(self): return repr([force_text(i) for i in self]) def __iter__(self): return (force_text(i) for i in list.__iter__(self)) class RelationsList(list): _deleted = [] class APIValidationError(ValidationError): @property def api_errors(self): if hasattr(self, 'message'): return ErrorList([self.message]) return self.messages class ObjectDoesNotExist(ValidationError): def __init__(self, value, field_name='name', **kwargs): message = _('Object with %(field)s=%(value)s does not exist.') % {'field': field_name, 'value': value} super(ObjectDoesNotExist, self).__init__(message, **kwargs) class NoPermissionToModify(ValidationError): def __init__(self, **kwargs): message = _('You don\'t have permission to modify this attribute.') super(NoPermissionToModify, self).__init__(message, **kwargs) class NestedValidationError(ValidationError): def __init__(self, message): if isinstance(message, dict): self._messages = [message] else: self._messages = message @property def messages(self): return self._messages class DictWithMetadata(dict): def __getstate__(self): return dict(self) class SortedDictWithMetadata(OrderedDict): def __reduce__(self): return self.__class__, (OrderedDict(self), ) def __getstate__(self): return OrderedDict(self).__dict__ def _is_protected_type(obj): return isinstance(obj, six.string_types + six.integer_types + ( types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal, )) def _get_declared_fields(bases, attrs): fields = [(field_name, attrs.pop(field_name)) for field_name, obj in list(six.iteritems(attrs)) if isinstance(obj, Field)] fields.sort(key=lambda x: x[1].creation_counter) for base in bases[::-1]: if hasattr(base, 'base_fields'): fields = list(base.base_fields.items()) + fields return OrderedDict(fields) class SerializerMetaclass(type): def __new__(mcs, name, bases, attrs): attrs['base_fields'] = _get_declared_fields(bases, attrs) return super(SerializerMetaclass, mcs).__new__(mcs, name, bases, attrs) class SerializerOptions(object): def __init__(self, meta): self.depth = getattr(meta, 'depth', 0) self.fields = getattr(meta, 'fields', ()) 
self.exclude = getattr(meta, 'exclude', ()) class BaseSerializer(WritableField): class Meta(object): pass _options_class = SerializerOptions _dict_class = SortedDictWithMetadata def __init__(self, instance=None, data=None, files=None, context=None, partial=False, many=False, allow_add_remove=False, **kwargs): super(BaseSerializer, self).__init__(**kwargs) self.opts = self._options_class(self.Meta) self.parent = None self.root = None self.partial = partial self.many = many self.allow_add_remove = allow_add_remove self.context = context or {} self.init_data = data self.init_files = files self.object = instance self._data = None self._files = None self._errors = None if many and instance is not None and not hasattr(instance, '__iter__'): raise ValueError('instance should be a queryset or other iterable with many=True') if allow_add_remove and not many: raise ValueError('allow_add_remove should only be used for bulk updates, but you have not set many=True') @cached_property def fields(self): return self.get_fields() def get_default_fields(self): return {} def get_fields(self): ret = OrderedDict() base_fields = copy.deepcopy(self.base_fields) for key, field in base_fields.items(): ret[key] = field default_fields = self.get_default_fields() for key, val in default_fields.items(): if key not in ret: ret[key] = val if self.opts.fields: assert isinstance(self.opts.fields, (list, tuple)), '`fields` must be a list or tuple' new = OrderedDict() for key in self.opts.fields: new[key] = ret[key] ret = new if self.opts.exclude: assert isinstance(self.opts.exclude, (list, tuple)), '`exclude` must be a list or tuple' for key in self.opts.exclude: ret.pop(key, None) for key, field in ret.items(): field.initialize(parent=self, field_name=key) return ret
Apache License 2.0
hathornetwork/hathor-core
hathor/stratum/stratum.py
StratumProtocol.create_job_tx
python
def create_job_tx(self, jobid: UUID) -> BaseTransaction:
    if self.mine_txs and self.factory.tx_queue:
        funds_hash = self.factory.tx_queue[0]
        tx = self.factory.mining_tx_pool[funds_hash]
        tx.timestamp = self.factory.get_current_timestamp()
        tx.parents = self.manager.get_new_tx_parents(tx.timestamp)
        self.log.debug('prepared tx for mining', tx=tx)
        return tx

    peer_id = self.manager.my_peer.id
    assert peer_id is not None
    assert self.miner_id is not None
    data = '{}-{}-{}'.format(peer_id[:32], self.miner_id.hex, jobid.hex).encode()[:settings.BLOCK_DATA_MAX_SIZE]
    block = self.manager.generate_mining_block(data=data, address=self.miner_address,
                                               merge_mined=self.merged_mining)
    self.log.debug('prepared block for mining', block=block)
    return block
Creates a BaseTransaction for the designated miner job.

:return: created BaseTransaction
:rtype: BaseTransaction
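The interesting detail in the block branch is how each job is tagged: the payload packed into the block data is just the first 32 characters of the peer id plus the miner UUID and job UUID, truncated to the maximum block data size. A standalone sketch of that packing follows; the 64-byte limit and the peer id are placeholder assumptions standing in for settings.BLOCK_DATA_MAX_SIZE and a real Hathor peer.

# Standalone sketch of the job-tag packing used for block jobs.
# BLOCK_DATA_MAX_SIZE is assumed to be 64 bytes here; the real value
# comes from the Hathor settings object, and peer_id is a placeholder.
from uuid import uuid4

BLOCK_DATA_MAX_SIZE = 64

peer_id = 'f' * 64          # hypothetical hex-encoded peer id
miner_id = uuid4()
job_id = uuid4()

data = '{}-{}-{}'.format(peer_id[:32], miner_id.hex, job_id.hex).encode()[:BLOCK_DATA_MAX_SIZE]
print(len(data), data)      # at most 64 bytes identifying peer, miner and job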
https://github.com/hathornetwork/hathor-core/blob/b8bd2428b9fab4f53dfc4d92de230ffae48fbf46/hathor/stratum/stratum.py#L640-L667
from abc import ABC, abstractmethod from hashlib import sha256 from itertools import count from json import JSONDecodeError from math import log from multiprocessing import Process, Queue as MQueue from multiprocessing.sharedctypes import Array, Value from os import cpu_count from string import hexdigits from time import sleep from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, NamedTuple, Optional, Set, Tuple, Union, cast from uuid import UUID, uuid4 from structlog import get_logger from twisted.internet import reactor, task from twisted.internet.defer import Deferred from twisted.internet.interfaces import IAddress, IDelayedCall, IReactorCore, IReactorTCP from twisted.internet.protocol import Factory from twisted.protocols.basic import LineReceiver from twisted.python.failure import Failure from hathor.conf import HathorSettings from hathor.crypto.util import decode_address from hathor.exception import InvalidNewTransaction from hathor.pubsub import EventArguments, HathorEvents from hathor.transaction import BaseTransaction, BitcoinAuxPow, Block, MergeMinedBlock, Transaction, sum_weights from hathor.transaction.exceptions import PowError, ScriptError, TxValidationError from hathor.util import json_dumpb, json_loadb from hathor.wallet.exceptions import InvalidAddress if TYPE_CHECKING: from multiprocessing.sharedctypes import _Array, _Value from hathor.manager import HathorManager logger = get_logger() settings = HathorSettings() def valid_uuid(uuid: Any) -> bool: return isinstance(uuid, str) and len(uuid) == 32 and all(c in hexdigits for c in uuid) def valid_uuid_or_none(uuid: Any) -> bool: return uuid is None or valid_uuid(uuid) UNRECOVERABLE_ERROR_CODE_MAX = -32600 PARSE_ERROR = {'code': -32700, 'message': 'Parse error'} INTERNAL_ERROR = {'code': -32603, 'message': 'Internal error'} INVALID_PARAMS = {'code': -32602, 'message': 'Invalid params'} METHOD_NOT_FOUND = {'code': -32601, 'message': 'Method not found'} INVALID_REQUEST = {'code': -32600, 'message': 'Invalid Request'} NODE_SYNCING = {'code': 10, 'message': 'Node syncing'} INVALID_ADDRESS = {'code': 22, 'message': 'Address to send mined funds is invalid'} INVALID_SOLUTION = {'code': 30, 'message': 'Invalid solution'} STALE_JOB = {'code': 31, 'message': 'Stale job submitted'} JOB_NOT_FOUND = {'code': 32, 'message': 'Job not found'} PROPAGATION_FAILED = {'code': 33, 'message': 'Solution propagation failed'} DUPLICATE_SOLUTION = {'code': 34, 'message': 'Solution already submitted'} class ServerJob: id: UUID created: int submitted: Optional[int] miner: UUID tx: BaseTransaction weight: float timeoutTask: IDelayedCall def __init__(self, jobid: UUID, created: int, miner: UUID, tx: BaseTransaction, weight: float): self.id = jobid self.created = created self.miner = miner self.tx = tx self.submitted = None self.weight = weight class MinerJob(NamedTuple): data: '_Array' = Array('B', 2048) data_size: '_Value' = Value('I') job_id: '_Array' = Array('B', 16) nonce_size: '_Value' = Value('I') weight: '_Value' = Value('d') def update_job(self, params: Dict[str, Any]) -> bool: try: data = bytes.fromhex(params['data']) data_size: int = len(data) self.data[:data_size] = data self.data_size.value = data_size self.job_id[:] = bytes.fromhex(params['job_id']) self.nonce_size.value = int(params['nonce_size']) self.weight.value = float(params['weight']) except KeyError: return False return True class MinerSubmit(NamedTuple): job_id: str nonce: str = '' aux_pow: str = '' class MinerStatistics(NamedTuple): address: str blocks_found: int 
completed_jobs: int connection_start_time: int estimated_hash_rate: float miner_id: str class JSONRPC(LineReceiver, ABC): delimiter = b'\n' use_ok = True def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.log = logger.new() def lineReceived(self, line: bytes) -> None: self.log.debug('line received', line=line) try: data = json_loadb(line) except JSONDecodeError: return self.send_error(PARSE_ERROR, data={'message': repr(line)}) assert isinstance(data, dict) msgid = data.get('id') if 'method' in data: return self.handle_request(data['method'], data.get('params'), msgid) elif 'result' in data and 'error' in data: if data['result'] and data['error'] is None: return self.handle_result(data['result'], msgid) elif data['error'] and data['result'] is None: return self.handle_error(data['error'], data.get('data'), msgid) elif 'result' in data: return self.handle_result(data['result'], msgid) elif 'error' in data: return self.handle_error(data['error'], data.get('data'), msgid) return self.send_error( INVALID_REQUEST, data={ 'message': data, 'error': 'Could not identify message as request, result or error.' }) @abstractmethod def handle_request(self, method: str, params: Optional[Union[List, Dict]], msgid: Optional[str]) -> None: raise NotImplementedError @abstractmethod def handle_result(self, result: Any, msgid: Optional[str]) -> None: raise NotImplementedError @abstractmethod def handle_error(self, error: Dict, data: Any, msgid: Optional[str]) -> None: raise NotImplementedError def send_request(self, method: str, params: Optional[Union[List, Dict]], msgid: Union[str, int, None] = None, ok: Optional[bool] = None) -> None: data: Dict[str, Any] = {'method': method, 'params': params} self.log.debug('send request', method=method, params=params) data['id'] = msgid if ok is True: data['result'] = 'ok' if self.use_ok else True self.send_json(data) def send_result(self, result: Any, msgid: Optional[str]) -> None: data = {'result': result, 'error': None} if msgid is not None: data['id'] = msgid self.log.debug('send result', data=data) return self.send_json(data) def send_error(self, error: Dict, msgid: Optional[str] = None, data: Any = None) -> None: message = {'error': error, 'data': data} if msgid is not None: message['id'] = msgid self.log.info('send_error', error=error, data=data) self.send_json(message) if error['code'] <= UNRECOVERABLE_ERROR_CODE_MAX and self.transport is not None: self.transport.loseConnection() def send_json(self, json: Dict) -> None: try: message = json_dumpb(json) self.log.debug('send line', line=message) self.sendLine(message) except TypeError: self.log.error('failed to encode', json=json) class StratumProtocol(JSONRPC): JOBS_HISTORY = 100 AVERAGE_JOB_TIME = 5 BLOCK_MAXIMUM_JOB_TIME = 15 TX_MAXIMUM_JOB_TIME = 1 address: IAddress current_job: Optional[ServerJob] jobs: Dict[UUID, ServerJob] job_ids: List[UUID] factory: 'StratumFactory' manager: 'HathorManager' miner_id: Optional[UUID] miner_address: Optional[bytes] estimated_hash_rate: float completed_jobs: int connection_start_time: int blocks_found: int merged_mining: bool def __init__(self, factory: 'StratumFactory', manager: 'HathorManager', address: IAddress, id_generator: Optional[Callable[[], Iterator[Union[str, int]]]] = lambda: count()): self.log = logger.new(address=address) self.factory = factory self.manager = manager self.address = address self.current_job = None self.jobs = {} self.miner_id = None self.miner_address = None self.job_ids = [] self.mine_txs = settings.STRATUM_MINE_TXS_DEFAULT 
self.estimated_hash_rate = 0.0 self.completed_jobs = 0 self.connection_start_time = 0 self.blocks_found = 0 self._iter_id = id_generator and id_generator() or None self.subscribed = False def _next_id(self): if self._iter_id: return str(next(self._iter_id)) def connectionMade(self) -> None: self.miner_id = uuid4() self.connection_start_time = self.factory.get_current_timestamp() self.log = self.log.bind(miner_id=self.miner_id, conn_at=self.connection_start_time, address=self.address) self.log.debug('new connection') def connectionLost(self, reason: Failure = None) -> None: if self.subscribed: self.log.info('miner disconnected') assert self.miner_id is not None self.factory.miner_protocols.pop(self.miner_id, None) def handle_request(self, method: str, params: Optional[Union[List, Dict]], msgid: Optional[str]) -> None: self.log.debug('handle request', msgid=msgid, method=method, params=params) if method in ['mining.subscribe', 'subscribe', 'mining.submit', 'submit']: if not self.manager.can_start_mining(): return self.send_error(NODE_SYNCING, msgid) if method in ['mining.subscribe', 'subscribe']: params = cast(Dict, params) return self.handle_subscribe(params, msgid) if method in ['mining.submit', 'submit']: params = cast(Dict, params) return self.handle_submit(params, msgid) self.send_error(METHOD_NOT_FOUND, msgid, data={'method': method, 'supported_methods': ['submit', 'subscribe']}) def handle_result(self, result: Any, msgid: Optional[str]) -> None: self.log.debug('handle result', msgid=msgid, result=result) def handle_error(self, error: Dict, data: Any, msgid: Optional[str]) -> None: self.log.error('handle error', msgid=msgid, error=error) def handle_subscribe(self, params: Dict, msgid: Optional[str]) -> None: assert self.miner_id is not None self.log.debug('handle subscribe', msgid=msgid, params=params) if params and 'address' in params and params['address'] is not None: try: address = params['address'] self.miner_address = decode_address(address) self.log.debug('miner with address', id=self.miner_id, address=address) except InvalidAddress: self.send_error(INVALID_ADDRESS, msgid) self.transport.loseConnection() return if params and 'mine_txs' in params: self.mine_txs = params['mine_txs'] if params and 'merged_mining' in params: self.merged_mining = params['merged_mining'] else: self.merged_mining = False if params and params.get('mine_txs') and params.get('merged_mining'): err = INVALID_PARAMS.copy() err['message'] = 'Cannot set both merged_mining=True and mine_txs=True' return self.send_error(err, msgid) if self.merged_mining: self.log.debug('merged_mining=True implies mine_txs=False') self.mine_txs = False self.factory.miner_protocols[self.miner_id] = self self.log.info('miner subscribed', address=self.miner_address, mine_txs=self.mine_txs, merged_mining=self.merged_mining) self.send_result('ok', msgid) self.subscribed = True self.job_request() def handle_submit(self, params: Dict, msgid: Optional[str]) -> None: from hathor.merged_mining.bitcoin import sha256d_hash self.log.debug('handle submit', msgid=msgid, params=params) if 'job_id' not in params or 'nonce' not in params: return self.send_error(INVALID_PARAMS, msgid, {'params': params, 'required': ['job_id', 'nonce']}) if not valid_uuid(params['job_id']): return self.send_error(INVALID_PARAMS, msgid, { 'job_id': params['job_id'], 'message': 'job_id is invalid uuid4' }) job_id = UUID(params['job_id']) job = self.jobs.get(job_id) if job is None: return self.send_error(JOB_NOT_FOUND, msgid, { 'current_job': self.current_job and 
self.current_job.id.hex, 'job_id': job_id.hex }) if job is not self.current_job or job.submitted is not None: return self.send_error(STALE_JOB, msgid, { 'current_job': self.current_job and self.current_job.id.hex, 'job_id': job_id.hex }) tx = job.tx.clone() block_base = tx.get_header_without_nonce() block_base_hash = sha256d_hash(block_base) if params.get('aux_pow'): assert isinstance(tx, MergeMinedBlock), 'expected MergeMinedBlock got ' + type(tx).__name__ tx.aux_pow = BitcoinAuxPow.from_bytes(bytes.fromhex(params['aux_pow'])) tx.nonce = 0 else: tx.nonce = int(params['nonce'], 16) tx.update_hash() assert tx.hash is not None self.log.debug('share received', block=tx, block_base=block_base.hex(), block_base_hash=block_base_hash.hex()) try: tx.verify_pow(job.weight) except PowError: self.log.error('bad share, discard', job_weight=job.weight, tx=tx) return self.send_error(INVALID_SOLUTION, msgid, { 'hash': tx.hash.hex(), 'target': int(tx.get_target()).to_bytes(32, 'big').hex() }) job.submitted = self.factory.get_current_timestamp() self.completed_jobs += 1 self.send_result('ok', msgid) self.manager.reactor.callLater(0, self.job_request) try: tx.verify_pow() except PowError: self.log.info('high hash, keep mining', tx=tx) return else: self.log.info('low hash, new block candidate', tx=tx) if isinstance(tx, Block): try: self.manager.submit_block(tx, fails_silently=False) self.blocks_found += 1 except (InvalidNewTransaction, TxValidationError) as e: self.log.warn('block propagation failed', block=tx, error=e) else: self.log.info('new block found', block=tx) elif isinstance(tx, Transaction): self.log.info('transaction mined', tx=tx) funds_hash = tx.get_funds_hash() if funds_hash in self.factory.mining_tx_pool: self.factory.mined_txs[funds_hash] = tx del self.factory.mining_tx_pool[funds_hash] if funds_hash in self.factory.tx_queue: self.factory.tx_queue.remove(funds_hash) if funds_hash in self.factory.deferreds_tx: d = self.factory.deferreds_tx.pop(funds_hash) d.callback(tx) else: assert False, 'tx should either be a Block or Transaction' def job_request(self) -> None: try: job = self.create_job() except (ValueError, ScriptError) as e: self.send_error(INVALID_PARAMS, data={ 'message': str(e) }) else: if job: job_data = { 'data': job.tx.get_header_without_nonce().hex(), 'job_id': job.id.hex, 'nonce_size': job.tx.SERIALIZATION_NONCE_SIZE, 'weight': float(job.weight), } if job.tx.is_block: assert isinstance(job.tx, Block) job_data['parent_hash'] = job.tx.get_block_parent_hash().hex() self.send_request('job', job_data, self._next_id()) def create_job(self) -> ServerJob: assert self.miner_id is not None self.cancel_current_job_timeout() jobid = uuid4() tx = self.create_job_tx(jobid) job = ServerJob(jobid, self.factory.get_current_timestamp(), self.miner_id, tx, 0.0) self.current_job = job self.jobs[job.id] = job self.job_ids.append(job.id) share_weight = self.calculate_share_weight() job.weight = min(share_weight, tx.weight) def jobTimeout(job: ServerJob, protocol: StratumProtocol) -> None: if job is protocol.current_job and job.submitted is None: if self.miner_id in self.factory.miner_protocols: protocol.job_request() timeout = self.BLOCK_MAXIMUM_JOB_TIME if tx.is_block else self.TX_MAXIMUM_JOB_TIME job.timeoutTask = self.manager.reactor.callLater(timeout, jobTimeout, job, self) if len(self.job_ids) > self.JOBS_HISTORY: del self.jobs[self.job_ids.pop(0)] return job
Apache License 2.0
jasonmcintosh/rabbitmq-zabbix
scripts/rabbitmq/api.py
RabbitMQAPI.check_aliveness
python
def check_aliveness(self):
    return self.call_api('aliveness-test/%2f')['status']
Check the aliveness status of a given vhost.
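The endpoint it wraps is the standard RabbitMQ management API aliveness test for the default vhost "/" (URL-encoded as %2f). A Python 3 sketch of the same call without the helper class is shown below; the host, port and guest/guest credentials are placeholder assumptions.

# Python 3 sketch of the aliveness check against the RabbitMQ management API.
# Host, port and guest/guest credentials are placeholder assumptions.
import base64
import json
import urllib.request

url = 'http://localhost:15672/api/aliveness-test/%2f'
token = base64.b64encode(b'guest:guest').decode('ascii')
request = urllib.request.Request(url, headers={'Authorization': 'Basic ' + token})

with urllib.request.urlopen(request) as response:
    print(json.load(response)['status'])   # expected: "ok" when the broker is healthy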
https://github.com/jasonmcintosh/rabbitmq-zabbix/blob/8ecadfdd2cab6154eb7ab73ce4e7bb39b21b61c1/scripts/rabbitmq/api.py#L209-L211
from __future__ import unicode_literals import io import json import optparse import socket import urllib2 import subprocess import os import logging class RabbitMQAPI(object): def __init__(self, user_name='guest', password='guest', host_name='', port=15672, conf='/etc/zabbix/zabbix_agentd.conf', senderhostname=None, protocol='http'): self.user_name = user_name self.password = password self.host_name = host_name or socket.gethostname() self.port = port self.conf = conf or '/etc/zabbix/zabbix_agentd.conf' self.senderhostname = senderhostname or socket.gethostname() self.protocol = protocol or 'http' def call_api(self, path): url = '{0}://{1}:{2}/api/{3}'.format(self.protocol, self.host_name, self.port, path) password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() password_mgr.add_password(None, url, self.user_name, self.password) handler = urllib2.HTTPBasicAuthHandler(password_mgr) logging.debug('Issue a rabbit API call to get data on ' + path + " against " + self.host_name) logging.debug('Full URL:' + url) return json.loads(urllib2.build_opener(handler).open(url).read()) def list_queues(self, filters=None): queues = [] if not filters: filters = [{}] for queue in self.call_api('queues'): logging.debug("Discovered queue " + queue['name'] + ", checking to see if it's filtered...") for _filter in filters: check = [(x, y) for x, y in queue.items() if x in _filter] shared_items = set(_filter.items()).intersection(check) if len(shared_items) == len(_filter): element = {'{#NODENAME}': queue['node'], '{#VHOSTNAME}': queue['vhost'], '{#QUEUENAME}': queue['name']} queues.append(element) logging.debug('Discovered queue '+queue['vhost']+'/'+queue['name']) break return queues def list_shovels(self, filters=None): shovels = [] if not filters: filters = [{}] try: for shovel in self.call_api('shovels'): logging.debug("Discovered shovel " + shovel['name'] + ", checking to see if it's filtered...") for _filter in filters: check = [(x, y) for x, y in shovel.items() if x in _filter] shared_items = set(_filter.items()).intersection(check) if len(shared_items) == len(_filter): element = {'{#VHOSTNAME}': shovel['vhost'], '{#SHOVELNAME}': shovel['name']} shovels.append(element) logging.debug('Discovered shovel '+shovel['vhost']+'/'+shovel['name']) break return shovels except urllib2.HTTPError as err: if err.code == 404: return shovels else: raise err def list_nodes(self): nodes = [] for node in self.call_api('nodes'): name = node['name'].split('@')[1] element = {'{#NODENAME}': name, '{#NODETYPE}': node['type']} nodes.append(element) logging.debug('Discovered nodes '+name+'/'+node['type']) return nodes def check_queue(self, filters=None): return_code = 0 if not filters: filters = [{}] buffer = io.StringIO() try: for queue in self.call_api('queues'): success = False logging.debug("Filtering out by " + str(filters)) for _filter in filters: check = [(x, y) for x, y in queue.items() if x in _filter] shared_items = set(_filter.items()).intersection(check) if len(shared_items) == len(_filter): success = True break if success: self._prepare_data(queue, buffer) except urllib2.HTTPError as err: if err.code == 404: buffer.close() return return_code else: raise err return_code = self._send_data(buffer) buffer.close() return return_code def check_shovel(self, filters=None): return_code = 0 if not filters: filters = [{}] buffer = io.StringIO() try: for shovel in self.call_api('shovels'): success = False logging.debug("Filtering out by " + str(filters)) for _filter in filters: check = [(x, y) for x, y in shovel.items() if x in 
_filter] shared_items = set(_filter.items()).intersection(check) if len(shared_items) == len(_filter): success = True break if success: key = '"rabbitmq.shovels[{0},shovel_{1},{2}]"' key = key.format(shovel['vhost'], 'state', shovel['name']) value = shovel.get('state', 0) logging.debug("SENDER_DATA: - %s %s" % (key,value)) buffer.write("- %s %s\n" % (key, value)) except urllib2.HTTPError as err: if err.code == 404: buffer.close() return return_code else: raise err return_code = self._send_data(buffer) buffer.close() return return_code def _prepare_data(self, queue, file): for item in ['memory', 'messages', 'messages_unacknowledged', 'consumers']: key = '"rabbitmq.queues[{0},queue_{1},{2}]"' key = key.format(queue['vhost'], item, queue['name']) value = queue.get(item, 0) logging.debug("SENDER_DATA: - %s %s" % (key,value)) file.write("- %s %s\n" % (key, value)) for item in ['deliver_get', 'publish', 'ack']: key = '"rabbitmq.queues[{0},queue_message_stats_{1},{2}]"' key = key.format(queue['vhost'], item, queue['name']) value = queue.get('message_stats', {}).get(item, 0) logging.debug("SENDER_DATA: - %s %s" % (key,value)) file.write("- %s %s\n" % (key, value)) def _send_data(self, file): args = 'zabbix_sender -vv -c {0} -i -' if self.senderhostname: args = args + " -s '%s' " % self.senderhostname return_code = 0 process = subprocess.Popen(args.format(self.conf), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = process.communicate(input=file.getvalue()) logging.debug("Finished sending data") return_code = process.wait() logging.info("Found return code of " + str(return_code)) if return_code == 1: logging.error(out) logging.error(err) else: logging.debug(err) logging.debug(out) return return_code
Apache License 2.0
bennylope/django-organizations
src/organizations/utils.py
model_field_attr
python
def model_field_attr(model, model_field, attr):
    fields = dict([(field.name, field) for field in model._meta.fields])
    return getattr(fields[model_field], attr)
Returns the specified attribute for the specified field on the model class.
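Usage is a one-liner once a Django project is configured. A hedged sketch: it assumes django-organizations is installed, Django settings are loaded, and that the default Organization model exposes a CharField called "name" with a max_length, as it does in current releases.

# Sketch: read a model field attribute without touching an instance.
# Assumes a configured Django project with django-organizations installed;
# the "name" field and its attributes come from the default Organization model.
from organizations.models import Organization
from organizations.utils import model_field_attr

name_max_length = model_field_attr(Organization, 'name', 'max_length')
blank_allowed = model_field_attr(Organization, 'name', 'blank')
print(name_max_length, blank_allowed)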
https://github.com/bennylope/django-organizations/blob/55808ad7e4b23ef4612c9226cadce46d514c9a79/src/organizations/utils.py#L89-L94
from itertools import chain


def default_org_model():
    from organizations.models import Organization
    return Organization


def model_field_names(model):
    return list(
        set(
            chain.from_iterable(
                (field.name, field.attname)
                if hasattr(field, "attname")
                else (field.name,)
                for field in model._meta.get_fields()
                if not (field.many_to_one and field.related_model is None)
            )
        )
    )


def create_organization(
    user,
    name,
    slug=None,
    is_active=None,
    org_defaults=None,
    org_user_defaults=None,
    **kwargs
):
    org_model = (
        kwargs.pop("model", None)
        or kwargs.pop("org_model", None)
        or default_org_model()
    )
    kwargs.pop("org_user_model", None)
    org_owner_model = org_model.owner.related.related_model
    org_user_model = org_model.organization_users.rel.related_model

    if org_defaults is None:
        org_defaults = {}
    if org_user_defaults is None:
        if "is_admin" in model_field_names(org_user_model):
            org_user_defaults = {"is_admin": True}
        else:
            org_user_defaults = {}

    if slug is not None:
        org_defaults.update({"slug": slug})
    if is_active is not None:
        org_defaults.update({"is_active": is_active})

    org_defaults.update({"name": name})
    organization = org_model.objects.create(**org_defaults)

    org_user_defaults.update({"organization": organization, "user": user})
    new_user = org_user_model.objects.create(**org_user_defaults)

    org_owner_model.objects.create(
        organization=organization, organization_user=new_user
    )
    return organization
BSD 2-Clause Simplified License
westpa/westpa
lib/west_tools/westtools/wipi.py
__get_data_for_iteration__.successful_trajectories
python
def successful_trajectories(self):
    state_changes = np.where(self.raw['states'][:,:-1] != self.raw['states'][:,1:])
    walkers = state_changes[0]
    new_states = state_changes[1] + 1
    old_states = state_changes[1]
    walker = {}
    for z, (i, j) in enumerate(zip(old_states, new_states)):
        istate = self.raw['states'][walkers[z], i]
        jstate = self.raw['states'][walkers[z], j]
        try:
            walker[istate, jstate].append(walkers[z])
        except:
            walker[istate, jstate] = [walkers[z]]
    walker = WIPIDataset(raw=walker, key=None)
    return walker
Returns which trajectories are successful.
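The detection logic is just a comparison of each walker's state sequence against itself shifted by one timepoint; np.where then yields the walker index and the timepoint immediately before each change. A toy NumPy sketch of that step on a fabricated three-walker state array:

# Toy illustration of the state-change detection used above
# (the states array is fabricated; 0/1 stand in for WESTPA state labels).
import numpy as np

states = np.array([
    [0, 0, 1, 1],   # walker 0 switches 0 -> 1 after timepoint 1
    [1, 1, 1, 1],   # walker 1 never changes state
    [1, 0, 0, 1],   # walker 2 switches twice
])

walkers, before = np.where(states[:, :-1] != states[:, 1:])
for w, t in zip(walkers, before):
    print('walker', w, 'went from state', states[w, t], 'to', states[w, t + 1])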
https://github.com/westpa/westpa/blob/cda177c5dea2cee571d71c4b04fcc625dc5f689c/lib/west_tools/westtools/wipi.py#L427-L450
import numpy as np import os, sys import scipy.sparse as sp from westtools import Plotter import itertools class WIPIDataset(object): def __init__(self, raw, key): self.__dict__ = {} self.raw = raw self.name = key def __repr__(self): if isinstance(self.__dict__['raw'], dict): return repr(self.__dir__()) else: return repr(self.raw) def __getitem__(self, value): if not isinstance(value, str): return self.__dict__['raw'][value] if value in list(self.__dict__['raw'].keys()): return self.__dict__['raw'][value] elif value in list(self.__dict__.keys()): return self.__dict__[value] def __setitem__(self, key, value): self.__dict__[key] = value def __getattr__(self, value): if value in dir(self.__dict__['raw']): return getattr(self.__dict__['raw'], value) else: return self.__getitem__(value) def __setattr__(self, key, value): self.__dict__[key] = value def __dir__(self): dict_keys = list(self.__dict__.keys()) remove = ['raw', 'name', '__dict__', 'plotter'] for i in remove: try: dict_keys.remove(str(i)) except: pass if isinstance(self.__dict__['raw'], dict): return sorted(set(list(self.raw.keys()) + dict_keys)) else: return sorted(set(dict_keys)) def keys(self): print(self.__dir__()) def __add__(self, other): return self.__dict__['raw'] + other def __radd__(self, other): return other + self.__dict__['raw'] def __sub__(self, other): return self.__dict__['raw'] - other def __rsub__(self, other): return other - self.__dict__['raw'] def __mul__(self, other): return self.__dict__['raw'] * other def __rmul__(self, other): return other * self.__dict__['raw'] def __truediv__(self, other): return self.__dict__['raw'] / other def __floordiv__(self, other): return self.__dict__['raw'] // other def __rtruediv__(self, other): return other / self.__dict__['raw'] def __mod__(self, other): return self.__dict__['raw'] % other def __pow__(self, other): return self.__dict__['raw'] ** other def __lshift__(self, other): return self.__dict__['raw'] << other def __rshift__(self, other): return self.__dict__['raw'] >> other def __and__(self, other): return self.__dict__['raw'] & other def __eq__(self, other): return self.__dict__['raw'] == other def __ne__(self, other): return self.__dict__['raw'] != other def __lt__(self, other): return self.__dict__['raw'] < other def __gt__(self, other): return self.__dict__['raw'] > other def __le__(self, other): return self.__dict__['raw'] <= other def __ge__(self, other): return self.__dict__['raw'] >= other def __xor__(self, other): return self.__dict__['raw'] ^ other def __or__(self, other): return self.__dict__['raw'] | other class KineticsIteration(object): def __init__(self, kin_h5file, index, assign, iteration=-1): self.__dict__ = {} self.h5file = kin_h5file _2D_h5keys = [ 'conditional_flux_evolution', 'rate_evolution' ] _1D_h5keys = [ 'state_pop_evolution', 'color_prob_evolution', 'target_flux_evolution' ] for key in _2D_h5keys: try: self.__dict__[key] = self.__2D_with_error__(key, index, assign) except: self.__dict__[key] = None for key in _1D_h5keys: try: self.__dict__[key] = self.__1D_with_error__(key, index, assign) except: self.__dict__[key] = None try: self.__dict__['total_fluxes'] = WIPIDataset(raw=np.array(self.h5file['total_fluxes']), key='total_fluxes') except: pass def __repr__(self): return repr(self.__dir__()) def __getitem__(self, value): if value in list(self.__dict__.keys()): return self.__dict__[value] def __setitem__(self, key, value): self.__dict__[key] = value def __getattr__(self, value): if value in list(self.__dict__.keys()): return self.__dict__[value] 
def __setattr__(self, key, value): self.__dict__[key] = value def __dir__(self): dict_keys = list(self.__dict__.keys()) remove = [ 'h5file', '__dict__'] for i in remove: try: dict_keys.remove(str(i)) except: pass return sorted(set(dict_keys)) def keys(self): print(self.__dir__()) class __custom_dataset__(object): def __init__(self, raw, assign, key): self.__dict__ = {} self.raw = raw self.name = key self.assign = assign self.nstates = assign.attrs['nstates'] self.dim = len(raw.shape) def __repr__(self): return repr(self.__dir__()) def __getitem__(self, value): if value in self.__dict__['raw'].dtype.names: return self.__dict__['raw'][value] elif value in list(self.__dict__.keys()): return self.__dict__[value] def __setitem__(self, key, value): self.__dict__[key] = value def __getattr__(self, value): if value in self.__dict__['raw'].dtype.names: return self.__dict__['raw'][value] elif value in list(self.__dict__.keys()): return self.__dict__[value] def __setattr__(self, key, value): self.__dict__[key] = value def __dir__(self): dict_keys = list(self.__dict__.keys()) remove = ['assign', 'dim', 'nstates', 'plotter', '__dict__'] for i in remove: try: dict_keys.remove(str(i)) except: pass return sorted(set(list(self.raw.dtype.names) + dict_keys)) def keys(self): print(self.__dir__()) def _repr_pretty_(self, p, cycle): if self.dim == 1: return self._1D_repr_pretty_(p, cycle) if self.dim == 2: return self._2D_repr_pretty_(p, cycle) def _1D_repr_pretty_(self, p, cycle): maxlabellen = max(list(map(len,self.assign['state_labels']))) p.text('') p.text('{name} data:\n'.format(name=self.name)) for istate in range(self.nstates): p.text('{:{maxlabellen}s}: mean={:21.15e} CI=({:21.15e}, {:21.15e}) * tau^-1\n' .format(self.assign['state_labels'][istate], self.raw['expected'][istate], self.raw['ci_lbound'][istate], self.raw['ci_ubound'][istate], maxlabellen=maxlabellen)) p.text('To access data, index via the following names:\n') p.text(str(self.__dir__())) return " " def _2D_repr_pretty_(self, p, cycle): maxlabellen = max(list(map(len,self.assign['state_labels']))) p.text('') p.text('{name} data:\n'.format(name=self.name)) for istate in range(self.nstates): for jstate in range(self.nstates): if istate == jstate: continue p.text('{:{maxlabellen}s} -> {:{maxlabellen}s}: mean={:21.15e} CI=({:21.15e}, {:21.15e}) * tau^-1\n' .format(self.assign['state_labels'][istate], self.assign['state_labels'][jstate], self.raw['expected'][istate, jstate], self.raw['ci_lbound'][istate, jstate], self.raw['ci_ubound'][istate, jstate], maxlabellen=maxlabellen)) p.text('To access data, index via the following names:\n') p.text(str(self.__dir__())) return " " def __2D_with_error__(self, h5key, index, assign): self.step_iter = (self.h5file[h5key]['iter_stop'][0] - self.h5file[h5key]['iter_start'][0])[1,0] value = ((index-self.h5file.attrs['iter_start']) // self.step_iter) if value < 0: value = 0 raw = self.h5file[h5key][value, :, :] error = (raw['ci_ubound'] - raw['ci_lbound']) / (2*raw['expected']) expected = raw['expected'] raw = self.__custom_dataset__(raw, assign, h5key) raw.error = error raw.plotter = Plotter(self.h5file, h5key, iteration=value, interface='text') raw.plot = raw.plotter.plot return raw def __1D_with_error__(self, h5key, index, assign): self.step_iter = (self.h5file[h5key]['iter_stop'][0] - self.h5file[h5key]['iter_start'][0])[1] value = ((index-self.h5file.attrs['iter_start']) // self.step_iter) if value < 0: value = 0 raw = self.h5file[h5key][value, :] error = (raw['ci_ubound'] - raw['ci_lbound']) / 
(2*raw['expected']) expected = raw['expected'] raw = self.__custom_dataset__(raw, assign, h5key) raw.error = error raw.plotter = Plotter(self.h5file, h5key, iteration=value, interface='text') raw.plot = raw.plotter.plot return raw class __get_data_for_iteration__(object): def __init__(self, parent, value, seg_ids = None): self.__dict__ = {} iter_group = parent.data_reader.get_iter_group(value) self.parent = parent current = {} current['iteration'] = value if seg_ids is None: seg_ids = range(0, iter_group['seg_index']['weight'].shape[0]) current['weights'] = iter_group['seg_index']['weight'][seg_ids] current['pcoord'] = iter_group['pcoord'][...][seg_ids, :, :] try: current['auxdata'] = {} for key in list(iter_group['auxdata'].keys()): current['auxdata'][key] = iter_group['auxdata'][key][...][seg_ids, :] except: pass current['parents'] = iter_group['seg_index']['parent_id'][seg_ids] current['summary'] = parent.data_reader.data_manager.get_iter_summary(int(value)) current['seg_id'] = np.array(list(range(0, iter_group['seg_index'].shape[0])))[seg_ids] current['walkers'] = current['summary']['n_particles'] current['states'] = parent.assign['trajlabels'][value-1, :current['walkers'], :][seg_ids] current['bins'] = parent.assign['assignments'][value-1, :current['walkers'], :][seg_ids] nbins = parent.assign['state_map'].shape[0] nstates = parent.assign['state_labels'].shape[0] + 1 current['direct'] = KineticsIteration(parent.direct, value, parent.assign, value) evolution_datasets = [ 'rate_evolution', 'conditional_flux_evolution', 'state_pop_evolution', 'color_prob_evolution' , 'total_fluxes', 'target_flux_evolution'] try: current['reweight'] = KineticsIteration(parent.reweight, value, parent.assign, value) matrix = parent.reweight['iterations/iter_{:08d}'.format(value)] current['instant_matrix'] = sp.coo_matrix((matrix['flux'][...], (matrix['rows'][...], matrix['cols'][...])), shape=((nbins-1)*2, (nbins-1)*2)).todense() reweighting = True except: current['reweight'] = parent.reweight['rate_evolution'] current['instant_matrix'] = parent.reweight['bin_populations'] current['matrix'] = parent.reweight['bin_populations'] reweighting = False if reweighting: for key in evolution_datasets: current[key] = WIPIDataset(raw={ 'direct': current['direct'][key], 'reweight': current['reweight'][key] }, key='a') else: for key in evolution_datasets: current[key] = WIPIDataset(raw={ 'direct': current['direct'][key] }, key='direct') self.raw = current def __repr__(self): return repr(self.__dir__()) def keys(self): return list(self.__dict__['raw'].keys()) def __setitem__(self, key, value): self.__dict__[key] = value def __getattr__(self, value): if value in list(self.__dict__['raw'].keys()): return self.__dict__['raw'][value] elif value in list(self.__dict__.keys()): return self.__dict__[value] def __setattr__(self, key, value): self.__dict__[key] = value def __dir__(self): dict_keys = list(self.__dict__.keys()) dict_keys += ['maxweight', 'minweight', 'walkers', 'aggregate_walkers', 'successful_trajectories'] remove = ['__dict__'] for i in remove: try: dict_keys.remove(str(i)) except: pass return sorted(set(list(self.__dict__['raw'].keys()) + dict_keys)) @property def maxweight(self): walker = np.where(self.raw['weights'] == np.max(self.raw['weights']))[0][0] return self.__getitem__(walker) @property def minweight(self): walker = np.where(self.raw['weights'] == np.min(self.raw['weights']))[0][0] return self.__getitem__(walker) @property
MIT License
hszhao/pointweb
lib/pointops/functions/pointops.py
pairwise_distances
python
def pairwise_distances(x, y=None):
    x_norm = (x ** 2).sum(1).view(-1, 1)
    if y is not None:
        y_t = torch.transpose(y, 0, 1)
        y_norm = (y ** 2).sum(1).view(1, -1)
    else:
        y_t = torch.transpose(x, 0, 1)
        y_norm = x_norm.view(1, -1)

    dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
    import numpy as np
    return torch.clamp(dist, 0.0, np.inf)
Input: x is an Nxd matrix
       y is an optional Mxd matrix
Output: dist is an NxM matrix where dist[i,j] is the squared norm
        between x[i,:] and y[j,:].
        If y is not given then 'y=x' is used, i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2.
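The function relies on the expansion ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 x_i.y_j, with a clamp to absorb small negative values from floating-point noise. A small sketch verifying that identity against a brute-force loop (shapes are arbitrary; requires PyTorch):

# Verify the squared-distance expansion against a brute-force loop.
# Shapes are arbitrary; requires PyTorch.
import torch

torch.manual_seed(0)
x = torch.randn(5, 3)
y = torch.randn(7, 3)

# Vectorised form: ||x||^2 + ||y||^2 - 2 x y^T, clamped at 0 for numerical noise.
dist = (x ** 2).sum(1).view(-1, 1) + (y ** 2).sum(1).view(1, -1) - 2.0 * torch.mm(x, y.t())
dist = torch.clamp(dist, min=0.0)

# Brute force: one row of squared distances per point in x.
brute = torch.stack([((xi - y) ** 2).sum(1) for xi in x])
print(torch.allclose(dist, brute, atol=1e-5))   # True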
https://github.com/hszhao/pointweb/blob/f31fe05616c3c068f6c1870170a3caaf1f7d8abb/lib/pointops/functions/pointops.py#L346-L363
from typing import Tuple import torch from torch.autograd import Function import torch.nn as nn import pointops_cuda class FurthestSampling(Function): @staticmethod def forward(ctx, xyz, m): assert xyz.is_contiguous() b, n, _ = xyz.size() idx = torch.cuda.IntTensor(b, m) temp = torch.cuda.FloatTensor(b, n).fill_(1e10) pointops_cuda.furthestsampling_cuda(b, n, m, xyz, temp, idx) return idx @staticmethod def backward(xyz, a=None): return None, None furthestsampling = FurthestSampling.apply class Gathering(Function): @staticmethod def forward(ctx, features, idx): assert features.is_contiguous() assert idx.is_contiguous() b, c, n = features.size() m = idx.size(1) output = torch.cuda.FloatTensor(b, c, m) pointops_cuda.gathering_forward_cuda(b, c, n, m, features, idx, output) ctx.for_backwards = (idx, c, n) return output @staticmethod def backward(ctx, grad_out): idx, c, n = ctx.for_backwards b, m = idx.size() grad_features = torch.cuda.FloatTensor(b, c, n).zero_() grad_out_data = grad_out.data.contiguous() pointops_cuda.gathering_backward_cuda(b, c, n, m, grad_out_data, idx, grad_features.data) return grad_features, None gathering = Gathering.apply class NearestNeighbor(Function): @staticmethod def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: assert unknown.is_contiguous() assert known.is_contiguous() b, n, _ = unknown.size() m = known.size(1) dist2 = torch.cuda.FloatTensor(b, n, 3) idx = torch.cuda.IntTensor(b, n, 3) pointops_cuda.nearestneighbor_cuda(b, n, m, unknown, known, dist2, idx) return torch.sqrt(dist2), idx @staticmethod def backward(ctx, a=None, b=None): return None, None nearestneighbor = NearestNeighbor.apply class Interpolation(Function): @staticmethod def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor: assert features.is_contiguous() assert idx.is_contiguous() assert weight.is_contiguous() b, c, m = features.size() n = idx.size(1) ctx.interpolation_for_backward = (idx, weight, m) output = torch.cuda.FloatTensor(b, c, n) pointops_cuda.interpolation_forward_cuda(b, c, m, n, features, idx, weight, output) return output @staticmethod def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: idx, weight, m = ctx.interpolation_for_backward b, c, n = grad_out.size() grad_features = torch.cuda.FloatTensor(b, c, m).zero_() grad_out_data = grad_out.data.contiguous() pointops_cuda.interpolation_backward_cuda(b, c, n, m, grad_out_data, idx, weight, grad_features.data) return grad_features, None, None interpolation = Interpolation.apply class Grouping(Function): @staticmethod def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor: assert features.is_contiguous() assert idx.is_contiguous() b, c, n = features.size() _, m, nsample = idx.size() output = torch.cuda.FloatTensor(b, c, m, nsample) pointops_cuda.grouping_forward_cuda(b, c, n, m, nsample, features, idx, output) ctx.for_backwards = (idx, n) return output @staticmethod def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: idx, n = ctx.for_backwards b, c, m, nsample = grad_out.size() grad_features = torch.cuda.FloatTensor(b, c, n).zero_() grad_out_data = grad_out.data.contiguous() pointops_cuda.grouping_backward_cuda(b, c, n, m, nsample, grad_out_data, idx, grad_features.data) return grad_features, None grouping = Grouping.apply class GroupingInt(Function): @staticmethod def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor: assert 
features.is_contiguous() assert idx.is_contiguous() b, c, n = features.size() _, m, nsample = idx.size() output = torch.cuda.LongTensor(b, c, m, nsample) pointops_cuda.grouping_int_forward_cuda(b, c, n, m, nsample, features, idx, output) return output @staticmethod def backward(ctx, a=None): return None, None grouping_int = GroupingInt.apply class BallQuery(Function): @staticmethod def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor: assert xyz.is_contiguous() assert new_xyz.is_contiguous() b, n, _ = xyz.size() m = new_xyz.size(1) idx = torch.cuda.IntTensor(b, m, nsample).zero_() pointops_cuda.ballquery_cuda(b, n, m, radius, nsample, new_xyz, xyz, idx) return idx @staticmethod def backward(ctx, a=None): return None, None, None, None ballquery = BallQuery.apply class FeatureDistribute(Function): @staticmethod def forward(ctx, max_xyz: torch.Tensor, xyz: torch.Tensor) -> torch.Tensor: assert max_xyz.is_contiguous() assert xyz.is_contiguous() b, n, _ = max_xyz.size() m = xyz.size(1) distribute_idx = torch.cuda.IntTensor(b, m).zero_() pointops_cuda.featuredistribute_cuda(b, n, m, max_xyz, xyz, distribute_idx) return distribute_idx @staticmethod def backward(ctx, a=None): return None, None featuredistribute = FeatureDistribute.apply class FeatureGather(Function): @staticmethod def forward(ctx, max_feature: torch.Tensor, distribute_idx: torch.Tensor) -> torch.Tensor: assert max_feature.is_contiguous() assert distribute_idx.is_contiguous() b, c, n = max_feature.size() m = distribute_idx.size(1) distribute_feature = torch.cuda.FloatTensor(b, c, m).zero_() pointops_cuda.featuregather_forward_cuda(b, n, m, c, max_feature, distribute_idx, distribute_feature) ctx.for_backwards = (distribute_idx, n) return distribute_feature @staticmethod def backward(ctx, grad_distribute_feature: torch.Tensor): distribute_idx, n = ctx.for_backwards b, c, m = grad_distribute_feature.size() grad_max_feature = torch.cuda.FloatTensor(b, c, n).zero_() grad_distribute_feature_data = grad_distribute_feature.data.contiguous() pointops_cuda.featuregather_backward_cuda(b, n, m, c, grad_distribute_feature_data, distribute_idx, grad_max_feature.data) return grad_max_feature, None featuregather = FeatureGather.apply class LabelStatBallRange(Function): @staticmethod def forward(ctx, radius: float, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor) -> torch.Tensor: assert xyz.is_contiguous() assert new_xyz.is_contiguous() assert label_stat.is_contiguous() b, n, nclass = label_stat.size() m = new_xyz.size(1) new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_() pointops_cuda.labelstat_ballrange_cuda(b, n, m, radius, nclass, new_xyz, xyz, label_stat, new_label_stat) return new_label_stat @staticmethod def backward(ctx, a=None): return None, None, None, None labelstat_ballrange = LabelStatBallRange.apply class LabelStatIdx(Function): @staticmethod def forward(ctx, nsample: int, label_stat: torch.Tensor, idx: torch.Tensor) -> torch.Tensor: assert label_stat.is_contiguous() assert idx.is_contiguous() b, n, nclass = label_stat.size() m = idx.size(1) new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_() pointops_cuda.labelstat_idx_cuda(b, n, m, nsample, nclass, label_stat, idx, new_label_stat) return new_label_stat @staticmethod def backward(ctx, a=None): return None, None, None labelstat_idx = LabelStatIdx.apply class LabelStatAndBallQuery(Function): @staticmethod def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor, 
label_stat: torch.Tensor): assert xyz.is_contiguous() assert new_xyz.is_contiguous() assert label_stat.is_contiguous() b, n, nclass = label_stat.size() m = new_xyz.size(1) new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_() idx = torch.cuda.IntTensor(b, m, nsample).zero_() pointops_cuda.labelstat_and_ballquery_cuda(b, n, m, radius, nsample, nclass, new_xyz, xyz, label_stat, idx, new_label_stat) return new_label_stat, idx @staticmethod def backward(ctx, a=None, b=None): return None, None, None, None, None labelstat_and_ballquery = LabelStatAndBallQuery.apply
MIT License
probcomp/bayeslite
src/core.py
bayesdb_table_has_column
python
def bayesdb_table_has_column(bdb, table, name):
    bayesdb_table_guarantee_columns(bdb, table)
    sql = 'SELECT COUNT(*) FROM bayesdb_column WHERE tabname = ? AND name = ?'
    return cursor_value(bdb.sql_execute(sql, (table, name)))
True if the table named `table` has a column named `name`.

`bdb` must have a table named `table`. If you're not sure, call
:func:`bayesdb_has_table` first.

WARNING: This may modify the database by populating the
``bayesdb_column`` table if it has not yet been populated.
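A sketch of a round trip through the public helpers follows. It assumes bayeslite is installed, that bayesdb_open() with no pathname gives a throwaway database (treat that as an assumption), and the table and column names are made up. Note the return value is the COUNT(*) result, so it is truthy or falsy rather than a strict boolean.

# Sketch (assumes bayeslite is installed; table and column names are made up).
import bayeslite
from bayeslite.core import bayesdb_has_table, bayesdb_table_has_column

bdb = bayeslite.bayesdb_open()   # no pathname: assumed to be a throwaway database
bdb.sql_execute('CREATE TABLE people (name TEXT, age INTEGER)')

print(bayesdb_has_table(bdb, 'people'))                    # True
print(bayesdb_table_has_column(bdb, 'people', 'age'))      # truthy (1)
print(bayesdb_table_has_column(bdb, 'people', 'salary'))   # falsy (0)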
https://github.com/probcomp/bayeslite/blob/211e5eb3821a464a2fffeb9d35e3097e1b7a99ba/src/core.py#L82-L93
from bayeslite.exception import BQLError
from bayeslite.sqlite3_util import sqlite3_quote_name
from bayeslite.util import casefold
from bayeslite.util import cursor_value


def bayesdb_has_table(bdb, name):
    qt = sqlite3_quote_name(name)
    cursor = bdb.sql_execute('PRAGMA table_info(%s)' % (qt,))
    try:
        cursor.next()
    except StopIteration:
        return False
    else:
        return True


def bayesdb_table_column_names(bdb, table):
    bayesdb_table_guarantee_columns(bdb, table)
    sql = '''
        SELECT name FROM bayesdb_column WHERE tabname = ?
            ORDER BY colno ASC
    '''
    return [str(row[0]) for row in bdb.sql_execute(sql, (table,))]
Apache License 2.0
zagaran/mongolia
mongolia/database_collection.py
DatabaseCollection.__init__
python
def __init__(self, path=None, objtype=None, query=None, sort_by=ID_KEY,
             ascending=True, page=0, page_size=None, read_only=False,
             projection=None, field=None, **kwargs):
    if objtype:
        self.OBJTYPE = objtype
    if path:
        self.PATH = path
    if not query:
        query = kwargs
    if field:
        if field == ID_KEY:
            projection = [ID_KEY]
        else:
            projection = {field: True, ID_KEY: False}
    if projection:
        read_only = True
    results = self.db().find(query, projection=projection)
    if isinstance(sort_by, list):
        results = results.sort(sort_by)
    elif sort_by:
        results = results.sort(sort_by, ASCENDING if ascending else DESCENDING)
    if page_size:
        results.limit(page_size).skip(page_size * page)
    if field:
        for result in results:
            if field in result:
                self.append(result[field])
    elif read_only:
        for result in results:
            self.append(result)
    else:
        for result in results:
            self.append(self.OBJTYPE(path=self.PATH, _new_object=result))
Loads a list of DatabaseObjects from path matching query. If nothing matches
the query (possibly because there is nothing in the specified mongo
collection), the created DatabaseCollection will be an empty list and have
bool(returned object) == False.

NOTE: The path and objtype parameters to this function are to allow use of
the DatabaseCollection class directly. However, this class is intended for
subclassing and children of it should override either the OBJTYPE or PATH
attribute rather than passing them as parameters here.

WARNING: if you are attempting to grab a particularly large set of results
(such as an entire collection), your system may run out of memory. In the
event of large collections, the number of returned results can be reduced
either by filtering the results with the query parameter or by using
pagination via the page_size and page parameters. If all you need to do is
iterate over the collection, use the `iterator` classmethod.

@param path: the path of the database to query, in the form
    "database.collection"; pass None to use the value of the PATH property
    of the object or, if that is None, the PATH property of OBJTYPE
@param objtype: the object type to use for these DatabaseObjects; pass None
    to use the OBJTYPE property of the class
@param query: a dictionary specifying key-value pairs that the result must
    match. If query is None, use kwargs in its place
@param sort_by: a key to use for the sort order of the results; ID_KEY by
    default. Can also be a list of pairs [(key, direction), ...], as in
    pymongo's sort function. If set to None, no sort operation is applied.
@param ascending: whether to sort the results in ascending order of the
    sort_by key (if True) or descending order (if False). Ignored if
    sort_by is a list or None.
@param page: the page number of results to return if pagination is being
    used; note that if page_size is None, this parameter is ignored; page
    is 0-indexed
@param page_size: returns only a single page of results, this defining the
    number of results in a page; see also the page parameter; if this is
    None or 0, paging is disabled
@param read_only: returns the contents as python dictionaries rather than
    DatabaseObjects. This is not read_only in the sense that the returned
    objects are immutable, but in the sense that they have no attached
    .save() methods, so there is no way to write modifications to them back
    to the database.
@param projection: specifies fields to return in a projection query. May be
    a list of field names, in which case the ID_KEY field is always
    returned whether or not listed. To prevent ID_KEY from being included,
    use a dict instead (see pymongo documentation for projection queries).
    Specifying this field implies read_only.
@param field: returns simply the indicated field for each object rather
    than the entire object. For example, if ID_KEY is passed in for this
    parameter, only the IDs of the collection are returned, not the entire
    contents. This behaves similarly to read_only in that the returned
    objects cannot be saved to the database if they are updated. Objects
    that do not have the indicated field are omitted from results. Do not
    combine with the projection parameter.
@param **kwargs: used as query parameters if query is None
@raise Exception: if path, self.PATH, and self.OBJTYPE.PATH are all None;
    the database path must be defined in at least one of these
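A usage sketch of the intended subclassing pattern is shown below. It requires a running MongoDB that mongolia can reach; the "analytics.users" path, the User object and the query fields are all made up for illustration.

# Sketch of the intended subclassing pattern (requires a running MongoDB
# reachable by mongolia; path, field names and query values are made up).
from mongolia.database_object import DatabaseObject
from mongolia.database_collection import DatabaseCollection


class User(DatabaseObject):
    PATH = "analytics.users"


class Users(DatabaseCollection):
    OBJTYPE = User


first_page = Users(query={"active": True}, sort_by="email", page_size=50, page=0)
emails = Users(field="email")      # plain list of email values, not objects
raw_docs = Users(read_only=True)   # dicts without .save(), cheaper to build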
https://github.com/zagaran/mongolia/blob/18d921017123b9edb7b70b01d8cfb3eb491b4cb6/mongolia/database_collection.py#L67-L161
import json

from pymongo import ASCENDING, DESCENDING

from mongolia.constants import ID_KEY, GT
from mongolia.database_object import DatabaseObject
from mongolia.json_codecs import MongoliaJSONEncoder


class DatabaseCollection(list):
    OBJTYPE = DatabaseObject
    PATH = None
MIT License
diofant/diofant
diofant/simplify/radsimp.py
rcollect
python
def rcollect(expr, *vars):
    if expr.is_Atom or not expr.has(*vars):
        return expr
    else:
        expr = expr.__class__(*[rcollect(arg, *vars) for arg in expr.args])

        if expr.is_Add:
            return collect(expr, vars)
        else:
            return expr
Recursively collect sums in an expression.

Examples
========

>>> expr = (x**2*y + x*y + x + y)/(x + y)
>>> rcollect(expr, y)
(x + y*(x**2 + x + 1))/(x + y)

See Also
========

collect, collect_const, collect_sqrt
https://github.com/diofant/diofant/blob/05c50552b0e0533f1dbf2ec05e65b6c45b7e2c11/diofant/simplify/radsimp.py#L387-L412
from collections import defaultdict from ..core import (Add, Derivative, I, Integer, Mul, Pow, Rational, expand_mul, expand_power_base, gcd_terms, symbols) from ..core.compatibility import iterable from ..core.exprtools import Factors from ..core.function import _mexpand from ..core.mul import _keep_coeff, _unevaluated_Mul from ..core.sympify import sympify from ..functions import log, sqrt from ..polys import gcd from ..utilities import default_sort_key, ordered from .sqrtdenest import sqrtdenest def collect(expr, syms, func=None, evaluate=True, exact=False, distribute_order_term=True): def make_expression(terms): product = [] for term, rat, sym, deriv in terms: if deriv is not None: var, order = deriv while order > 0: term, order = Derivative(term, var), order - 1 if sym is None: if rat == 1: product.append(term) else: product.append(Pow(term, rat)) else: product.append(Pow(term, rat*sym)) return Mul(*product) def parse_derivative(deriv): expr, sym, order = deriv.expr, deriv.variables[0], 1 for s in deriv.variables[1:]: if s == sym: order += 1 else: raise NotImplementedError( 'Improve MV Derivative support in collect') while isinstance(expr, Derivative): s0 = expr.variables[0] for s in expr.variables: if s != s0: raise NotImplementedError( 'Improve MV Derivative support in collect') if s0 == sym: expr, order = expr.expr, order + len(expr.variables) else: break return expr, (sym, Rational(order)) def parse_term(expr): rat_expo, sym_expo = Integer(1), None sexpr, deriv = expr, None if expr.is_Pow: if isinstance(expr.base, Derivative): sexpr, deriv = parse_derivative(expr.base) else: sexpr = expr.base if expr.exp.is_Number: rat_expo = expr.exp else: coeff, tail = expr.exp.as_coeff_Mul() if coeff.is_Number: rat_expo, sym_expo = coeff, tail else: sym_expo = expr.exp elif isinstance(expr, Derivative): sexpr, deriv = parse_derivative(expr) return sexpr, rat_expo, sym_expo, deriv def parse_expression(terms, pattern): pattern = Mul.make_args(pattern) if len(terms) >= len(pattern): pattern = [parse_term(elem) for elem in pattern] terms = terms[:] elems, common_expo, has_deriv = [], None, False for elem, e_rat, e_sym, e_ord in pattern: if elem.is_Number and e_rat == 1 and e_sym is None: continue for j in range(len(terms)): if terms[j] is None: continue term, t_rat, t_sym, t_ord = terms[j] if t_ord is not None: has_deriv = True if (term.match(elem) is not None and (t_sym == e_sym or t_sym is not None and e_sym is not None and t_sym.match(e_sym) is not None)): if exact is False: expo = t_rat / e_rat if common_expo is None: common_expo = expo else: if common_expo != expo: common_expo = 1 else: if e_rat != t_rat or e_ord != t_ord: continue elems.append(terms[j]) terms[j] = None break else: return return [_f for _f in terms if _f], elems, common_expo, has_deriv if evaluate: if expr.is_Mul: return expr.func(*[ collect(term, syms, func, True, exact, distribute_order_term) for term in expr.args]) elif expr.is_Pow: b = collect( expr.base, syms, func, True, exact, distribute_order_term) return Pow(b, expr.exp) if iterable(syms): syms = [expand_power_base(i, deep=False) for i in syms] else: syms = [expand_power_base(syms, deep=False)] expr = sympify(expr) order_term = None if distribute_order_term: order_term = expr.getO() if order_term is not None: if order_term.has(*syms): order_term = None else: expr = expr.removeO() summa = [expand_power_base(i, deep=False) for i in Add.make_args(expr)] collected, disliked = defaultdict(list), Integer(0) for product in summa: terms = [parse_term(i) for i in 
Mul.make_args(product)] for symbol in syms: result = parse_expression(terms, symbol) if result is not None: terms, elems, common_expo, has_deriv = result if not has_deriv: index = 1 for elem in elems: e = elem[1] if elem[2] is not None: e *= elem[2] index *= Pow(elem[0], e) else: index = make_expression(elems) terms = expand_power_base(make_expression(terms), deep=False) index = expand_power_base(index, deep=False) collected[index].append(terms) break else: disliked += product collected = {k: Add(*v) for k, v in collected.items()} if disliked != 0: collected[Integer(1)] = disliked if order_term is not None: for key, val in collected.items(): collected[key] = val + order_term if func is not None: collected = {key: func(val) for key, val in collected.items()} if evaluate: return Add(*[key*val for key, val in collected.items()]) else: return collected
BSD 3-Clause New or Revised License
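A short, self-contained run of rcollect mirroring the doctest above; it assumes only that diofant is installed.

    from diofant import symbols
    from diofant.simplify.radsimp import rcollect

    x, y = symbols('x y')
    expr = (x**2*y + x*y + x + y)/(x + y)
    print(rcollect(expr, y))   # (x + y*(x**2 + x + 1))/(x + y)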
cityofsantamonica/mds-provider
mds/db/loaders.py
Records.load
python
def load(self, source, **kwargs):
    if isinstance(source, dict):
        source = [source]

    df = pd.DataFrame.from_records(source)
    super().load(df, **kwargs)
Load data from one or more MDS Provider records.

Parameters:
    source: dict, list
        One or more dicts of type record_type.
    record_type: str
        The type of MDS data.
    table: str
        The name of the database table to insert this data into.
    engine: sqlalchemy.engine.Engine
        The engine used for connections to the database backend.

Additional keyword arguments are passed-through to DataFrameLoader.load().
https://github.com/cityofsantamonica/mds-provider/blob/02abcb227c35cdfe78a39e35b3157f7c2916c028/mds/db/loaders.py#L193-L216
import string import pandas as pd from ..db import sql from ..fake import util from ..files import DataFile from ..schemas import STATUS_CHANGES, TRIPS, EVENTS, VEHICLES, Schema from ..versions import UnexpectedVersionError, Version class DataFrame(): def load(self, source, **kwargs): record_type = kwargs.pop("record_type") table = kwargs.pop("table") engine = kwargs.pop("engine") version = Version(kwargs.get("version", Version.mds_lower())) version.raise_if_unsupported() before_load = kwargs.get("before_load") stage_first = kwargs.get("stage_first") on_conflict_update = kwargs.get("on_conflict_update") if before_load is not None: transform = before_load(source, version) source = source if transform is None else transform if not stage_first: source.to_sql(table, engine, if_exists="append", index=False) return factor = stage_first if isinstance(stage_first, int) else 1 temp = f"{table}_tmp_{util.random_string(factor, chars=string.ascii_lowercase)}" source.to_sql(temp, engine, if_exists="replace", index=False) with engine.begin() as conn: if record_type in [STATUS_CHANGES, EVENTS]: query = sql.insert_status_changes_from(temp, table, version=version, on_conflict_update=on_conflict_update) elif record_type == TRIPS: query = sql.insert_trips_from(temp, table, version=version, on_conflict_update=on_conflict_update) elif record_type == VEHICLES: query = sql.insert_vehicles_from(temp, table, version=version, on_conflict_update=on_conflict_update) if query is not None: conn.execute(query) conn.execute(f"DROP TABLE {temp}") @classmethod def can_load(cls, source): return isinstance(source, pd.DataFrame) class File(DataFrame): def load(self, source, **kwargs): record_type = kwargs.get("record_type") version = Version(kwargs.get("version")) _version, df = DataFile(record_type, source).load_dataframe() if version and _version != version: raise UnexpectedVersionError(_version, version) return super().load(df, **kwargs) @classmethod def can_load(cls, source): try: return DataFile(source).file_sources except: return False class Records(DataFrame):
MIT License
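A hedged sketch of loading a single record through Records.load. The connection string, table name, and record fields are placeholders; the keyword names come from the docstring above.

    from sqlalchemy import create_engine
    from mds.db.loaders import Records
    from mds.schemas import STATUS_CHANGES

    engine = create_engine("postgresql://user:pass@localhost:5432/mds")   # placeholder DSN
    record = {"provider_id": "...", "device_id": "...", "event_type": "available"}  # abbreviated record

    # A single dict is wrapped into a one-row DataFrame before being handed to the base loader.
    Records().load(record, record_type=STATUS_CHANGES, table="status_changes", engine=engine)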
google/cauliflowervest
cauliflowervest/server/handlers/maintenance.py
_update_schema
python
def _update_schema(model, cursor=None, num_updated=0):
    query = model.all()
    if cursor:
        query.with_cursor(cursor)

    updated = 0
    for p in query.fetch(limit=_BATCH_SIZE):
        _reinsert_entity(model, p.key())
        updated += 1

    if updated > 0:
        num_updated += updated
        logging.info(
            'Put %d %s entities to Datastore for a total of %d',
            updated, model.ESCROW_TYPE_NAME, num_updated)
        deferred.defer(
            _update_schema, model, cursor=query.cursor(),
            num_updated=num_updated, _queue=_QUEUE_NAME, _countdown=20)
    else:
        logging.info(
            'UpdateSchema complete for %s with %d updates!',
            model.ESCROW_TYPE_NAME, num_updated)
Add tag field.
https://github.com/google/cauliflowervest/blob/d3f52501ebed8b9a392350c8e177bbc602a6a09d/cauliflowervest/server/handlers/maintenance.py#L41-L63
import httplib
import logging

from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import deferred

from cauliflowervest import settings as base_settings
from cauliflowervest.server.handlers import base_handler
from cauliflowervest.server.models import base
from cauliflowervest.server.models import util

_BATCH_SIZE = 20
_QUEUE_NAME = 'serial'


@db.transactional()
def _reinsert_entity(model, entity_key):
    entity = model.get(entity_key)
    entity.tag = getattr(entity, 'tag', 'default')
    super(base.BasePassphrase, entity).put()
Apache License 2.0
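A sketch of how a migration like this is typically started on App Engine: defer the first batch and let the function re-defer itself until the cursor is exhausted. `PassphraseModel` is a stand-in for whichever base.BasePassphrase subclass is being migrated.

    from google.appengine.ext import deferred

    # Kick off the batched re-put; each batch re-queues the next one with the
    # Datastore cursor until no entities remain.
    deferred.defer(_update_schema, PassphraseModel, _queue=_QUEUE_NAME)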
jmchilton/galaxy-central
galaxy/datatypes/sniff.py
is_bed
python
def is_bed(headers, skip=0):
    try:
        if not headers:
            return False
        for hdr in headers[skip:]:
            try:
                map(int, [hdr[1], hdr[2]])
            except:
                return False
        return True
    except:
        return False
Checks for 'bedness'

>>> fname = get_test_fname('test_tab.bed')
>>> headers = get_headers(fname, sep='\\t')
>>> is_bed(headers)
True
>>> fname = get_test_fname('interval.bed')
>>> headers = get_headers(fname, sep='\\t')
>>> is_bed(headers)
False
https://github.com/jmchilton/galaxy-central/blob/31e2fd3a32b06ddfba06ae5b044efdce1d93f08c/galaxy/datatypes/sniff.py#L221-L246
import logging, sys, os, csv, tempfile, shutil, re log = logging.getLogger(__name__) def get_test_fname(fname): path, name = os.path.split(__file__) full_path = os.path.join(path, 'test', fname) return full_path def stream_to_file(stream): fd, temp_name = tempfile.mkstemp() while 1: chunk = stream.read(1048576) if not chunk: break os.write(fd, chunk) os.close(fd) return temp_name def convert_newlines(fname): fd, temp_name = tempfile.mkstemp() os.close(fd) shutil.copyfile(fname, temp_name) fp = open(fname, "wt") for line in file(temp_name, "U"): line = line.strip() + '\n' fp.write(line) fp.close() os.remove(temp_name) def sep2tabs(fname, patt="\\s+"): fd, temp_name = tempfile.mkstemp() os.close(fd) shutil.copyfile(fname, temp_name) regexp = re.compile(patt) fp = open(fname, 'wt') for line in file(temp_name): line = line.strip() elems = regexp.split(line) fp.write('\t'.join(elems) + '\n') fp.close() os.remove(temp_name) def get_headers(fname, sep, count=30): headers = [] for idx, line in enumerate(file(fname)): line = line.strip() if idx == count: break headers.append( line.split(sep) ) return headers def is_column_based(fname, sep='\t'): headers = get_headers(fname, sep=sep) if not headers: return False count = len(headers[0]) if count < 2: return False for hdr in headers: if len(hdr) != count: return False return True def is_fasta(headers): try: return len(headers) > 1 and headers[0][0] and headers[0][0][0] == ">" except: return False def is_gff(headers): try: return len(headers) > 2 and headers[0][1] and headers[0][1].startswith('gff-version') except: return False def is_maf(headers): try: return len(headers) > 1 and headers[0][0] and headers[0][0] == "##maf" except: return False def is_lav(headers): try: return len(headers) > 1 and headers[0][0] and headers[0][0].startswith('#:lav') except: return False def is_axt(headers): try: return (len(headers) >= 4) and (headers[0][7] == '-' or headers[0][7] == '+') and (headers[3] == []) and (len(headers[0])==9 or len(headers[0])==10) except: return False def is_wiggle(headers): try: for idx, hdr in enumerate(headers): if hdr and hdr[0] == "track": return True if idx > 10: break return False except: return False
MIT License
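A small sketch of the sniffing flow, mirroring the doctest above; the filename is one of the bundled test fixtures.

    fname = get_test_fname('test_tab.bed')
    headers = get_headers(fname, sep='\t')
    print(is_bed(headers))   # True: columns 2 and 3 parse as integer coordinates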
smartbgp/yabgp
yabgp/message/open.py
Open.construct
python
def construct(self, my_capability):
    capas = b''
    if 'afi_safi' in my_capability:
        capas += Capability(capa_code=1, capa_length=4).construct(my_capability)
    if my_capability.get('cisco_route_refresh'):
        capas += Capability(capa_code=128, capa_length=0).construct(my_capability)
    if my_capability.get('route_refresh'):
        capas += Capability(capa_code=2, capa_length=0).construct(my_capability)
    if self.asn > 65535:
        capas += Capability(capa_code=65, capa_length=4,
                            capa_value=self.asn).construct(my_capability)
        self.asn = 23456
    else:
        if my_capability.get('four_bytes_as'):
            capas += Capability(capa_code=65, capa_length=4,
                                capa_value=self.asn).construct(my_capability)
    if my_capability.get('add_path'):
        capas += Capability(capa_code=69, capa_length=4,
                            capa_value=my_capability['add_path']).construct()
    if my_capability.get('enhanced_route_refresh'):
        capas += Capability(capa_code=70, capa_length=0).construct()

    open_header = struct.pack('!BHHIB', self.version, self.asn, self.hold_time,
                              self.bgp_id, len(capas))
    message = open_header + capas
    return self.construct_header(message)
Construct a BGP Open message
https://github.com/smartbgp/yabgp/blob/f073633a813899cd9b413bc28ea2f7737deee141/yabgp/message/open.py#L232-L265
import struct import netaddr from yabgp.common import exception as excp from yabgp.common import constants as bgp_cons class Open(object): def __init__(self, version=None, asn=None, hold_time=None, bgp_id=None, opt_para_len=None, opt_paras=None): self.version = version self.asn = asn self.hold_time = hold_time self.bgp_id = bgp_id self.opt_para_len = opt_para_len self.opt_paras = opt_paras self.capa_dict = {} def parse(self, message): try: self.version, self.asn, self.hold_time, self.bgp_id, self.opt_para_len = struct.unpack('!BHHIB', message[:10]) except: raise excp.MessageHeaderError( sub_error=bgp_cons.ERR_MSG_HDR_BAD_MSG_LEN, data=message[:10]) self.bgp_id = str(netaddr.IPAddress(self.bgp_id)) if self.version != 4: raise excp.OpenMessageError( sub_error=bgp_cons.ERR_MSG_OPEN_UNSUP_VERSION, data=self.version) if isinstance(self.asn, float): self.asn = str(self.asn).split('.') self.asn = 65536 * (int(self.asn[0])) + int(self.asn[1]) if self.asn == 0: raise excp.OpenMessageError( sub_error=bgp_cons.ERR_MSG_OPEN_BAD_PEER_AS, data=self.asn) if self.bgp_id == 0: raise excp.OpenMessageError( sub_error=bgp_cons.ERR_MSG_OPEN_BAD_BGP_ID, data=self.bgp_id) if self.opt_para_len: self.opt_paras = message[10:] while self.opt_paras: opt_para_type, opt_para_length = struct.unpack('!BB', self.opt_paras[:2]) if opt_para_type != 2: raise excp.OpenMessageError( sub_error=bgp_cons.ERR_MSG_OPEN_UNSUP_OPT_PARAM, data=message[10:]) capabilities = self.opt_paras[2:opt_para_length + 2] while capabilities: capability = Capability() capability.parse(capabilities) if capability.capa_code == capability.FOUR_BYTES_ASN: asn = struct.unpack('!I', capability.capa_value)[0] self.asn = asn self.capa_dict['four_bytes_as'] = True elif capability.capa_code == capability.MULTIPROTOCOL_EXTENSIONS: if 'afi_safi' not in self.capa_dict: self.capa_dict['afi_safi'] = [] afi, res, safi = struct.unpack('!HBB', capability.capa_value) self.capa_dict['afi_safi'].append((afi, safi)) elif capability.capa_code == capability.ROUTE_REFRESH: self.capa_dict['route_refresh'] = True elif capability.capa_code == capability.CISCO_ROUTE_REFRESH: self.capa_dict['cisco_route_refresh'] = True elif capability.capa_code == capability.GRACEFUL_RESTART: self.capa_dict['graceful_restart'] = True elif capability.capa_code == capability.CISCO_MULTISESSION_BGP: self.capa_dict['cisco_multi_session'] = True elif capability.capa_code == capability.ENHANCED_ROUTE_REFRESH: self.capa_dict['enhanced_route_refresh'] = True elif capability.capa_code == capability.ADD_PATH: if 'add_path' not in self.capa_dict: self.capa_dict['add_path'] = [] while len(capability.capa_value) % 4 == 0 and capability.capa_value: afi, safi, send_rev = struct.unpack('!HBB', capability.capa_value[:4]) self.capa_dict['add_path'].append( { 'afi_safi': bgp_cons.AFI_SAFI_DICT[(afi, safi)], 'send/receive': bgp_cons.ADD_PATH_ACT_DICT[send_rev] } ) capability.capa_value = capability.capa_value[4:] elif capability.capa_code == capability.LLGR: self.capa_dict['LLGR'] = [] while len(capability.capa_value) >= 7: afi, safi, flag = struct.unpack('!HBB', capability.capa_value[:4]) time = struct.unpack('!I', b'\x00' + capability.capa_value[4:7])[0] self.capa_dict['LLGR'].append({ 'afi_safi': [afi, safi], 'time': time }) capability.capa_value = capability.capa_value[7:] elif capability.capa_code == capability.EXTENDED_NEXT_HOP: self.capa_dict['ext_nexthop'] = [] while len(capability.capa_value) > 0: afi, safi, nexthop = struct.unpack('!HHH', capability.capa_value[:6]) capability.capa_value = 
capability.capa_value[6:] self.capa_dict['ext_nexthop'].append({ "afi_safi": [afi, safi], "nexthop_afi": nexthop }) else: self.capa_dict[str(capability.capa_code)] = repr(capability.capa_value) capabilities = capabilities[2 + capability.capa_length:] self.opt_paras = self.opt_paras[opt_para_length + 2:] return { 'version': self.version, 'asn': self.asn, 'hold_time': self.hold_time, 'bgp_id': self.bgp_id, 'capabilities': self.capa_dict } @staticmethod def construct_header(msg): return b'\xff'*16 + struct.pack('!HB', len(msg) + 19, 1) + msg
Apache License 2.0
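A hedged sketch of serializing an OPEN message with this class. The capability keys follow those consumed in construct(); the ASN, hold time, and router ID are placeholders, and passing bgp_id as the integer form of an IPv4 address is an assumption based on the '!BHHIB' pack format. The shape of the 'afi_safi' value is inferred from the parse() side of this module.

    import netaddr
    from yabgp.message.open import Open

    my_capability = {
        'afi_safi': [(1, 1)],        # IPv4 unicast; shape assumed from parse()
        'route_refresh': True,
        'four_bytes_as': True,
    }
    open_msg = Open(version=4, asn=65001, hold_time=180,
                    bgp_id=int(netaddr.IPAddress('10.0.0.9')))
    raw = open_msg.construct(my_capability)   # bytes ready to write to the peer socket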
pbattaglia/scenesim
scenesim/physics/bulletbase.py
BulletBase.add_ghostnode
python
def add_ghostnode(node):
    name = "%s-ghost" % node.getName()
    ghost = NodePath(BulletGhostNode(name))
    ghost.reparentTo(node)
    return ghost
Adds a child ghostnode to a node as a workaround for the ghost-static node collision detection problem.
https://github.com/pbattaglia/scenesim/blob/2633c63bc5cb97ea99017b2e25fc9b4f66d72605/scenesim/physics/bulletbase.py#L582-L588
from collections import Iterable from contextlib import contextmanager from functools import update_wrapper from itertools import combinations, izip from math import isnan, sqrt from warnings import warn import numpy as np from panda3d.bullet import (BulletBaseCharacterControllerNode, BulletBodyNode, BulletConstraint, BulletDebugNode, BulletGenericConstraint, BulletGhostNode, BulletVehicle, BulletWorld) from panda3d.core import (BitMask32, NodePath, Point3, PythonCallbackObject, Quat, TransformState, Vec3) from pdb import set_trace as BP nan = float("nan") class BulletBaseError(Exception): pass class DeactivationEnabledWarning(UserWarning): pass class CollisionMonitor(object): def __init__(self, world): self.world = world self.reset() self.use_callback = False def push_notifiers(self, bodies): if self.use_callback: self.notifies = {body: body.notifiesCollisions() for body in bodies} for body in bodies: body.notifyCollisions(True) self.world.setContactAddedCallback(PythonCallbackObject(self)) else: self.bodies = bodies self.reset() def pop_notifiers(self): if self.use_callback: self.world.clearContactAddedCallback() for body, notify in self.notifies.iteritems(): body.notifyCollisions(notify) else: pass def reset(self): self._N = 0 def __nonzero__(self): return self._N > 0 def __iadd__(self, x): self._N += x def __gt__(self, x): return self._N > x def __ge__(self, x): return self._N >= x def __lt__(self, x): return self._N < x def __le__(self, x): return self._N <= x def __call__(self, data): self.__iadd__(1) def detect18(self): if not self.use_callback: def detect(a, b): return self.world.contactTestPair(a, b).getNumContacts() > 0 n = sum(detect(a, b) for a, b in combinations(self.bodies, 2)) self += n class JointManager(dict): def __init__(self, *args, **kwargs): try: self.bbase = kwargs.pop("bbase") except KeyError: pass super(JointManager, self).__init__(*args, **kwargs) def __setitem__(self, key, val): if not isinstance(val, BulletConstraint): raise TypeError("Bad type: %s" % type(val)) try: self.bbase.attach(val) except AttributeError: BP() pass super(self.__class__, self).__setitem__(key, val) def __delitem__(self, key): try: self.bbase.remove(self[key]) except AttributeError: BP() pass super(self.__class__, self).__delitem__(key) def pop(self, key): joint = super(JointManager, self).pop(key) try: self.bbase.remove(joint) except AttributeError: pass return joint @staticmethod def make_fixed(np0, np1, type_=BulletGenericConstraint, cfm=0.01, erp=.99): t0 = np0.getTop() t1 = np1.getTop() p0 = np0.getPos(t0) p1 = np1.getPos(t1) q0 = np0.getQuat(t0) q1 = np1.getQuat(t1) pivot = Point3((p0 + p1) / 2.) disp = Point3((p1 - p0) / 2.) pivot_np = t0.attachNewNode("pivot-node") pivot_np.setPos(pivot) s = Vec3(1, 1, 1) q = Quat.identQuat() q0i = Quat(q0) q1i = Quat(q1) q0i.invertInPlace() q1i.invertInPlace() q0i *= q q1i *= q ts0 = TransformState.makePosQuatScale(disp, q0i, s) ts1 = TransformState.makePosQuatScale(-disp, q1i, s) pivot_np.removeNode() joint = type_(np0.node(), np1.node(), ts0, ts1, False) for ax in xrange(4): joint.setAngularLimit(ax, 0, 0) joint.setLinearLimit(ax, 0, 0) joint.setParam(type_.CPErp, erp) joint.setParam(type_.CPCfm, cfm) joint.setDebugDrawSize(2) return joint class BulletBase(object): ghost_bit = BitMask32.bit(1) static_bit = BitMask32.bit(2) dynamic_bit = ghost_bit | static_bit bw_types = (BulletBaseCharacterControllerNode, BulletBodyNode, BulletConstraint, BulletVehicle) def __init__(self): self.world = None self.sim_par = {"size": 1. 
/ 100, "n_subs": 10, "size_sub": 1. / 1000} self.axis_constraint_fac = Vec3(1, 1, 1) self.axis_constraint_disp = Vec3(nan, nan, nan) self._destructables = () def init(self): self.world = BulletWorld() def destroy(self): for key in self._destructables: getattr(self, key).destroy() def setup_debug(self): debug_node = BulletDebugNode('Debug') debug_node.showWireframe(True) debug_node.showConstraints(True) debug_node.showBoundingBoxes(True) debug_node.showNormals(True) self.world.setDebugNode(debug_node) return debug_node @property def bodies(self): bodies = (self.world.getRigidBodies() + self.world.getSoftBodies() + self.world.getGhosts()) return bodies def _constrain_axis(self, body): for axis, (f, d) in enumerate(zip(self.axis_constraint_fac, self.axis_constraint_disp)): if not f and not isnan(d): nodep = NodePath(body) pos = nodep.getPos() pos[axis] = d nodep.setPos(pos) try: body.setLinearFactor(self.axis_constraint_fac) s = sum(self.axis_constraint_fac) if s == 3.: v = self.axis_constraint_fac elif s == 2.: v = -self.axis_constraint_fac + 1 else: v = Vec3.zero() body.setAngularFactor(v) except AttributeError: pass def set_axis_constraint(self, axis, on, disp=None): self.axis_constraint_fac[axis] = int(on) self.axis_constraint_disp[axis] = disp if disp is not None else nan for body in self.bodies: self._constrain_axis(body) def attach(self, objs, suppress_deact_warn=False): if not self.world: raise BulletBaseError("No BulletWorld initialized.") if not isinstance(objs, Iterable): objs = [objs] elif isinstance(objs, dict): objs = objs.itervalues() bw_objs = [] for obj in objs: if isinstance(obj, NodePath): obj = obj.node() if isinstance(obj, self.bw_types): bw_objs.append(obj) bw_objs = set(bw_objs) - set(self.bodies) for obj in bw_objs: de = getattr(obj, "isDeactivationEnabled", lambda: True)() if not suppress_deact_warn and de: msg = "Deactivation is enabled on object: %s" % obj warn(msg, DeactivationEnabledWarning) self._constrain_axis(obj) try: self.world.attach(obj) except AttributeError: DeprecationWarning("Upgrade to Panda3d 1.9.") for attr in ("attachRigidBody", "attachConstraint", "attachGhost"): attach = getattr(self.world, attr) try: attach(obj) except TypeError: pass else: break def remove(self, objs): if not self.world: raise BulletBaseError("No BulletWorld initialized.") if not isinstance(objs, Iterable): objs = [objs] elif isinstance(objs, dict): objs = objs.itervalues() bw_objs = [] for obj in objs: if isinstance(obj, NodePath): obj = obj.node() if isinstance(obj, self.bw_types): bw_objs.append(obj) for obj in bw_objs: try: self.world.remove(obj) except AttributeError: DeprecationWarning("Upgrade to Panda3d 1.9.") for attr in ("removeRigidBody", "removeConstraint", "removeGhost"): remove = getattr(self.world, attr) try: remove(obj) except TypeError: pass else: break def remove_all(self): objs = (self.world.getCharacters() + self.world.getConstraints() + self.world.getGhosts() + self.world.getRigidBodies() + self.world.getSoftBodies() + self.world.getVehicles()) self.remove(objs) @property def gravity(self): return self.world.getGravity() @gravity.setter def gravity(self, gravity): self.world.setGravity(Vec3(*gravity)) def step_fast(self): self.world.doPhysics(self.sim_par["size"], self.sim_par["n_subs"], self.sim_par["size_sub"]) def step(self, *args, **kwargs): dt = args[0] if len(args) > 0 else self.sim_par["size"] n_subs = args[1] if len(args) > 1 else self.sim_par["n_subs"] size_sub = args[2] if len(args) > 2 else self.sim_par["size_sub"] force = kwargs.get("force", 
None) if force: bodies, vecpos, dur = force dt0 = np.clip(dur, 0., dt) n_subs0 = int(np.ceil(n_subs * dt0 / dt)) dt1 = dt - dt0 n_subs1 = n_subs - n_subs0 + 1 for body in bodies: body.applyForce(Vec3(*vecpos[0]), Point3(*vecpos[1])) self.world.doPhysics(dt0, n_subs0, size_sub) for body in bodies: body.clearForces() else: dt1 = dt n_subs1 = n_subs self.world.doPhysics(dt1, n_subs1, size_sub) @staticmethod def attenuate_velocities(bodies, linvelfac=0., angvelfac=0.): for body in bodies: linvel0 = body.getLinearVelocity() angvel0 = body.getAngularVelocity() if linvel0.normalize(): linvel = linvel0 * linvelfac body.setLinearVelocity(linvel) if angvel0.normalize(): angvel = angvel0 * angvelfac body.setAngularVelocity(angvel) def repel(self, n_steps=1000, thresh=10, step_size=0.01): @contextmanager def repel_context(world): def change_contact_thresh(bodies, thresh=0.001): if isinstance(thresh, Iterable): it = izip(bodies, thresh) else: it = ((body, thresh) for body in bodies) thresh0 = [] for body, th in it: thresh0.append(body.getContactProcessingThreshold()) body.setContactProcessingThreshold(th) return thresh0 def rescale_masses(bodies): masses, inertias = zip(*[(body.getMass(), body.getInertia()) for body in bodies]) volumefac = 1. for body, mass, inertia in zip(bodies, masses, inertias): if mass > 0.: it = inertia / mass * 12 h = sqrt((it[0] - it[1] + it[2]) / 2) w = sqrt(it[2] - h ** 2) d = sqrt(it[1] - w ** 2) volume = h * w * d body.setMass(volume * volumefac) return masses bodies = world.getRigidBodies() gravity = world.getGravity() world.setGravity(Vec3.zero()) delta = -0.001 cp_thresh = change_contact_thresh(bodies, thresh=delta) masses = rescale_masses(bodies) deactivations = [b.isDeactivationEnabled() for b in bodies] for body in bodies: body.setDeactivationEnabled(False) self.attenuate_velocities(bodies) collisions = CollisionMonitor(world) collisions.push_notifiers(bodies) yield bodies, collisions collisions.pop_notifiers() self.attenuate_velocities(bodies) change_contact_thresh(bodies, thresh=cp_thresh) for body, mass in zip(bodies, masses): body.setMass(mass) world.setGravity(gravity) for body, d in zip(bodies, deactivations): body.setDeactivationEnabled(d) with repel_context(self.world) as (bodies, collisions): done_count = 0 for istep in xrange(n_steps): self.world.doPhysics(step_size, 1, step_size) collisions.detect18() if collisions: done_count = 0 else: done_count += 1 if any(body.getMass() > 0. and not body.isActive() for body in bodies): BP() if done_count >= thresh: break linvelfac = bool(collisions) * 0.001 angvelfac = bool(collisions) * 0.001 self.attenuate_velocities(bodies, linvelfac, angvelfac) collisions.reset() return istep @classmethod def add_collide_mask(cls, func0): def func(*args, **kwargs): node = func0(*args, **kwargs) if isinstance(node, BulletGhostNode): bit = cls.ghost_bit elif node.getMass() == 0.: bit = cls.static_bit else: bit = cls.dynamic_bit node.setCollideMask(bit) return node return update_wrapper(func0, func) @staticmethod
MIT License
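A brief sketch of attaching the workaround ghost to a rigid-body NodePath; the body node here is a placeholder and a running Panda3D/Bullet scene is assumed.

    from panda3d.core import NodePath
    from panda3d.bullet import BulletRigidBodyNode
    from scenesim.physics.bulletbase import BulletBase

    box_np = NodePath(BulletRigidBodyNode('box'))        # placeholder rigid body
    ghost_np = BulletBase.add_ghostnode(box_np)          # child named "box-ghost"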
onshape-public/onshape-clients
python/onshape_client/oas/models/bt_microversion_info.py
BTMicroversionInfo.openapi_types
python
def openapi_types():
    return {
        "microversion": (str,),
    }
This must be a class method so a model may have properties that are of type self; this ensures that we don't create a cyclic import.

Returns:
    openapi_types (dict): The key is attribute name and the value is attribute type.
https://github.com/onshape-public/onshape-clients/blob/20843a00c628e516e7219e17a23ec4ef2bf9f16f/python/onshape_client/oas/models/bt_microversion_info.py#L66-L77
from __future__ import absolute_import

import re
import sys

import six
import nulltype

from onshape_client.oas.model_utils import (
    ModelComposed,
    ModelNormal,
    ModelSimple,
    date,
    datetime,
    file_type,
    int,
    none_type,
    str,
    validate_get_composed_info,
)


class BTMicroversionInfo(ModelNormal):
    allowed_values = {}

    validations = {}

    additional_properties_type = None

    @staticmethod
MIT License
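A short sketch of inspecting the type map and constructing the model; passing the attribute as a keyword follows the usual generated-client pattern, and the microversion value is made up.

    from onshape_client.oas.models.bt_microversion_info import BTMicroversionInfo

    print(BTMicroversionInfo.openapi_types())            # {'microversion': (str,)}
    info = BTMicroversionInfo(microversion="0123456789abcdef")   # hypothetical value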
decred/tinydecred
decred/decred/dcr/blockchain.py
LocalNode.header
python
def header(self, blockHash):
    try:
        return self.headerDB[blockHash]
    except database.NoValueError:
        header = self.rpc.getBlockHeader(blockHash, verbose=False)
        self.headerDB[header.cachedHash()] = header
        return header
Get the header, from the headerDB if possible, otherwise fetch from RPC.

Args:
    blockHash (ByteArray): The block header hash.

Returns:
    BlockHeader: The block header.
https://github.com/decred/tinydecred/blob/f7f7d9f7da8d49d9ae9a72e5579b07a3b8572267/decred/decred/dcr/blockchain.py#L66-L81
import time

from decred.dcr import addrlib, rpc
from decred.dcr.wire.msgblock import BlockHeader
from decred.util import database, helpers
from decred.util.encode import ByteArray

log = helpers.getLogger("blockchain")


class LocalNode:
    def __init__(self, netParams, dbPath, url, user, pw, certPath=None):
        self.netParams = netParams
        self.db = database.KeyValueDatabase(dbPath)
        self.mainchainDB = self.db.child(
            "mainchain", datatypes=("INTEGER", "BLOB"), blobber=ByteArray
        )
        self.headerDB = self.db.child(
            "headers", blobber=BlockHeader, keyBlobber=ByteArray
        )
        self.socketURL = helpers.makeWebsocketURL(url, "ws")
        self.rpc = None

        def connect():
            self.close()
            self.rpc = rpc.WebsocketClient(self.socketURL, user, pw, cert=certPath)

        self.connect = connect
        connect()

    def close(self):
        if self.rpc:
            self.rpc.close()

    def connected(self):
        return self.rpc and not self.rpc.closed
ISC License
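A hedged sketch of the cached header lookup; the RPC URL, credentials, and cert path are placeholders, and netParams and block_hash are assumed to come from elsewhere in the application (along with a running dcrd RPC endpoint).

    from decred.dcr.blockchain import LocalNode

    node = LocalNode(netParams, "headers.db", "https://localhost:9109",
                     "rpcuser", "rpcpass", certPath="rpc.cert")   # placeholder credentials
    hdr = node.header(block_hash)   # served from headerDB when cached, otherwise fetched and stored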
muneebalam/scrapenhl2
scrapenhl2/plot/app/player_page.py
generate_table
python
def generate_table(dataframe):
    return html.Table(
        [html.Tr([html.Th(col) for col in dataframe.columns])] +
        [html.Tr([html.Td(dataframe.iloc[i][col]) for col in dataframe.columns])
         for i in range(len(dataframe))])
Transforms a pandas dataframe into an HTML table
https://github.com/muneebalam/scrapenhl2/blob/a9867f03d002773da852fc150f2976adc2ba8c25/scrapenhl2/plot/app/player_page.py#L24-L31
import pandas as pd
import datetime

import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objs as go

import scrapenhl2.scrape.schedules as schedules
import scrapenhl2.scrape.players as players
import scrapenhl2.plot.rolling_cf_gf as rolling_cf_gf
import scrapenhl2.plot.rolling_boxcars as rolling_boxcars
import scrapenhl2.plot.visualization_helper as vhelper
import scrapenhl2.scrape.team_info as team_info
MIT License
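A self-contained sketch of turning a small DataFrame into the Dash HTML table; the column names are arbitrary.

    import pandas as pd
    from scrapenhl2.plot.app.player_page import generate_table

    df = pd.DataFrame({'Player': ['Skater A', 'Skater B'], 'CF%': [52.1, 48.7]})
    table = generate_table(df)   # dash_html_components.Table ready to place in a layout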
seldonio/alibi
alibi/explainers/cfrl_tabular.py
CounterfactualRLTabular._diversity
python
def _diversity(self, X: np.ndarray, Y_t: np.ndarray,
               C: Optional[List[Dict[str, List[Union[str, float]]]]],
               num_samples: int = 1, batch_size: int = 100,
               patience: int = 1000, tolerance: float = 1e-3) -> Explanation:
    if C is None:
        raise ValueError("A diverse set of counterfactual can not be generated if a `None` conditioning is "
                         "used during training. Use the `explain` method to generate a counterfactual. The "
                         "generation process is deterministic in its core. If conditioning is used during training "
                         "a diverse set of counterfactual can be generated by restricting each feature condition "
                         "to a subset to remain feasible.")

    if X.shape[0] != 1:
        raise ValueError("Only a single input instance can be passed.")

    if Y_t.shape[0] != 1:
        raise ValueError("Only a single label can be passed.")

    if (C is not None) and len(C) > 1:
        raise ValueError("At most, one condition can be passed.")

    X_repeated = np.tile(X, (batch_size, 1))
    Y_t = np.tile(np.atleast_2d(Y_t), (batch_size, 1))

    X_cf_buff = None
    for i in tqdm(count()):
        if i == patience:
            break

        if (X_cf_buff is not None) and (X_cf_buff.shape[0] >= num_samples):
            break

        C_vec = get_conditional_vector(X=X_repeated,
                                       condition=C[0] if len(C) else {},
                                       preprocessor=self.params["encoder_preprocessor"],
                                       feature_names=self.params["feature_names"],
                                       category_map=self.params["category_map"],
                                       stats=self.params["stats"],
                                       immutable_features=self.params["immutable_features"],
                                       diverse=True)

        results = self._compute_counterfactual(X=X_repeated, Y_t=Y_t, C=C_vec)
        X_cf, Y_m_cf, Y_t = results["X_cf"], results["Y_m_cf"], results["Y_t"]

        X_cf = X_cf[Y_t == Y_m_cf]
        if X_cf.shape[0] == 0:
            continue

        _, indices = np.unique(np.floor(X_cf / tolerance).astype(int), return_index=True, axis=0)
        if X_cf_buff is None:
            X_cf_buff = X_cf[indices]
        else:
            X_cf_buff = np.concatenate([X_cf_buff, X_cf[indices]], axis=0)
            _, indices = np.unique(np.floor(X_cf_buff / tolerance).astype(int), return_index=True, axis=0)
            X_cf_buff = X_cf_buff[indices]

    X_cf = X_cf_buff[:num_samples] if (X_cf_buff is not None) else np.array([])
    Y_m_cf = self.params["predictor"](X_cf) if X_cf.shape[0] != 0 else np.array([])
    if self._is_classification(pred=Y_m_cf):
        Y_m_cf = np.argmax(Y_m_cf, axis=1)

    Y_m = self.params["predictor"](X)
    if self._is_classification(Y_m):
        Y_m = np.argmax(Y_m, axis=1)

    if self._is_classification(Y_t):
        Y_t = np.argmax(Y_t, axis=1)

    return self._build_explanation(X=X, Y_m=Y_m, X_cf=X_cf, Y_m_cf=Y_m_cf, Y_t=Y_t, C=C)
Generates a set of diverse counterfactuals given a single instance, target and conditioning.

Parameters
----------
X
    Input instance.
Y_t
    Target label.
C
    List of conditional dictionaries. If `None`, it means that no conditioning was used during
    training (i.e. the `conditional_func` returns `None`).
num_samples
    Number of diverse counterfactual samples to be generated.
batch_size
    Batch size to use when generating counterfactuals at inference.
patience
    Maximum number of iterations to perform before the diversity search stops. If -1, the search
    stops only if the desired number of samples has been found.
tolerance
    Tolerance to distinguish two counterfactual instances.

Returns
-------
Explanation object containing the diverse counterfactuals.
https://github.com/seldonio/alibi/blob/ef757b9579f85ef2e3dfc7088211969616ee3fdb/alibi/explainers/cfrl_tabular.py#L377-L497
from alibi.api.interfaces import Explainer, Explanation from alibi.utils.frameworks import has_pytorch, has_tensorflow from alibi.explainers.cfrl_base import CounterfactualRL, Postprocessing, _PARAM_TYPES from alibi.explainers.backends.cfrl_tabular import sample, get_conditional_vector, get_statistics import numpy as np from tqdm import tqdm from itertools import count from functools import partial from typing import Tuple, List, Dict, Callable, Union, Optional, TYPE_CHECKING if TYPE_CHECKING: import torch import tensorflow if has_pytorch: from alibi.explainers.backends.pytorch import cfrl_tabular as pytorch_tabular_backend if has_tensorflow: from alibi.explainers.backends.tensorflow import cfrl_tabular as tensorflow_tabular_backend class SampleTabularPostprocessing(Postprocessing): def __init__(self, category_map: Dict[int, List[str]], stats: Dict[int, Dict[str, float]]): super().__init__() self.category_map = category_map self.stats = stats def __call__(self, X_cf: List[np.ndarray], X: np.ndarray, C: Optional[np.ndarray]) -> List[np.ndarray]: return sample(X_hat_split=X_cf, X_ohe=X, C=C, stats=self.stats, category_map=self.category_map) class ConcatTabularPostprocessing(Postprocessing): def __call__(self, X_cf: List[np.ndarray], X: np.ndarray, C: Optional[np.ndarray]) -> np.ndarray: return np.concatenate(X_cf, axis=1) _PARAM_TYPES["complex"] += ["conditional_vector", "stats"] class CounterfactualRLTabular(CounterfactualRL): def __init__(self, predictor: Callable[[np.ndarray], np.ndarray], encoder: 'Union[tensorflow.keras.Model, torch.nn.Module]', decoder: 'Union[tensorflow.keras.Model, torch.nn.Module]', encoder_preprocessor: Callable, decoder_inv_preprocessor: Callable, coeff_sparsity: float, coeff_consistency: float, feature_names: List[str], category_map: Dict[int, List[str]], immutable_features: Optional[List[str]] = None, ranges: Optional[Dict[str, Tuple[int, int]]] = None, weight_num: float = 1.0, weight_cat: float = 1.0, latent_dim: Optional[int] = None, backend: str = "tensorflow", seed: int = 0, **kwargs): super().__init__(encoder=encoder, decoder=decoder, latent_dim=latent_dim, predictor=predictor, coeff_sparsity=coeff_sparsity, coeff_consistency=coeff_consistency, backend=backend, seed=seed, **kwargs) self.params["encoder_preprocessor"] = encoder_preprocessor self.params["decoder_inv_preprocessor"] = decoder_inv_preprocessor self.params["category_map"] = category_map self.params["feature_names"] = feature_names self.params["ranges"] = ranges if (ranges is not None) else dict() self.params["immutable_features"] = immutable_features if (immutable_features is not None) else list() self.params["weight_num"] = weight_num self.params["weight_cat"] = weight_cat if "sparsity_loss" not in kwargs: self.params["sparsity_loss"] = partial(self.backend.sparsity_loss, category_map=self.params["category_map"], weight_num=weight_num, weight_cat=weight_cat) if "consistency_loss" not in kwargs: self.params["consistency_loss"] = self.backend.consistency_loss if "conditional_func" not in kwargs: self.params["conditional_func"] = partial(self.backend.generate_condition, feature_names=self.params["feature_names"], category_map=self.params["category_map"], ranges=self.params["ranges"], immutable_features=self.params["immutable_features"]) if "conditional_vector" not in kwargs: self.params["conditional_vector"] = partial(get_conditional_vector, preprocessor=self.params["encoder_preprocessor"], feature_names=self.params["feature_names"], category_map=self.params["category_map"], 
ranges=self.params["ranges"], immutable_features=self.params["immutable_features"]) self.meta["params"].update(CounterfactualRLTabular._serialize_params(self.params)) def _select_backend(self, backend, **kwargs): return tensorflow_tabular_backend if backend == "tensorflow" else pytorch_tabular_backend def _validate_input(self, X: np.ndarray): if len(X.shape) != 2: raise ValueError(f"The input should be a 2D array. Found {len(X.shape)}D instead.") if X.shape[1] != len(self.params["feature_names"]): raise ValueError(f"Unexpected number of features. The expected number " f"is {len(self.params['feature_names'])}, but the input has {X.shape[1]} features.") return X def fit(self, X: np.ndarray) -> 'Explainer': self.params["stats"] = get_statistics(X=X, preprocessor=self.params["encoder_preprocessor"], category_map=self.params["category_map"]) self.params["postprocessing_funcs"] = [ SampleTabularPostprocessing(stats=self.params["stats"], category_map=self.params["category_map"]), ConcatTabularPostprocessing(), ] self.meta["params"].update(CounterfactualRLTabular._serialize_params(self.params)) self._validate_input(X) return super().fit(X) def explain(self, X: np.ndarray, Y_t: np.ndarray = None, C: Optional[List[Dict[str, List[Union[str, float]]]]] = None, batch_size: int = 100, diversity: bool = False, num_samples: int = 1, patience: int = 1000, tolerance: float = 1e-3) -> Explanation: self._validate_input(X) self._validate_target(Y_t) if diversity: return self._diversity(X=X, Y_t=Y_t, C=C, num_samples=num_samples, batch_size=batch_size, patience=patience, tolerance=tolerance) X_zeros = np.zeros((1, X.shape[1])) C_zeros = self.params["conditional_func"](X_zeros) if C is None: if C_zeros is not None: raise ValueError("A `None` conditioning is not a valid input when training with conditioning. " "If no feature conditioning is desired for the given input, `C` is expected to be an " "empty list. A `None` conditioning is valid only when no conditioning was used " "during training (i.e. `conditional_func` returns `None`).") return super().explain(X=X, Y_t=Y_t, C=C, batch_size=batch_size) elif C_zeros is None: raise ValueError("Conditioning different than `None` is not a valid input when training without " "conditioning. If feature conditioning is desired, consider defining an appropriate " "`conditional_func` that does not return `None`.") if len(C) == 0: C = [dict()] if len(C) != 1 and len(C) != X.shape[0]: raise ValueError("The number of conditions should be 1 or equals the number of samples in x.") if len(C) == 1: C_vec = self.params["conditional_vector"](X=X, condition=C[0], stats=self.params["stats"]) else: C_vecs = [] for i in range(len(C)): C_vecs.append(self.params["conditional_vector"](X=np.atleast_2d(X[i]), condition=C[i], stats=self.params["stats"])) C_vec = np.concatenate(C_vecs, axis=0) explanation = super().explain(X=X, Y_t=Y_t, C=C_vec, batch_size=batch_size) explanation.data.update({"condition": C}) return explanation
Apache License 2.0
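Callers reach this method through explain() with diversity=True rather than calling it directly. A hedged sketch, assuming an already-fitted CounterfactualRLTabular explainer, a single preprocessed row X, and an illustrative numerical feature condition.

    import numpy as np

    # `explainer` is an already-fitted CounterfactualRLTabular instance.
    explanation = explainer.explain(
        X=X[0:1],                     # exactly one instance, as _diversity requires
        Y_t=np.array([1]),            # target class
        C=[{"Age": [20, 50]}],        # feature name and range are illustrative
        diversity=True,
        num_samples=5,
        batch_size=100,
    )
    # `explanation` holds up to num_samples counterfactuals that differ by more than `tolerance`.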