import os
import argparse
import json
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from tmd.pwscf.parseScf import total_energy_eV_from_scf, fermi_from_scf, D_from_scf
from tmd.wannier.parseWout import atom_order_from_wout
from tmd.wannier.bands import Hk_recip
from tmd.wannier.findGaps import HrFindGaps
from tmd.bilayer.bilayer_util import global_config
from tmd.bilayer.dgrid import get_prefixes
from tmd.bilayer.wannier import get_Hr
def ds_from_prefixes(prefixes):
ds = []
for prefix in prefixes:
sp = prefix.split("_")
da = float(sp[-3])
db = float(sp[-1])
ds.append((da, db))
return ds
def wrap_cell(ds, values):
wrapped_ds, wrapped_values = [], []
for d, v in zip(ds, values):
da, db = d[0], d[1]
wrapped_ds.append(d)
wrapped_values.append(v)
if da == 0.0 and db == 0.0:
wrapped_ds.append((1.0, 1.0))
wrapped_values.append(v)
wrapped_ds.append((0.0, 1.0))
wrapped_values.append(v)
wrapped_ds.append((1.0, 0.0))
wrapped_values.append(v)
elif da == 0.0:
wrapped_ds.append((1.0, db))
wrapped_values.append(v)
elif db == 0.0:
wrapped_ds.append((da, 1.0))
wrapped_values.append(v)
return wrapped_ds, wrapped_values
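# Illustrative sketch (not part of the original module): wrap_cell copies
# values on the cell boundary so that a displacement grid covering
# [0, 1) x [0, 1) closes at d = 1 when plotted. A hypothetical check:
def _example_wrap_cell():
    ds = [(0.0, 0.0), (0.5, 0.0)]
    values = [1.0, 2.0]
    wrapped_ds, wrapped_values = wrap_cell(ds, values)
    # (0, 0) is mirrored to the other three corners; (0.5, 0) to (0.5, 1).
    assert wrapped_ds == [(0.0, 0.0), (1.0, 1.0), (0.0, 1.0), (1.0, 0.0),
                          (0.5, 0.0), (0.5, 1.0)]
    assert wrapped_values == [1.0, 1.0, 1.0, 1.0, 2.0, 2.0]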
def sorted_d_group(ds, values):
'''Zip ds with values and sort such that d's (and their associated values)
are in ascending order, with db ascending faster than da.
'''
dvs = list(zip(ds, values))
dvs = sorted(dvs, key=lambda dp: dp[0][1])
dvs = sorted(dvs, key=lambda dp: dp[0][0])
return dvs
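# Illustrative sketch (not part of the original module): sorted_d_group leans
# on Python's stable sort, so sorting by db first and then by da leaves db as
# the faster-varying index. A hypothetical check:
def _example_sorted_d_group():
    ds = [(0.5, 0.0), (0.0, 0.5), (0.0, 0.0)]
    values = ["a", "b", "c"]
    assert sorted_d_group(ds, values) == [((0.0, 0.0), "c"),
                                          ((0.0, 0.5), "b"),
                                          ((0.5, 0.0), "a")]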
def get_energies(work, dps):
energies = []
for d, prefix in dps:
wannier_dir = os.path.join(work, prefix, "wannier")
scf_path = os.path.join(wannier_dir, "scf.out")
energy = total_energy_eV_from_scf(scf_path)
energies.append(energy)
return energies
def energies_relative_to(energies, dps, base_d):
base_d_index = None
for d_i, (d, prefix) in enumerate(dps):
if d == base_d:
base_d_index = d_i
if base_d_index is None:
raise ValueError("d = (0, 0) not found")
base_energy = energies[base_d_index]
energies_rel_meV = []
for E in energies:
E_rel = (E - base_energy) * 1000
energies_rel_meV.append(E_rel)
return energies_rel_meV
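# Illustrative sketch (not part of the original module): total energies are
# reported relative to the base displacement and converted from eV to meV.
# The numbers below are hypothetical.
def _example_energies_relative_to():
    dps = [((0.0, 0.0), "prefix_0"), ((0.5, 0.5), "prefix_1")]
    energies = [-100.000, -100.005]  # eV
    rel = energies_relative_to(energies, dps, (0.0, 0.0))
    # The second system sits 5 meV below the d = (0, 0) reference.
    assert [round(x, 6) for x in rel] == [0.0, -5.0]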
def sort_order(xs, f):
with_orig_order = zip(xs, range(len(xs)))
def wrap_f(x):
return f(x[0])
xs_sorted = sorted(with_orig_order, key=wrap_f)
order = []
for x, orig_index in xs_sorted:
order.append(orig_index)
return order
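# Illustrative sketch (not part of the original module): sort_order returns
# the permutation of original indices that sorts xs by the key function f.
def _example_sort_order():
    assert sort_order([3.0, 1.0, 2.0], lambda x: x) == [1, 2, 0]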
def get_atom_order(work, prefix):
z_order_syms = ["X1", "M", "X2", "X1p", "Mp", "X2p"]
wannier_dir = os.path.join(work, prefix, "wannier")
wout_path = os.path.join(wannier_dir, "{}.wout".format(prefix))
atom_symbols, atom_indices, cart_coords = atom_order_from_wout(wout_path)
z_order = sort_order(cart_coords, lambda x: x[2]) # sort by z coord
atom_Hr_order = []
for z_val in z_order:
atom_Hr_order.append(z_order_syms[z_val])
return atom_Hr_order
def orbital_index(atom_Hr_order, sym, orbital, spin, soc=True):
'''sym is in ["X1", "M", "X2", "X1p", "Mp", "X2p"];
orbital is in ["pz", "px", "py", "dz2", "dxz", "dyz", "dx2-y2", "dxy"];
spin is in ["up", "down"].
spin is ignored if soc is False.
'''
if soc:
num_spins = 2
else:
num_spins = 1
X_syms = ["X1", "X2", "X1p", "X2p"]
M_syms = ["M", "Mp"]
X_num_orbitals = num_spins*3
M_num_orbitals = num_spins*5
orb_index = 0
for at_sym in atom_Hr_order:
if at_sym == sym:
break
if at_sym in X_syms:
orb_index += X_num_orbitals
elif at_sym in M_syms:
orb_index += M_num_orbitals
else:
raise ValueError("unexpected value in orbital_orders")
X_orbitals = ["pz", "px", "py"]
M_orbitals = ["dz2", "dxz", "dyz", "dx2-y2", "dxy"]
if sym in X_syms:
this_orbitals = X_orbitals
elif sym in M_syms:
this_orbitals = M_orbitals
else:
raise ValueError("unrecognized atom position symbol")
if orbital not in this_orbitals:
raise ValueError("orbital {} not present for atom {}".format(orbital, sym))
for orb in this_orbitals:
if orb == orbital:
break
orb_index += num_spins
if not soc or spin == "up":
return orb_index
elif spin == "down":
return orb_index + 1
else:
raise ValueError("unrecognized spin")
def extract_Hk_vals(work, dps, soc):
orbital_pairs = [("X2_X1p_z_z_uu_K", (1/3, 1/3, 0), ["X2", "pz", "up", "X1p", "pz", "up"]),
("X2_X1p_z_z_ud_K", (1/3, 1/3, 0), ["X2", "pz", "up", "X1p", "pz", "down"]),
("M_X1p_z2_z_uu_K", (1/3, 1/3, 0), ["M", "dz2", "up", "X1p", "pz", "up"]),
("M_X1p_z2_z_ud_K", (1/3, 1/3, 0), ["M", "dz2", "up", "X1p", "pz", "down"]),
("X2_Mp_z_z2_uu_K", (1/3, 1/3, 0), ["X2", "pz", "up", "Mp", "dz2", "up"]),
("X2_Mp_z_z2_ud_K", (1/3, 1/3, 0), ["X2", "pz", "up", "Mp", "dz2", "down"]),
("X2_X1p_z_z_uu_Kp", (-1/3, -1/3, 0), ["X2", "pz", "up", "X1p", "pz", "up"]),
("X2_X1p_z_z_ud_Kp", (-1/3, -1/3, 0), ["X2", "pz", "up", "X1p", "pz", "down"]),
("M_X1p_z2_z_uu_Kp", (-1/3, -1/3, 0), ["M", "dz2", "up", "X1p", "pz", "up"]),
("M_X1p_z2_z_ud_Kp", (-1/3, -1/3, 0), ["M", "dz2", "up", "X1p", "pz", "down"]),
("X2_Mp_z_z2_uu_Kp", (-1/3, -1/3, 0), ["X2", "pz", "up", "Mp", "dz2", "up"]),
("X2_Mp_z_z2_ud_Kp", (-1/3, -1/3, 0), ["X2", "pz", "up", "Mp", "dz2", "down"]),
("X2_X1p_z_z_uu_G", (0, 0, 0), ["X2", "pz", "up", "X1p", "pz", "up"]),
("X2_X1p_z_z_ud_G", (0, 0, 0), ["X2", "pz", "up", "X1p", "pz", "down"]),
("M_X1p_z2_z_uu_G", (0, 0, 0), ["M", "dz2", "up", "X1p", "pz", "up"]),
("M_X1p_z2_z_ud_G", (0, 0, 0), ["M", "dz2", "up", "X1p", "pz", "down"]),
("X2_Mp_z_z2_uu_G", (0, 0, 0), ["X2", "pz", "up", "Mp", "dz2", "up"]),
("X2_Mp_z_z2_ud_G", (0, 0, 0), ["X2", "pz", "up", "Mp", "dz2", "down"]),
("X2_X1p_z_z_uu_M", (1/2, 0, 0), ["X2", "pz", "up", "X1p", "pz", "up"]),
("X2_X1p_z_z_ud_M", (1/2, 0, 0), ["X2", "pz", "up", "X1p", "pz", "down"]),
("M_X1p_z2_z_uu_M", (1/2, 0, 0), ["M", "dz2", "up", "X1p", "pz", "up"]),
("M_X1p_z2_z_ud_M", (1/2, 0, 0), ["M", "dz2", "up", "X1p", "pz", "down"]),
("X2_Mp_z_z2_uu_M", (1/2, 0, 0), ["X2", "pz", "up", "Mp", "dz2", "up"]),
("X2_Mp_z_z2_ud_M", (1/2, 0, 0), ["X2", "pz", "up", "Mp", "dz2", "down"]),
("X2_X1p_z_z_uu_Mp", (-1/2, 0, 0), ["X2", "pz", "up", "X1p", "pz", "up"]),
("X2_X1p_z_z_ud_Mp", (-1/2, 0, 0), ["X2", "pz", "up", "X1p", "pz", "down"]),
("M_X1p_z2_z_uu_Mp", (-1/2, 0, 0), ["M", "dz2", "up", "X1p", "pz", "up"]),
("M_X1p_z2_z_ud_Mp", (-1/2, 0, 0), ["M", "dz2", "up", "X1p", "pz", "down"]),
("X2_Mp_z_z2_uu_Mp", (-1/2, 0, 0), ["X2", "pz", "up", "Mp", "dz2", "up"]),
("X2_Mp_z_z2_ud_Mp", (-1/2, 0, 0), ["X2", "pz", "up", "Mp", "dz2", "down"])]
# has the structure {"val_label_1": [val1(d1), val1(d2), ...],
# "val_label_2": [val2(d1), val2(d2), ...], ...}
Hk_vals = {}
for d, prefix in dps:
Hr = get_Hr(work, prefix)
atom_Hr_order = get_atom_order(work, prefix)
for label, klat, orb_types in orbital_pairs:
i_sym, i_orbital, i_spin = orb_types[0], orb_types[1], orb_types[2]
j_sym, j_orbital, j_spin = orb_types[3], orb_types[4], orb_types[5]
i_index = orbital_index(atom_Hr_order, i_sym, i_orbital, i_spin, soc)
j_index = orbital_index(atom_Hr_order, j_sym, j_orbital, j_spin, soc)
Hk = Hk_recip(klat, Hr)
val = Hk[i_index, j_index]
re_label, im_label = "{}_re".format(label), "{}_im".format(label)
if re_label not in Hk_vals:
Hk_vals[re_label] = []
if im_label not in Hk_vals:
Hk_vals[im_label] = []
Hk_vals[re_label].append(val.real)
Hk_vals[im_label].append(val.imag)
return Hk_vals
def system_all_gaps(work, prefix, E_below_fermi, E_above_fermi, num_dos, na, nb):
HrPath = os.path.join(work, prefix, "wannier", "{}_hr.dat".format(prefix))
scf_path = os.path.join(work, prefix, "wannier", "scf.out")
E_F = fermi_from_scf(scf_path)
minE = E_F - E_below_fermi
maxE = E_F + E_above_fermi
D = D_from_scf(scf_path)
R = 2 * np.pi * np.linalg.inv(D)
nc = 1
gaps, dos_vals, E_vals = HrFindGaps(minE, maxE, num_dos, na, nb, nc, R, HrPath)
return gaps, dos_vals, E_vals
def find_gaps(work, dps, E_below_fermi, E_above_fermi, num_dos, na, nb):
gap_call_args = []
for d, prefix in dps:
gap_call_args.append((work, prefix, E_below_fermi, E_above_fermi, num_dos, na, nb))
with Pool() as pool:
all_gaps_output = pool.starmap(system_all_gaps, gap_call_args)
gaps = []
for d_index, (d, prefix) in enumerate(dps):
this_gaps = all_gaps_output[d_index][0]
scf_path = os.path.join(work, prefix, "wannier", "scf.out")
E_F = fermi_from_scf(scf_path)
gap_at_fermi = None
for gap_interval in this_gaps:
# Check that either E_F is inside the gap, or E_F is close to
# valence band maximum or conduction band minimum.
gap_in_band_tolerance = 1e-2 # 10 meV tolerance
if ((E_F >= gap_interval[0] and E_F <= gap_interval[1])
or abs(gap_interval[0] - E_F) < gap_in_band_tolerance
or abs(E_F - gap_interval[1]) < gap_in_band_tolerance):
gap_at_fermi = gap_interval
break
if gap_at_fermi is not None:
gap_val = gap_at_fermi[1] - gap_at_fermi[0]
gaps.append(gap_val)
else:
gaps.append(0.0)
return gaps
def plot_d_vals(plot_name, title, dps, values):
xs, ys = [], []
xs_set, ys_set = set(), set()
for d, prefix in dps:
xs.append(d[0])
ys.append(d[1])
xs_set.add(d[0])
ys_set.add(d[1])
num_xs = len(xs_set)
num_ys = len(ys_set)
C_E = np.array(values).reshape((num_xs, num_ys))
plt.xlabel("$d_a$")
plt.ylabel("$d_b$")
num_ticks_xs, num_ticks_ys = 5, 5
d_ticks_xs = []
for x in np.linspace(0.0, 1.0, num_ticks_xs, endpoint=True):
d_ticks_xs.append("{:.2f}".format(x))
d_ticks_ys = []
for y in np.linspace(0.0, 1.0, num_ticks_ys, endpoint=True):
d_ticks_ys.append("{:.2f}".format(y))
plt.xticks(np.linspace(0.0, num_xs-1, num_ticks_xs, endpoint=True), d_ticks_xs)
plt.yticks(np.linspace(0.0, num_ys-1, num_ticks_ys, endpoint=True), d_ticks_ys)
plt.imshow(C_E.T, origin='lower', interpolation='none', cmap=cm.viridis)
plt.colorbar()
plt.title(title)
plt.savefig("{}.png".format(plot_name), bbox_inches='tight', dpi=500)
plt.clf()
def _main():
parser = argparse.ArgumentParser(description="Plot various quantities as function of displacement")
parser.add_argument("--subdir", type=str, default=None,
help="Subdirectory under work_base where calculation was run")
parser.add_argument('--global_prefix', type=str, default="MoS2_WS2",
help="Calculation global prefix")
args = parser.parse_args()
gconf = global_config()
work = os.path.expandvars(gconf["work_base"])
if args.subdir is not None:
work = os.path.join(work, args.subdir)
prefixes = get_prefixes(work, args.global_prefix)
ds = ds_from_prefixes(prefixes)
ds, prefixes = wrap_cell(ds, prefixes)
dps = sorted_d_group(ds, prefixes)
write_out_data = {"_ds": []}
for d, prefix in dps:
write_out_data["_ds"].append(d)
energies = get_energies(work, dps)
energies_rel_meV = energies_relative_to(energies, dps, (0.0, 0.0))
E_title = "$\\Delta E$ [meV]"
E_plot_name = "{}_energies".format(args.global_prefix)
plot_d_vals(E_plot_name, E_title, dps, energies_rel_meV)
write_out_data["meV_relative_total_energy"] = energies_rel_meV
soc = True
Hk_vals = extract_Hk_vals(work, dps, soc)
for label, this_vals in Hk_vals.items():
title = label
plot_name = "{}_{}".format(args.global_prefix, label)
plot_d_vals(plot_name, title, dps, this_vals)
write_out_data["eV_{}".format(label)] = this_vals
na, nb = 16, 16
num_dos = 1000
E_below_fermi, E_above_fermi = 3.0, 3.0
gaps = find_gaps(work, dps, E_below_fermi, E_above_fermi, num_dos, na, nb)
gap_plot_title = "Gaps [eV]"
gap_plot_name = "{}_gaps".format(args.global_prefix)
plot_d_vals(gap_plot_name, gap_plot_title, dps, gaps)
write_out_data["eV_overall_gap"] = gaps
with open("{}_plot_ds_data.json".format(args.global_prefix), 'w') as fp:
json.dump(write_out_data, fp)
if __name__ == "__main__":
_main()
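# Hypothetical invocation (script and subdirectory names are illustrative):
#   python plot_ds.py --subdir run_01 --global_prefix MoS2_WS2
# This writes <global_prefix>_energies.png, one map per Hamiltonian matrix
# element, <global_prefix>_gaps.png, and <global_prefix>_plot_ds_data.json.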
import os
import scipy.stats
import SummedLikelihood
try:
import cPickle as pickle
except ImportError:
print("cPickle not found, using pickle instead")
import pickle
from enrico.fitmaker import FitMaker
from enrico.gtfunction import Observation
from enrico import Loggin
from enrico import utils
class ModelTester(Loggin.Message):
"""Class to est several models to check
which is statistically prefered."""
def __init__(self, config):
super(ModelTester,self).__init__()
Loggin.Message.__init__(self)
self.config = config
self.folder = self.config['out']
utils.mkdir_p(self.folder+"/TestModel")
self.modellist = ["PowerLaw","LogParabola","PLSuperExpCutoff"]
'''
try:
with open(self.folder+"/TestModel/Fit.pickle","r") as pfile:
print("Retrieving previous Fit from %s" \
%(self.folder+"/TestModel/Fit.pickle"))
self.FitRunner = pickle.load(pfile)
self.Fit = self.FitRunner.CreateLikeObject()
except:
self._GenFit()
self.FitRunner.PerformFit(self.Fit, False)
with open(self.folder+"/TestModel/Fit.pickle","w") as pfile:
print("Saving current Fit to %s" \
%(self.folder+"/TestModel/Fit.pickle"))
pickle.dump(self.FitRunner,pfile)
'''
self._GenFit()
self.FitRunner.PerformFit(self.Fit, False)
# Store the results in a dictionary
self.Results = {}
def _GenFit(self):
try :
del self.Fit
except :
pass
if self.config['Spectrum']['SummedLike'] == 'yes':
Obs1 = Observation(self.folder, self.config, tag="FRONT")
Obs2 = Observation(self.folder, self.config, tag="BACK")
self.FitRunnerfront = FitMaker(Obs1, self.config)
self.FitRunnerback = FitMaker(Obs2, self.config)
self.FitRunnerfront.CreateLikeObject()
self.FitRunnerback.CreateLikeObject()
self.Fit = SummedLikelihood.SummedLikelihood()
else:
Obs = Observation(self.folder, self.config, tag="")
self.FitRunner = FitMaker(Obs, self.config)##Class
#self.Fit = self.FitRunner.CreateLikeObject()
self.Fit = SummedLikelihood.SummedLikelihood()
self.Fit.addComponent(self.FitRunner.CreateLikeObject())
def _printResults(self):
print
self.info("Summary of the results")
for key in self.modellist:
if key == "PowerLaw":
print key," Log(Like) = ",self.Results[key]
llpl = self.Results[key]
else :
TS = 2*(self.Results[key]-llpl)
prob = 1 - scipy.stats.chi2.cdf(TS, 1)
print key," Log(Like) = ",self.Results[key]," TS = ",TS," Pvalue = ",prob
def TestModel(self):
""" actually test the models """
Dumpfile = open(self.folder+"/TestModel/TestModel.results","w")
for key in self.modellist:
self.Results[key] = self.RunAFit(self.config["target"]["name"],key)
Dumpfile.write(key + '\t' + str(self.Results[key]) + '\n')
Dumpfile.close()
self._printResults()
def TestModelFromFile(self,inputfile):
""" Set model and pars from file (test only a custom model)
This function allow us to test several custom models by calculating
their likelihood """
Inputfile = open(inputfile,'r')
with open(self.folder+"/TestModel/TestModel.results","w") as empty: pass
Dumpfile = open(self.folder+"/TestModel/TestModel.results","a+")
for currentmodel in Inputfile.readlines():
content = currentmodel.rstrip().split(",")
model = content[0]
pars = content[1:]
if model not in self.modellist:
print("WARNING: given model %s not in the valid range %s" \
%(str(model), str(self.modellist)))
continue
for k in xrange(len(pars)):
try: pars[k] = float(pars[k])
except: pars[k]=None
# Reduce the list of possible models to the current one
self.modellist = [model]
print("Using model %s with parameters %s" %(str(model), str(pars)))
self.Results[model] = self.RunAFit(self.config["target"]["name"],model,pars)
_sep_= ', '
Dumpfile.write(model + _sep_ + _sep_.join([str(k) for k in pars]) \
+ _sep_ + str(self.Results[model]) + '\n')
print("%s Log(Like) = %s" %(model,self.Results[model]))
Inputfile.close()
Dumpfile.close()
def RunAFit(self,srcname,model,pars=None):
# Compute the loglike for the current model for the given parameter set.
self.info("Computing loglike value for "+model)
for comp in self.Fit.components:
comp.logLike.getSource(srcname).setSpectrum(model)
if (pars==None):
pars=[None,None,None,None]
else:
# Release diffuse models (background)
for comp in self.Fit.components:
comp.thaw(self.Fit.par_index("IsoDiffModel", 'Normalization'))
comp.thaw(self.Fit.par_index("GalDiffModel", 'Value'))
for comp in self.Fit.components:
if model=="PowerLaw":
self._setPowerLaw(comp,srcname,pars)
if model=="LogParabola":
self._setLogParabola(comp,srcname,pars)
if model=="PLSuperExpCutoff":
self._setPLSuperExpCutoff(comp,srcname,pars)
#change the fit tolerance to the one given by the user
#self.Fit.ftol = float(self.config['fitting']['ftol'])
try :
self.Fit.fit(0,covar=True,optimizer=self.config["fitting"]["optimizer"])
spectrum = self.Fit[self.config['target']['name']].funcs['Spectrum']
# Get the names of the parameters for the source of interest
print "Loglike Value for ",model,": ",self.Fit.logLike.value()
self.Fit.writeXml(self.folder+"/TestModel/TestModel"+model+".xml")
loglikevalue = 0
for comp in self.Fit.components:
loglikevalue += comp.logLike.value()
return loglikevalue
except :
self.warning("No convergence for model : "+model+" ??")
return 0
def _setPowerLaw(self,comp,name,pars=None):
SrcSpectrum = comp.logLike.getSource(name).getSrcFuncs()['Spectrum']
SrcSpectrum.getParam('Prefactor').setBounds(1e-7,1e7)
SrcSpectrum.getParam('Prefactor').setScale(1e-11)
SrcSpectrum.getParam('Prefactor').setValue(1.)
SrcSpectrum.getParam('Prefactor').setFree(1)
SrcSpectrum.getParam('Index').setBounds(-5,0)
SrcSpectrum.getParam('Index').setValue(-2)
SrcSpectrum.getParam('Index').setFree(1)
SrcSpectrum.getParam('Scale').setValue(self.config['energy']['emin'])
SrcSpectrum.getParam('Scale').setBounds(20,3e6)
# Set each non-None parameter to the wanted value and fix it.
if pars[0]!=None:
print("Fixing Prefactor")
SrcSpectrum.getParam('Prefactor').setFree(0)
SrcSpectrum.getParam('Prefactor').setValue(pars[0]/1e-11)
par = comp.par_index(name, 'Prefactor')
comp.freeze(par)
if pars[1]!=None:
print("Fixing Index")
SrcSpectrum.getParam('Index').setScale(pars[1])
SrcSpectrum.getParam('Index').setFree(0)
par = comp.par_index(name, 'Index')
comp.freeze(par)
def _setLogParabola(self,comp,name,pars=None):
SrcSpectrum = comp.logLike.getSource(name).getSrcFuncs()['Spectrum']
SrcSpectrum.getParam('norm').setBounds(1e-7,1e7)
SrcSpectrum.getParam('norm').setScale(1e-11)
SrcSpectrum.getParam('norm').setValue(1.)
SrcSpectrum.getParam('norm').setFree(1)
SrcSpectrum.getParam('alpha').setBounds(0,5)
SrcSpectrum.getParam('alpha').setValue(2)
SrcSpectrum.getParam('alpha').setFree(1)
SrcSpectrum.getParam('beta').setBounds(0.01,10)
SrcSpectrum.getParam('beta').setValue(0.5)
SrcSpectrum.getParam('beta').setFree(1)
SrcSpectrum.getParam('Eb').setValue(self.config['energy']['emin'])
SrcSpectrum.getParam('Eb').setFree(0)
SrcSpectrum.getParam('Eb').setBounds(20,3e6)
# Set each non-None parameter to the wanted value and fix it.
if pars[0]!=None:
print("Fixing norm")
SrcSpectrum.getParam('norm').setFree(0)
SrcSpectrum.getParam('norm').setValue(pars[0]/1e-11)
par = comp.par_index(name, 'norm')
comp.freeze(par)
if pars[1]!=None:
print("Fixing alpha")
SrcSpectrum.getParam('alpha').setFree(0)
SrcSpectrum.getParam('alpha').setScale(pars[1])
par = comp.par_index(name, 'alpha')
comp.freeze(par)
if pars[2]!=None:
print("Fixing beta")
SrcSpectrum.getParam('beta').setFree(0)
SrcSpectrum.getParam('beta').setScale(pars[2])
par = comp.par_index(name, 'beta')
comp.freeze(par)
def _setPLSuperExpCutoff(self,comp,name,pars=None):
SrcSpectrum = comp.logLike.getSource(name).getSrcFuncs()['Spectrum']
SrcSpectrum.getParam('Prefactor').setBounds(1e-7,1e7)
SrcSpectrum.getParam('Prefactor').setScale(1e-11)
SrcSpectrum.getParam('Prefactor').setValue(1.)
SrcSpectrum.getParam('Prefactor').setFree(1)
SrcSpectrum.getParam('Index1').setBounds(-5,0)
SrcSpectrum.getParam('Index1').setValue(-2)
SrcSpectrum.getParam('Index1').setFree(1)
SrcSpectrum.getParam('Index2').setValue(-1)
SrcSpectrum.getParam('Index2').setBounds(-5,-0.05)
SrcSpectrum.getParam('Index2').setFree(0)
SrcSpectrum.getParam('Cutoff').setBounds(20,3e6)
SrcSpectrum.getParam('Cutoff').setValue(1e4)
SrcSpectrum.getParam('Cutoff').setFree(1)
SrcSpectrum.getParam('Scale').setValue(self.config['energy']['emin'])
SrcSpectrum.getParam('Scale').setBounds(20,3e6)
# Set each non-None parameter to the wanted value and fix it.
if pars[0]!=None:
print("Fixing Prefactor")
SrcSpectrum.getParam('Prefactor').setFree(0)
SrcSpectrum.getParam('Prefactor').setValue(pars[0]/1e-11)
par = comp.par_index(name, 'Prefactor')
comp.freeze(par)
if pars[1]!=None:
print("Fixing Index1")
SrcSpectrum.getParam('Index1').setScale(pars[1])
SrcSpectrum.getParam('Index1').setFree(0)
par = comp.par_index(name, 'Index1')
comp.freeze(par)
if pars[2]!=None:
print("Fixing Index2")
SrcSpectrum.getParam('Index2').setScale(pars[2])
SrcSpectrum.getParam('Index2').setFree(0)
par = comp.par_index(name, 'Index2')
comp.freeze(par)
if pars[3]!=None:
print("Fixing Cutoff")
SrcSpectrum.getParam('Cutoff').setScale(pars[3])
SrcSpectrum.getParam('Cutoff').setFree(0)
par = comp.par_index(name, 'Cutoff')
comp.freeze(par)
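# Illustrative sketch (not part of enrico): _printResults treats the PowerLaw
# fit as the null model and compares each alternative with a likelihood-ratio
# test, TS = 2 * (logL_model - logL_PowerLaw), using a chi2 p-value with one
# degree of freedom. The log-likelihood values below are hypothetical.
def _example_likelihood_ratio_test():
    import scipy.stats
    loglike_powerlaw = -1000.0
    loglike_logparabola = -995.0
    TS = 2 * (loglike_logparabola - loglike_powerlaw)  # 10.0
    pvalue = 1 - scipy.stats.chi2.cdf(TS, 1)
    return TS, pvalue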
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetContactLogsRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'client_id': 'str',
'start_date': 'datetime',
'end_date': 'datetime',
'staff_ids': 'list[int]',
'show_system_generated': 'bool',
'type_ids': 'list[int]',
'subtype_ids': 'list[int]',
'limit': 'int',
'offset': 'int'
}
attribute_map = {
'client_id': 'ClientId',
'start_date': 'StartDate',
'end_date': 'EndDate',
'staff_ids': 'StaffIds',
'show_system_generated': 'ShowSystemGenerated',
'type_ids': 'TypeIds',
'subtype_ids': 'SubtypeIds',
'limit': 'Limit',
'offset': 'Offset'
}
def __init__(self, client_id=None, start_date=None, end_date=None, staff_ids=None, show_system_generated=None, type_ids=None, subtype_ids=None, limit=None, offset=None): # noqa: E501
"""GetContactLogsRequest - a model defined in Swagger""" # noqa: E501
self._client_id = None
self._start_date = None
self._end_date = None
self._staff_ids = None
self._show_system_generated = None
self._type_ids = None
self._subtype_ids = None
self._limit = None
self._offset = None
self.discriminator = None
self.client_id = client_id
if start_date is not None:
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
if staff_ids is not None:
self.staff_ids = staff_ids
if show_system_generated is not None:
self.show_system_generated = show_system_generated
if type_ids is not None:
self.type_ids = type_ids
if subtype_ids is not None:
self.subtype_ids = subtype_ids
if limit is not None:
self.limit = limit
if offset is not None:
self.offset = offset
@property
def client_id(self):
"""Gets the client_id of this GetContactLogsRequest. # noqa: E501
The ID of the client whose contact logs are being requested. # noqa: E501
:return: The client_id of this GetContactLogsRequest. # noqa: E501
:rtype: str
"""
return self._client_id
@client_id.setter
def client_id(self, client_id):
"""Sets the client_id of this GetContactLogsRequest.
The ID of the client whose contact logs are being requested. # noqa: E501
:param client_id: The client_id of this GetContactLogsRequest. # noqa: E501
:type: str
"""
if client_id is None:
raise ValueError("Invalid value for `client_id`, must not be `None`") # noqa: E501
self._client_id = client_id
@property
def start_date(self):
"""Gets the start_date of this GetContactLogsRequest. # noqa: E501
Filters the results to contact logs created on or after this date.<br /> Default: **the current date** # noqa: E501
:return: The start_date of this GetContactLogsRequest. # noqa: E501
:rtype: datetime
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this GetContactLogsRequest.
Filters the results to contact logs created on or after this date.<br /> Default: **the current date** # noqa: E501
:param start_date: The start_date of this GetContactLogsRequest. # noqa: E501
:type: datetime
"""
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this GetContactLogsRequest. # noqa: E501
Filters the results to contact logs created before this date.<br /> Default: **the start date** # noqa: E501
:return: The end_date of this GetContactLogsRequest. # noqa: E501
:rtype: datetime
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this GetContactLogsRequest.
Filters the results to contact logs created before this date.<br /> Default: **the start date** # noqa: E501
:param end_date: The end_date of this GetContactLogsRequest. # noqa: E501
:type: datetime
"""
self._end_date = end_date
@property
def staff_ids(self):
"""Gets the staff_ids of this GetContactLogsRequest. # noqa: E501
Filters the results to return contact logs assigned to one or more staff IDs. # noqa: E501
:return: The staff_ids of this GetContactLogsRequest. # noqa: E501
:rtype: list[int]
"""
return self._staff_ids
@staff_ids.setter
def staff_ids(self, staff_ids):
"""Sets the staff_ids of this GetContactLogsRequest.
Filters the results to return contact logs assigned to one or more staff IDs. # noqa: E501
:param staff_ids: The staff_ids of this GetContactLogsRequest. # noqa: E501
:type: list[int]
"""
self._staff_ids = staff_ids
@property
def show_system_generated(self):
"""Gets the show_system_generated of this GetContactLogsRequest. # noqa: E501
When `true`, system-generated contact logs are returned in the results.<br /> Default: **false** # noqa: E501
:return: The show_system_generated of this GetContactLogsRequest. # noqa: E501
:rtype: bool
"""
return self._show_system_generated
@show_system_generated.setter
def show_system_generated(self, show_system_generated):
"""Sets the show_system_generated of this GetContactLogsRequest.
When `true`, system-generated contact logs are returned in the results.<br /> Default: **false** # noqa: E501
:param show_system_generated: The show_system_generated of this GetContactLogsRequest. # noqa: E501
:type: bool
"""
self._show_system_generated = show_system_generated
@property
def type_ids(self):
"""Gets the type_ids of this GetContactLogsRequest. # noqa: E501
Filters the results to contact logs assigned one or more of these type IDs. # noqa: E501
:return: The type_ids of this GetContactLogsRequest. # noqa: E501
:rtype: list[int]
"""
return self._type_ids
@type_ids.setter
def type_ids(self, type_ids):
"""Sets the type_ids of this GetContactLogsRequest.
Filters the results to contact logs assigned one or more of these type IDs. # noqa: E501
:param type_ids: The type_ids of this GetContactLogsRequest. # noqa: E501
:type: list[int]
"""
self._type_ids = type_ids
@property
def subtype_ids(self):
"""Gets the subtype_ids of this GetContactLogsRequest. # noqa: E501
Filters the results to contact logs assigned one or more of these subtype IDs. # noqa: E501
:return: The subtype_ids of this GetContactLogsRequest. # noqa: E501
:rtype: list[int]
"""
return self._subtype_ids
@subtype_ids.setter
def subtype_ids(self, subtype_ids):
"""Sets the subtype_ids of this GetContactLogsRequest.
Filters the results to contact logs assigned one or more of these subtype IDs. # noqa: E501
:param subtype_ids: The subtype_ids of this GetContactLogsRequest. # noqa: E501
:type: list[int]
"""
self._subtype_ids = subtype_ids
@property
def limit(self):
"""Gets the limit of this GetContactLogsRequest. # noqa: E501
Number of results to include, defaults to 100 # noqa: E501
:return: The limit of this GetContactLogsRequest. # noqa: E501
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this GetContactLogsRequest.
Number of results to include, defaults to 100 # noqa: E501
:param limit: The limit of this GetContactLogsRequest. # noqa: E501
:type: int
"""
self._limit = limit
@property
def offset(self):
"""Gets the offset of this GetContactLogsRequest. # noqa: E501
Page offset, defaults to 0. # noqa: E501
:return: The offset of this GetContactLogsRequest. # noqa: E501
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this GetContactLogsRequest.
Page offset, defaults to 0. # noqa: E501
:param offset: The offset of this GetContactLogsRequest. # noqa: E501
:type: int
"""
self._offset = offset
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GetContactLogsRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetContactLogsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
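# Illustrative sketch (not part of the generated module): a request model is
# populated via keyword arguments and serialized with to_dict() before being
# sent to the API. The client ID below is hypothetical.
def _example_get_contact_logs_request():
    request = GetContactLogsRequest(client_id="100000009", limit=25, offset=0)
    payload = request.to_dict()
    assert payload["client_id"] == "100000009"
    assert payload["limit"] == 25
    return payload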
import os
import numpy as np
import pytest
from pandas import (
Categorical,
DatetimeIndex,
Interval,
IntervalIndex,
NaT,
Series,
TimedeltaIndex,
Timestamp,
cut,
date_range,
isna,
qcut,
timedelta_range,
)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.algorithms import quantile
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, Nano
def test_qcut():
arr = np.random.randn(1000)
# We store the bins as an Index that has been
# rounded, so comparisons are a bit tricky.
labels, bins = qcut(arr, 4, retbins=True)
ex_bins = quantile(arr, [0, 0.25, 0.5, 0.75, 1.0])
result = labels.categories.left.values
assert np.allclose(result, ex_bins[:-1], atol=1e-2)
result = labels.categories.right.values
assert np.allclose(result, ex_bins[1:], atol=1e-2)
ex_levels = cut(arr, ex_bins, include_lowest=True)
tm.assert_categorical_equal(labels, ex_levels)
def test_qcut_bounds():
arr = np.random.randn(1000)
factor = qcut(arr, 10, labels=False)
assert len(np.unique(factor)) == 10
def test_qcut_specify_quantiles():
arr = np.random.randn(100)
factor = qcut(arr, [0, 0.25, 0.5, 0.75, 1.0])
expected = qcut(arr, 4)
tm.assert_categorical_equal(factor, expected)
def test_qcut_all_bins_same():
with pytest.raises(ValueError, match="edges.*unique"):
qcut([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)
def test_qcut_include_lowest():
values = np.arange(10)
ii = qcut(values, 4)
ex_levels = IntervalIndex(
[
Interval(-0.001, 2.25),
Interval(2.25, 4.5),
Interval(4.5, 6.75),
Interval(6.75, 9),
]
)
tm.assert_index_equal(ii.categories, ex_levels)
def test_qcut_nas():
arr = np.random.randn(100)
arr[:20] = np.nan
result = qcut(arr, 4)
assert isna(result[:20]).all()
def test_qcut_index():
result = qcut([0, 2], 2)
intervals = [Interval(-0.001, 1), Interval(1, 2)]
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_qcut_binning_issues(datapath):
# see gh-1978, gh-1979
cut_file = datapath(os.path.join("reshape", "data", "cut_data.csv"))
arr = np.loadtxt(cut_file)
result = qcut(arr, 20)
starts = []
ends = []
for lev in np.unique(result):
s = lev.left
e = lev.right
assert s != e
starts.append(float(s))
ends.append(float(e))
for (sp, sn), (ep, en) in zip(
zip(starts[:-1], starts[1:]), zip(ends[:-1], ends[1:])
):
assert sp < sn
assert ep < en
assert ep <= sn
def test_qcut_return_intervals():
ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = qcut(ser, [0, 0.333, 0.666, 1])
exp_levels = np.array(
[Interval(-0.001, 2.664), Interval(2.664, 5.328), Interval(5.328, 8)]
)
exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize(
"kwargs,msg",
[
(dict(duplicates="drop"), None),
(dict(), "Bin edges must be unique"),
(dict(duplicates="raise"), "Bin edges must be unique"),
(dict(duplicates="foo"), "invalid value for 'duplicates' parameter"),
],
)
def test_qcut_duplicates_bin(kwargs, msg):
# see gh-7751
values = [0, 0, 0, 0, 1, 2, 3]
if msg is not None:
with pytest.raises(ValueError, match=msg):
qcut(values, 3, **kwargs)
else:
result = qcut(values, 3, **kwargs)
expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])
tm.assert_index_equal(result.categories, expected)
@pytest.mark.parametrize(
"data,start,end", [(9.0, 8.999, 9.0), (0.0, -0.001, 0.0), (-9.0, -9.001, -9.0)]
)
@pytest.mark.parametrize("length", [1, 2])
@pytest.mark.parametrize("labels", [None, False])
def test_single_quantile(data, start, end, length, labels):
# see gh-15431
ser = Series([data] * length)
result = qcut(ser, 1, labels=labels)
if labels is None:
intervals = IntervalIndex([Interval(start, end)] * length, closed="right")
expected = Series(intervals).astype(CDT(ordered=True))
else:
expected = Series([0] * length)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser",
[
Series(DatetimeIndex(["20180101", NaT, "20180103"])),
Series(TimedeltaIndex(["0 days", NaT, "2 days"])),
],
ids=lambda x: str(x.dtype),
)
def test_qcut_nat(ser):
# see gh-19768
intervals = IntervalIndex.from_tuples(
[(ser[0] - Nano(), ser[2] - Day()), np.nan, (ser[2] - Day(), ser[2])]
)
expected = Series(Categorical(intervals, ordered=True))
result = qcut(ser, 2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("bins", [3, np.linspace(0, 1, 4)])
def test_datetime_tz_qcut(bins):
# see gh-19872
tz = "US/Eastern"
ser = Series(date_range("20130101", periods=3, tz=tz))
result = qcut(ser, bins)
expected = Series(
IntervalIndex(
[
Interval(
Timestamp("2012-12-31 23:59:59.999999999", tz=tz),
Timestamp("2013-01-01 16:00:00", tz=tz),
),
Interval(
Timestamp("2013-01-01 16:00:00", tz=tz),
Timestamp("2013-01-02 08:00:00", tz=tz),
),
Interval(
Timestamp("2013-01-02 08:00:00", tz=tz),
Timestamp("2013-01-03 00:00:00", tz=tz),
),
]
)
).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"arg,expected_bins",
[
[
timedelta_range("1day", periods=3),
TimedeltaIndex(["1 days", "2 days", "3 days"]),
],
[
date_range("20180101", periods=3),
DatetimeIndex(["2018-01-01", "2018-01-02", "2018-01-03"]),
],
],
)
def test_date_like_qcut_bins(arg, expected_bins):
# see gh-19891
ser = Series(arg)
result, result_bins = qcut(ser, 2, retbins=True)
tm.assert_index_equal(result_bins, expected_bins)
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
from salt.modules import debian_ip
# Import third party libs
import jinja2.exceptions
# Globals
debian_ip.__grains__ = {}
debian_ip.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class DebianIpTestCase(TestCase):
'''
Test cases for salt.modules.debian_ip
'''
# 'build_bond' function tests: 3
@patch('salt.modules.debian_ip._parse_settings_bond',
MagicMock(return_value={}))
@patch('salt.modules.debian_ip._write_file', MagicMock(return_value=True))
def test_build_bond(self):
'''
Test if it creates a bond script in /etc/modprobe.d with the passed
settings and loads the bonding kernel module.
'''
mock = MagicMock(return_value=1)
with patch.dict(debian_ip.__grains__, {'osrelease': mock}):
mock = MagicMock(return_value=True)
with patch.dict(debian_ip.__salt__, {'kmod.load': mock,
'pkg.install': mock}):
self.assertEqual(debian_ip.build_bond('bond0'), '')
@patch('salt.modules.debian_ip._parse_settings_bond',
MagicMock(return_value={}))
def test_build_bond_exception(self):
'''
Test if it creates a bond script in /etc/modprobe.d with the passed
settings and loads the bonding kernel module.
'''
mock = MagicMock(return_value=1)
with patch.dict(debian_ip.__grains__, {'osrelease': mock}):
mock = MagicMock(side_effect=
jinja2.exceptions.TemplateNotFound('error'))
with patch.object(jinja2.Environment, 'get_template', mock):
self.assertEqual(debian_ip.build_bond('bond0'), '')
@patch('salt.modules.debian_ip._parse_settings_bond',
MagicMock(return_value={}))
@patch('salt.modules.debian_ip._read_temp', MagicMock(return_value=True))
def test_build_bond_data(self):
'''
Test if it creates a bond script in /etc/modprobe.d with the passed
settings and loads the bonding kernel module.
'''
mock = MagicMock(return_value=1)
with patch.dict(debian_ip.__grains__, {'osrelease': mock}):
self.assertTrue(debian_ip.build_bond('bond0', test='True'))
# 'build_interface' function tests: 1
@patch('salt.modules.debian_ip._write_file_ifaces',
MagicMock(return_value='salt'))
def test_build_interface(self):
'''
Test if it builds an interface script for a network interface.
'''
self.assertEqual(debian_ip.build_interface('eth0', 'eth', 'enabled'),
['s\n', 'a\n', 'l\n', 't\n'])
self.assertTrue(debian_ip.build_interface('eth0', 'eth', 'enabled',
test='True'))
with patch.object(debian_ip, '_parse_settings_eth',
MagicMock(return_value={'routes': []})):
self.assertRaises(AttributeError, debian_ip.build_interface,
'eth0', 'bridge', 'enabled')
self.assertRaises(AttributeError, debian_ip.build_interface,
'eth0', 'slave', 'enabled')
self.assertRaises(AttributeError, debian_ip.build_interface,
'eth0', 'bond', 'enabled')
self.assertTrue(debian_ip.build_interface('eth0', 'eth', 'enabled',
test='True'))
# 'build_routes' function tests: 2
@patch('salt.modules.debian_ip._parse_routes',
MagicMock(return_value={'routes': []}))
@patch('salt.modules.debian_ip._write_file_routes',
MagicMock(return_value=True))
@patch('salt.modules.debian_ip._read_file', MagicMock(return_value='salt'))
def test_build_routes(self):
'''
Test if it adds route scripts for a network interface using up commands.
'''
self.assertEqual(debian_ip.build_routes('eth0'), 'saltsalt')
@patch('salt.modules.debian_ip._parse_routes',
MagicMock(return_value={'routes': []}))
def test_build_routes_exception(self):
'''
Test if it adds route scripts for a network interface using up commands.
'''
self.assertTrue(debian_ip.build_routes('eth0', test='True'))
mock = MagicMock(side_effect=jinja2.exceptions.TemplateNotFound('err'))
with patch.object(jinja2.Environment, 'get_template', mock):
self.assertEqual(debian_ip.build_routes('eth0'), '')
# 'down' function tests: 1
def test_down(self):
'''
Test if it shuts down a network interface
'''
self.assertEqual(debian_ip.down('eth0', 'slave'), None)
mock = MagicMock(return_value='Salt')
with patch.dict(debian_ip.__salt__, {'cmd.run': mock}):
self.assertEqual(debian_ip.down('eth0', 'eth'), 'Salt')
# 'get_bond' function tests: 1
def test_get_bond(self):
'''
Test if it returns the content of a bond script
'''
self.assertEqual(debian_ip.get_bond('bond0'), '')
# 'get_interface' function tests: 1
def test_get_interface(self):
'''
Test if it returns the contents of an interface script
'''
with patch.object(debian_ip, '_parse_interfaces',
MagicMock(return_value={})):
self.assertListEqual(debian_ip.get_interface('eth0'), [])
mock_ret = {'lo': {'enabled': True, 'data':
{'inet': {'addrfam': 'inet', 'proto': 'loopback'}}}}
with patch.object(debian_ip, '_parse_interfaces',
MagicMock(return_value=mock_ret)):
self.assertListEqual(debian_ip.get_interface('lo'),
[u'auto lo\n', u'iface lo inet loopback\n',
u'\n'])
mock = MagicMock(side_effect=jinja2.exceptions.TemplateNotFound
('error'))
with patch.object(jinja2.Environment, 'get_template', mock):
self.assertEqual(debian_ip.get_interface('lo'), '')
# 'up' function tests: 1
def test_up(self):
'''
Test if it starts up a network interface
'''
self.assertEqual(debian_ip.down('eth0', 'slave'), None)
mock = MagicMock(return_value='Salt')
with patch.dict(debian_ip.__salt__, {'cmd.run': mock}):
self.assertEqual(debian_ip.up('eth0', 'eth'), 'Salt')
# 'get_network_settings' function tests: 1
@patch('salt.modules.debian_ip._parse_hostname',
MagicMock(return_value='SaltStack'))
@patch('salt.modules.debian_ip._parse_domainname',
MagicMock(return_value='saltstack.com'))
def test_get_network_settings(self):
'''
Test if it returns the contents of the global network script.
'''
with patch.dict(debian_ip.__grains__, {'osfullname': 'Ubuntu',
'osrelease': '14'}):
mock_avai = MagicMock(return_value=True)
with patch.dict(debian_ip.__salt__, {'service.available': mock_avai,
'service.status': mock_avai}):
self.assertEqual(debian_ip.get_network_settings(),
[u'NETWORKING=yes\n',
u'HOSTNAME=SaltStack\n',
u'DOMAIN=saltstack.com\n'])
mock = MagicMock(side_effect=jinja2.exceptions.TemplateNotFound
('error'))
with patch.object(jinja2.Environment, 'get_template', mock):
self.assertEqual(debian_ip.get_network_settings(), '')
# 'get_routes' function tests: 1
@patch('salt.modules.debian_ip._read_file', MagicMock(return_value='salt'))
def test_get_routes(self):
'''
Test if it returns the routes for the interface
'''
self.assertEqual(debian_ip.get_routes('eth0'), 'saltsalt')
# 'apply_network_settings' function tests: 1
def test_apply_network_settings(self):
'''
Test if it applies global network configuration.
'''
mock = MagicMock(return_value=True)
with patch.dict(debian_ip.__salt__, {'network.mod_hostname': mock,
'service.stop': mock,
'service.start': mock}):
self.assertEqual(debian_ip.apply_network_settings(), True)
# 'build_network_settings' function tests: 1
@patch('salt.modules.debian_ip._parse_network_settings',
MagicMock(return_value={'networking': 'yes',
'hostname': 'Salt.saltstack.com',
'domainname': 'saltstack.com'}))
@patch('salt.modules.debian_ip._write_file_network',
MagicMock(return_value=True))
def test_build_network_settings(self):
'''
Test if it builds the global network script.
'''
with patch.dict(debian_ip.__grains__, {'osfullname': 'Ubuntu',
'osrelease': '14'}):
mock = MagicMock(return_value=True)
with patch.dict(debian_ip.__salt__, {'service.available': mock,
'service.disable': mock,
'service.enable': mock}):
self.assertEqual(debian_ip.build_network_settings(),
[u'NETWORKING=yes\n',
u'HOSTNAME=Salt\n',
u'DOMAIN=saltstack.com\n'])
mock = MagicMock(side_effect=jinja2.exceptions.TemplateNotFound
('error'))
with patch.object(jinja2.Environment, 'get_template', mock):
self.assertEqual(debian_ip.build_network_settings(), '')
with patch.dict(debian_ip.__grains__, {'osfullname': 'Ubuntu',
'osrelease': '10'}):
mock = MagicMock(return_value=True)
with patch.dict(debian_ip.__salt__, {'service.available': mock,
'service.disable': mock,
'service.enable': mock}):
mock = MagicMock(side_effect=jinja2.exceptions.TemplateNotFound
('error'))
with patch.object(jinja2.Environment, 'get_template', mock):
self.assertEqual(debian_ip.build_network_settings(), '')
with patch.object(debian_ip, '_read_temp',
MagicMock(return_value=True)):
self.assertTrue(debian_ip.build_network_settings
(test='True'))
if __name__ == '__main__':
from integration import run_tests
run_tests(DebianIpTestCase, needs_daemon=False)
# -*- coding: utf-8 -*-
"""
.. _tut-timefreq-twoway-anova:
====================================================================
Mass-univariate twoway repeated measures ANOVA on single trial power
====================================================================
This script shows how to conduct a mass-univariate repeated measures
ANOVA. As the model to be fitted assumes two fully crossed factors,
we will study the interplay between perceptual modality
(auditory VS visual) and the location of stimulus presentation
(left VS right). Here we use single trials as replications
(subjects) while iterating over time slices plus frequency bands
to fit our mass-univariate model. For the sake of simplicity we
will confine this analysis to a single channel that we know
exposes a strong induced response. We will then visualize
each effect by creating a corresponding mass-univariate effect
image. We conclude by accounting for multiple comparisons with
a permutation clustering test using the ANOVA as the
clustering function. The final results will be compared to multiple
comparisons using False Discovery Rate correction.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Alex Rockhill <aprockhill@mailbox.org>
#
# License: BSD-3-Clause
# %%
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import f_threshold_mway_rm, f_mway_rm, fdr_correction
from mne.datasets import sample
print(__doc__)
# %%
# Set parameters
# --------------
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_raw.fif'
event_fname = meg_path / 'sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443'] # bads
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332'
# Load conditions
reject = dict(grad=4000e-13, eog=150e-6)
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject)
epochs.pick_channels([ch_name]) # restrict example to one channel
# %%
# We have to make sure all conditions have the same counts, as the ANOVA
# expects a fully balanced data matrix and does not forgive imbalances that
# generously (risk of type-I error).
epochs.equalize_event_counts(event_id)
# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet.
decim = 2
freqs = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = freqs / freqs[0]
zero_mean = False # don't correct morlet wavelet to be of mean zero
# To have a true wavelet zero_mean should be True but here for illustration
# purposes it helps to spot the evoked response.
# %%
# Create TFR representations for all conditions
# ---------------------------------------------
epochs_power = list()
for condition in [epochs[k] for k in event_id]:
this_tfr = tfr_morlet(condition, freqs, n_cycles=n_cycles,
decim=decim, average=False, zero_mean=zero_mean,
return_itc=False)
this_tfr.apply_baseline(mode='ratio', baseline=(None, 0))
this_power = this_tfr.data[:, 0, :, :] # we only have one channel.
epochs_power.append(this_power)
# %%
# Setup repeated measures ANOVA
# -----------------------------
#
# We will tell the ANOVA how to interpret the data matrix in terms of factors.
# This is done via the factor levels argument, which is a list of the number
# of factor levels for each factor.
n_conditions = len(epochs.event_id)
n_replications = epochs.events.shape[0] // n_conditions
factor_levels = [2, 2] # number of levels in each factor
effects = 'A*B' # this is the default signature for computing all effects
# Other possible options are 'A' or 'B' for the corresponding main effects
# or 'A:B' for the interaction effect only (this notation is borrowed from the
# R formula language)
n_freqs = len(freqs)
times = 1e3 * epochs.times[::decim]
n_times = len(times)
# %%
# Now we'll assemble the data matrix and swap axes so the trial replications
# are the first dimension and the conditions are the second dimension.
data = np.swapaxes(np.asarray(epochs_power), 1, 0)
# so we have replications x conditions x observations
# where the time-frequency observations are freqs x times:
print(data.shape)
# %%
# While the iteration scheme used above for assembling the data matrix
# makes sure the first two dimensions are organized as expected (with A =
# modality and B = location):
#
# .. table:: Sample data layout
#
# ===== ==== ==== ==== ====
# trial A1B1 A1B2 A2B1 A2B2
# ===== ==== ==== ==== ====
# 1 1.34 2.53 0.97 1.74
# ... ... ... ... ...
# 56 2.45 7.90 3.09 4.76
# ===== ==== ==== ==== ====
#
# Now we're ready to run our repeated measures ANOVA.
#
# Note. As we treat trials as subjects, the test only accounts for
# time locked responses despite the 'induced' approach.
# For analysis of induced power at the group level, averaged TFRs
# are required.
fvals, pvals = f_mway_rm(data, factor_levels, effects=effects)
effect_labels = ['modality', 'location', 'modality by location']
fig, axes = plt.subplots(3, 1, figsize=(6, 6))
# let's visualize our effects by computing f-images
for effect, sig, effect_label, ax in zip(fvals, pvals, effect_labels, axes):
# show naive F-values in gray
ax.imshow(effect, cmap='gray', aspect='auto', origin='lower',
extent=[times[0], times[-1], freqs[0], freqs[-1]])
# create mask for significant time-frequency locations
effect[sig >= 0.05] = np.nan
c = ax.imshow(effect, cmap='autumn', aspect='auto', origin='lower',
extent=[times[0], times[-1], freqs[0], freqs[-1]])
fig.colorbar(c, ax=ax)
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
ax.set_title(f'Time-locked response for "{effect_label}" ({ch_name})')
fig.tight_layout()
# %%
# Account for multiple comparisons using FDR versus permutation clustering test
# -----------------------------------------------------------------------------
#
# First we need to slightly modify the ANOVA function to be suitable for
# the clustering procedure. We also want to set some defaults.
# Let's first override effects to confine the analysis to the interaction
effects = 'A:B'
# %%
# A stat_fun must deal with a variable number of input arguments.
# Inside the clustering function each condition will be passed as flattened
# array, necessitated by the clustering procedure. The ANOVA however expects an
# input array of dimensions: subjects x conditions x observations (optional).
# The following function catches the list input and swaps the first and
# the second dimension and finally calls the ANOVA function.
def stat_fun(*args):
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=False)[0]
# The ANOVA returns a tuple of f-values and p-values; we will pick the former.
pthresh = 0.001 # set threshold rather high to save some time
f_thresh = f_threshold_mway_rm(n_replications, factor_levels, effects,
pthresh)
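# Illustrative check (an assumption, not part of the original tutorial): for a
# 2 x 2 design the A:B interaction has 1 numerator and n_replications - 1
# denominator degrees of freedom, so f_thresh should be close to the plain
# F quantile from scipy.stats.
import scipy.stats
print(f_thresh, scipy.stats.f.ppf(1 - pthresh, 1, n_replications - 1))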
tail = 1 # f-test, so tail > 0
n_permutations = 256 # Save some time (the test won't be too sensitive ...)
F_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
epochs_power, stat_fun=stat_fun, threshold=f_thresh, tail=tail, n_jobs=1,
n_permutations=n_permutations, buffer_size=None, out_type='mask')
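# With out_type='mask', each entry of `clusters` is a boolean array with the
# same time-frequency shape as F_obs, and `cluster_p_values` holds one
# permutation p-value per cluster.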
# %%
# Create new stats image with only significant clusters:
good_clusters = np.where(cluster_p_values < .05)[0]
F_obs_plot = F_obs.copy()
F_obs_plot[~clusters[np.squeeze(good_clusters)]] = np.nan
fig, ax = plt.subplots(figsize=(6, 4))
for f_image, cmap in zip([F_obs, F_obs_plot], ['gray', 'autumn']):
c = ax.imshow(f_image, cmap=cmap, aspect='auto', origin='lower',
extent=[times[0], times[-1], freqs[0], freqs[-1]])
fig.colorbar(c, ax=ax)
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
ax.set_title(f'Time-locked response for "modality by location" ({ch_name})\n'
'cluster-level corrected (p <= 0.05)')
fig.tight_layout()
# %%
# Now using FDR:
mask, _ = fdr_correction(pvals[2])
F_obs_plot2 = F_obs.copy()
F_obs_plot2[~mask.reshape(F_obs_plot.shape)] = np.nan
fig, ax = plt.subplots(figsize=(6, 4))
for f_image, cmap in zip([F_obs, F_obs_plot2], ['gray', 'autumn']):
c = ax.imshow(f_image, cmap=cmap, aspect='auto', origin='lower',
extent=[times[0], times[-1], freqs[0], freqs[-1]])
fig.colorbar(c, ax=ax)
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
ax.set_title(f'Time-locked response for "modality by location" ({ch_name})\n'
'FDR corrected (p <= 0.05)')
fig.tight_layout()
# %%
# Both cluster-level and FDR correction help get rid of potential
# false-positives that we saw in the naive f-images. The cluster permutation
# correction is biased toward time-frequencies with contiguous areas of high
# or low power, which is likely appropriate given the highly correlated nature
# of this data. This is the most likely explanation for why one cluster was
# preserved by the cluster permutation correction, but no time-frequencies
# were significant using the FDR correction.
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class LicensesTransport(abc.ABC):
"""Abstract transport class for Licenses."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
DEFAULT_HOST: str = "compute.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.delete: gapic_v1.method.wrap_method(
self.delete, default_timeout=None, client_info=client_info,
),
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.get_iam_policy: gapic_v1.method.wrap_method(
self.get_iam_policy, default_timeout=None, client_info=client_info,
),
self.insert: gapic_v1.method.wrap_method(
self.insert, default_timeout=None, client_info=client_info,
),
self.list: gapic_v1.method.wrap_method(
self.list, default_timeout=None, client_info=client_info,
),
self.set_iam_policy: gapic_v1.method.wrap_method(
self.set_iam_policy, default_timeout=None, client_info=client_info,
),
self.test_iam_permissions: gapic_v1.method.wrap_method(
self.test_iam_permissions,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def delete(
self,
) -> Callable[
[compute.DeleteLicenseRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def get(
self,
) -> Callable[
[compute.GetLicenseRequest], Union[compute.License, Awaitable[compute.License]]
]:
raise NotImplementedError()
@property
def get_iam_policy(
self,
) -> Callable[
[compute.GetIamPolicyLicenseRequest],
Union[compute.Policy, Awaitable[compute.Policy]],
]:
raise NotImplementedError()
@property
def insert(
self,
) -> Callable[
[compute.InsertLicenseRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def list(
self,
) -> Callable[
[compute.ListLicensesRequest],
Union[compute.LicensesListResponse, Awaitable[compute.LicensesListResponse]],
]:
raise NotImplementedError()
@property
def set_iam_policy(
self,
) -> Callable[
[compute.SetIamPolicyLicenseRequest],
Union[compute.Policy, Awaitable[compute.Policy]],
]:
raise NotImplementedError()
@property
def test_iam_permissions(
self,
) -> Callable[
[compute.TestIamPermissionsLicenseRequest],
Union[
compute.TestPermissionsResponse, Awaitable[compute.TestPermissionsResponse]
],
]:
raise NotImplementedError()
__all__ = ("LicensesTransport",)
"""Clipper Management Utilities"""
from __future__ import print_function, with_statement, absolute_import
from fabric.api import *
from fabric.contrib.files import append
import os
import requests
import json
import yaml
import pprint
import subprocess32 as subprocess
import shutil
from sklearn import base
from sklearn.externals import joblib
from cStringIO import StringIO
import sys
from .cloudpickle import CloudPickler
import time
import re
__all__ = ['Clipper']
cur_dir = os.path.dirname(os.path.abspath(__file__))
MODEL_REPO = "/tmp/clipper-models"
DOCKER_NW = "clipper_nw"
CONTAINER_CONDA_PLATFORM = 'linux-64'
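# Clipper partitions its metadata across separate Redis logical databases;
# the constants below select the database used for each kind of state.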
REDIS_STATE_DB_NUM = 1
REDIS_MODEL_DB_NUM = 2
REDIS_CONTAINER_DB_NUM = 3
REDIS_RESOURCE_DB_NUM = 4
REDIS_APPLICATION_DB_NUM = 5
DEFAULT_REDIS_IP = "redis"
DEFAULT_REDIS_PORT = 6379
CLIPPER_QUERY_PORT = 1337
CLIPPER_MANAGEMENT_PORT = 1338
CLIPPER_RPC_PORT = 7000
CLIPPER_LOGS_PATH = "/tmp/clipper-logs"
CLIPPER_DOCKER_LABEL = "ai.clipper.container.label"
CLIPPER_MODEL_CONTAINER_LABEL = "ai.clipper.model_container.model_version"
DEFAULT_LABEL = ["DEFAULT"]
aws_cli_config = """
[default]
region = us-east-1
aws_access_key_id = {access_key}
aws_secret_access_key = {secret_key}
"""
LOCAL_HOST_NAMES = ["local", "localhost", "127.0.0.1"]
EXTERNALLY_MANAGED_MODEL = "EXTERNAL"
class ClipperManagerException(Exception):
pass
class Clipper:
"""
Connection to a Clipper instance for administrative purposes.
Sets up the machine for running Clipper. This includes verifying
SSH credentials and initializing Docker.
    Docker and docker-compose must already be installed on the machine
    before connecting to it.
Parameters
----------
host : str
The hostname of the machine to start Clipper on. The machine
should allow passwordless SSH access.
user : str, optional
The SSH username. This field must be specified if `host` is not local.
key_path : str, optional.
The path to the SSH private key. This field must be specified if `host` is not local.
sudo : bool, optional.
Specifies level of execution for docker commands (sudo if true, standard if false).
ssh_port : int, optional
The SSH port to use. Default is port 22.
check_for_docker : bool, optional
If True, checks that Docker is running on the host machine. Default is True.
redis_port : int, optional
The port to use for connecting to redis. Default is port 6379.
redis_ip : string, optional
The ip address of the redis instance that Clipper should use.
If unspecified, a docker container running redis will be started
on `host` at the port specified by `redis_port`.
redis_persistence_path : string, optional
The directory path to which redis data should be persisted. The directory
should not already exist. If unspecified, redis will not persist data to disk.
restart_containers : bool, optional
If true, containers will restart on failure. If false, containers
will not restart automatically.
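    Example
    -------
    Connect to a local Docker host (remote hosts additionally require
    ``user`` and ``key_path``)::
        clipper = Clipper("localhost")
        clipper.start()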
"""
def __init__(self,
host,
user=None,
key_path=None,
sudo=False,
ssh_port=22,
check_for_docker=True,
redis_ip=DEFAULT_REDIS_IP,
redis_port=DEFAULT_REDIS_PORT,
redis_persistence_path=None,
restart_containers=False):
self.redis_ip = redis_ip
self.redis_port = redis_port
self.docker_compost_dict = {
'networks': {
'default': {
'external': {
'name': DOCKER_NW
}
}
},
'services': {
'mgmt_frontend': {
'command': [
'--redis_ip=%s' % self.redis_ip,
'--redis_port=%d' % self.redis_port
],
'image':
'clipper/management_frontend:latest',
'ports': [
'%d:%d' % (CLIPPER_MANAGEMENT_PORT,
CLIPPER_MANAGEMENT_PORT)
],
'labels': {
CLIPPER_DOCKER_LABEL: ""
}
},
'query_frontend': {
'command': [
'--redis_ip=%s' % self.redis_ip,
'--redis_port=%d' % self.redis_port
],
'depends_on': ['mgmt_frontend'],
'image':
'clipper/query_frontend:latest',
'ports': [
'%d:%d' % (CLIPPER_RPC_PORT, CLIPPER_RPC_PORT),
'%d:%d' % (CLIPPER_QUERY_PORT, CLIPPER_QUERY_PORT)
],
'labels': {
CLIPPER_DOCKER_LABEL: ""
}
}
},
'version': '2'
}
start_redis = (self.redis_ip == DEFAULT_REDIS_IP)
if start_redis:
self.docker_compost_dict['services']['redis'] = {
'image': 'redis:alpine',
'ports': ['%d:%d' % (self.redis_port, self.redis_port)],
'command': "redis-server --port %d" % self.redis_port,
'labels': {
CLIPPER_DOCKER_LABEL: ""
}
}
self.docker_compost_dict['services']['mgmt_frontend'][
'depends_on'] = ['redis']
self.docker_compost_dict['services']['query_frontend'][
'depends_on'].append('redis')
if redis_persistence_path:
if not os.path.exists(redis_persistence_path):
self.docker_compost_dict['services']['redis'][
'volumes'] = ['%s:/data' % redis_persistence_path]
else:
print(
"The directory specified by the redis persistence path already exists"
)
raise ClipperManagerException(
"The directory specified by the redis persistence path already exists"
)
self.restart_containers = restart_containers
if self.restart_containers:
self.docker_compost_dict['services']['mgmt_frontend'][
'restart'] = 'always'
self.docker_compost_dict['services']['query_frontend'][
'restart'] = 'always'
if start_redis:
self.docker_compost_dict['services']['redis'][
'restart'] = 'always'
self.sudo = sudo
self.host = host
if self._host_is_local():
self.host = "localhost"
env.host_string = self.host
else:
if not user or not key_path:
print(
"user and key_path must be specified when instantiating Clipper with a nonlocal host"
)
raise ClipperManagerException(
"user and key_path must be specified when instantiating Clipper with a nonlocal host"
)
env.user = user
env.key_filename = key_path
env.host_string = "%s:%d" % (host, ssh_port)
if check_for_docker:
# Make sure docker is running on cluster
self._start_docker_if_necessary()
def _host_is_local(self):
return self.host in LOCAL_HOST_NAMES
def _start_docker_if_necessary(self):
with hide("warnings", "output", "running"):
print("Checking if Docker is running...")
self._execute_root("docker ps")
dc_installed = self._execute_root(
"docker-compose --version", warn_only=True)
if dc_installed.return_code != 0:
print("docker-compose not installed on host.")
raise ClipperManagerException(
"docker-compose not installed on host.")
nw_create_command = ("docker network create --driver bridge {nw}"
.format(nw=DOCKER_NW))
self._execute_root(nw_create_command, warn_only=True)
self._execute_standard(
"mkdir -p {model_repo}".format(model_repo=MODEL_REPO))
def _execute_root(self, *args, **kwargs):
if not self.sudo:
return self._execute_standard(*args, **kwargs)
elif self._host_is_local():
return self._execute_local(True, *args, **kwargs)
else:
return sudo(*args, **kwargs)
def _execute_standard(self, *args, **kwargs):
if self._host_is_local():
return self._execute_local(False, *args, **kwargs)
else:
return run(*args, **kwargs)
def _execute_local(self, as_root, *args, **kwargs):
if self.sudo and as_root:
root_args = list(args)
root_args[0] = "sudo %s" % root_args[0]
args = tuple(root_args)
# local is not currently capable of simultaneously printing and
# capturing output, as run/sudo do. The capture kwarg allows you to
# switch between printing and capturing as necessary, and defaults to
# False. In this case, we need to capture the output and return it.
if "capture" not in kwargs:
kwargs["capture"] = True
# fabric.local() does not accept the "warn_only"
# key word argument, so we must remove it before
# calling
if "warn_only" in kwargs:
del kwargs["warn_only"]
# Forces execution to continue in the face of an error,
# just like warn_only=True
with warn_only():
result = local(*args, **kwargs)
else:
result = local(*args, **kwargs)
return result
def _execute_append(self, filename, text, **kwargs):
if self._host_is_local():
            # As with fabric.append(), we should only append the text
            # if it is not already present within the file
            with open(filename, "a+") as f:
                # the initial read position for "a+" is platform dependent,
                # so rewind before checking the existing contents
                f.seek(0)
                if text not in f.read():
                    f.write(text)
else:
append(filename, text, **kwargs)
def _execute_put(self, local_path, remote_path, *args, **kwargs):
if self._host_is_local():
# We should only copy data if the paths are different
if local_path != remote_path:
if os.path.isdir(local_path):
remote_path = os.path.join(remote_path,
os.path.basename(local_path))
# if remote_path exists, delete it because shutil.copytree requires
# that the dst path doesn't exist
if os.path.exists(remote_path):
shutil.rmtree(remote_path)
shutil.copytree(local_path, remote_path)
else:
shutil.copy2(local_path, remote_path)
else:
put(
local_path=local_path,
remote_path=remote_path,
*args,
**kwargs)
def start(self):
"""Start a Clipper instance.
"""
with hide("output", "warnings", "running"):
self._execute_standard("rm -f docker-compose.yml")
self._execute_append("docker-compose.yml",
yaml.dump(
self.docker_compost_dict,
default_flow_style=False))
print(
"Note: Docker must download the Clipper Docker images if they are not already cached. This may take awhile."
)
self._execute_root("docker-compose up -d query_frontend")
print("Clipper is running")
def register_application(self, name, model, input_type, default_output,
slo_micros):
"""Register a new Clipper application.
Parameters
----------
name : str
The name of the application.
model : str
The name of the model this application will query.
input_type : str
One of "integers", "floats", "doubles", "bytes", or "strings".
default_output : string
The default prediction to use if the model does not return a prediction
by the end of the latency objective.
slo_micros : int
The query latency objective for the application in microseconds.
This is the processing latency between Clipper receiving a request
and sending a response. It does not account for network latencies
before a request is received or after a response is sent.
If Clipper cannot process a query within the latency objective,
the default output is returned. Therefore, it is recommended that
the objective not be set aggressively low unless absolutely necessary.
40000 (40ms) is a good starting value, but the optimal latency objective
will vary depending on the application.
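        Example
        -------
        Register a hypothetical application ``digits`` backed by a model named
        ``digits_model`` (names here are illustrative)::
            clipper.register_application(
                "digits", "digits_model", "doubles",
                default_output="-1.0", slo_micros=40000)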
"""
url = "http://%s:%d/admin/add_app" % (self.host,
CLIPPER_MANAGEMENT_PORT)
req_json = json.dumps({
"name": name,
"candidate_model_names": [model],
"input_type": input_type,
"default_output": default_output,
"latency_slo_micros": slo_micros
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
print(r.text)
def get_all_apps(self, verbose=False):
"""Gets information about all applications registered with Clipper.
Parameters
----------
verbose : bool
If set to False, the returned list contains the apps' names.
If set to True, the list contains application info dictionaries.
These dictionaries have the same attribute name-value pairs that were
provided to `register_application`.
Returns
-------
list
Returns a list of information about all apps registered to Clipper.
If no apps are registered with Clipper, an empty list is returned.
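        Example
        -------
        Assuming ``clipper`` is a connected instance::
            app_names = clipper.get_all_apps()
            app_details = clipper.get_all_apps(verbose=True)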
"""
url = "http://%s:1338/admin/get_all_applications" % self.host
req_json = json.dumps({"verbose": verbose})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
return r.json()
else:
print(r.text)
return None
def get_app_info(self, name):
"""Gets detailed information about a registered application.
Parameters
----------
name : str
The name of the application to look up
Returns
-------
dict
Returns a dictionary with the specified application's info. This
will contain the attribute name-value pairs that were provided to
`register_application`. If no application with name `name` is
registered with Clipper, None is returned.
"""
url = "http://%s:1338/admin/get_application" % self.host
req_json = json.dumps({"name": name})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
app_info = r.json()
if len(app_info) == 0:
return None
return app_info
else:
print(r.text)
return None
def deploy_model(self,
name,
version,
model_data,
container_name,
input_type,
labels=DEFAULT_LABEL,
num_containers=1):
"""Registers a model with Clipper and deploys instances of it in containers.
Parameters
----------
name : str
The name to assign this model.
version : Any object with a string representation (with __str__ implementation)
The version to assign this model.
model_data : str or BaseEstimator
The trained model to add to Clipper. This can either be a
Scikit-Learn trained model object (an instance of BaseEstimator),
or a path to a serialized model. Note that many model serialization
formats split the model across multiple files (e.g. definition file
and weights file or files). If this is the case, `model_data` must be a path
to the root of a directory tree containing ALL the needed files.
Depending on the model serialization library you use, this may or may not
be the path you provided to the serialize method call.
container_name : str
The Docker container image to use to run this model container.
input_type : str
One of "integers", "floats", "doubles", "bytes", or "strings".
labels : list of str, optional
A list of strings annotating the model
num_containers : int, optional
The number of replicas of the model to create. More replicas can be
created later as well. Defaults to 1.
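        Example
        -------
        Deploy version 1 of a hypothetical trained scikit-learn estimator ``clf``
        using an illustrative container image name::
            clipper.deploy_model(
                "example_model", 1, clf, "clipper/example-sklearn-container",
                "doubles", num_containers=1)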
"""
with hide("warnings", "output", "running"):
if isinstance(model_data, base.BaseEstimator):
fname = name.replace("/", "_")
pkl_path = '/tmp/%s/%s.pkl' % (fname, fname)
model_data_path = "/tmp/%s" % fname
try:
os.mkdir(model_data_path)
except OSError:
pass
joblib.dump(model_data, pkl_path)
elif isinstance(model_data, str):
# assume that model_data is a path to the serialized model
model_data_path = model_data
print("model_data_path is: %s" % model_data_path)
else:
warn("%s is invalid model format" % str(type(model_data)))
return False
version = str(version)
vol = "{model_repo}/{name}/{version}".format(
model_repo=MODEL_REPO, name=name, version=version)
# publish model to Clipper and verify success before copying model
# parameters to Clipper and starting containers
if not self._publish_new_model(
name, version, labels, input_type, container_name,
os.path.join(vol, os.path.basename(model_data_path))):
return False
print("Published model to Clipper")
if (not self._put_container_on_host(container_name)):
return False
# Put model parameter data on host
with hide("warnings", "output", "running"):
self._execute_standard("mkdir -p {vol}".format(vol=vol))
with cd(vol):
with hide("warnings", "output", "running"):
self._execute_put(model_data_path, vol)
print("Copied model data to host")
# aggregate results of starting all containers
return all([
self.add_container(name, version)
for r in range(num_containers)
])
def register_external_model(self,
name,
version,
input_type,
labels=DEFAULT_LABEL):
"""Registers a model with Clipper without deploying it in any containers.
Parameters
----------
name : str
The name to assign this model.
version : Any object with a string representation (with __str__ implementation)
The version to assign this model.
input_type : str
One of "integers", "floats", "doubles", "bytes", or "strings".
labels : list of str, optional
A list of strings annotating the model.
"""
version = str(version)
return self._publish_new_model(name, version, labels, input_type,
EXTERNALLY_MANAGED_MODEL,
EXTERNALLY_MANAGED_MODEL)
def _save_python_function(self, name, predict_function):
relative_base_serializations_dir = "predict_serializations"
predict_fname = "predict_func.pkl"
environment_fname = "environment.yml"
conda_dep_fname = "conda_dependencies.txt"
pip_dep_fname = "pip_dependencies.txt"
# Serialize function
s = StringIO()
c = CloudPickler(s, 2)
c.dump(predict_function)
serialized_prediction_function = s.getvalue()
# Set up serialization directory
serialization_dir = os.path.join(
'/tmp', relative_base_serializations_dir, name)
if not os.path.exists(serialization_dir):
os.makedirs(serialization_dir)
# Export Anaconda environment
environment_file_abs_path = os.path.join(serialization_dir,
environment_fname)
conda_env_exported = self._export_conda_env(environment_file_abs_path)
if conda_env_exported:
print("Anaconda environment found. Verifying packages.")
# Confirm that packages installed through conda are solvable
# Write out conda and pip dependency files to be supplied to container
if not (self._check_and_write_dependencies(
environment_file_abs_path, serialization_dir,
conda_dep_fname, pip_dep_fname)):
return False
print("Supplied environment details")
else:
print(
"Warning: Anaconda environment was either not found or exporting the environment "
"failed. Your function will still be serialized and deployed, but may fail due to "
"missing dependencies. In this case, please re-run inside an Anaconda environment. "
"See http://clipper.ai/documentation/python_model_deployment/ for more information."
)
# Write out function serialization
func_file_path = os.path.join(serialization_dir, predict_fname)
with open(func_file_path, "w") as serialized_function_file:
serialized_function_file.write(serialized_prediction_function)
print("Serialized and supplied predict function")
return serialization_dir
def deploy_pyspark_model(self,
name,
version,
predict_function,
pyspark_model,
sc,
input_type,
labels=DEFAULT_LABEL,
num_containers=1):
"""Deploy a Spark MLLib model to Clipper.
Parameters
----------
name : str
The name to assign this model.
version : Any object with a string representation (with __str__ implementation)
The version to assign this model.
predict_function : function
A function that takes three arguments, a SparkContext, the ``model`` parameter and
a list of inputs of the type specified by the ``input_type`` argument.
Any state associated with the function other than the Spark model should
be captured via closure capture. Note that the function must not capture
the SparkContext or the model implicitly, as these objects are not pickleable
and therefore will prevent the ``predict_function`` from being serialized.
pyspark_model : pyspark.mllib.util.Saveable
An object that mixes in the pyspark Saveable mixin. Generally this
is either an mllib model or transformer. This model will be loaded
into the Clipper model container and provided as an argument to the
predict function each time it is called.
sc : SparkContext
The SparkContext associated with the model. This is needed
to save the model for pyspark.mllib models.
input_type : str
One of "integers", "floats", "doubles", "bytes", or "strings".
labels : list of str, optional
A set of strings annotating the model
num_containers : int, optional
The number of replicas of the model to create. More replicas can be
created later as well. Defaults to 1.
Returns
-------
bool
True if the model was successfully deployed. False otherwise.
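        Example
        -------
        Deploy a hypothetical MLlib model ``lr_model`` with an active
        SparkContext ``sc`` (all names here are illustrative)::
            def predict(spark, model, inputs):
                return [str(model.predict(x)) for x in inputs]
            clipper.deploy_pyspark_model(
                "spark_lr", 1, predict, lr_model, sc, "doubles")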
"""
model_class = re.search("pyspark.*'",
str(type(pyspark_model))).group(0).strip("'")
if model_class is None:
raise ClipperManagerException(
"pyspark_model argument was not a pyspark object")
# save predict function
serialization_dir = self._save_python_function(name, predict_function)
# save Spark model
spark_model_save_loc = os.path.join(serialization_dir,
"pyspark_model_data")
try:
# we only import pyspark here so that if the caller of the library does
# not want to use this function, clipper_manager does not have a dependency
# on pyspark
import pyspark
if isinstance(pyspark_model, pyspark.ml.pipeline.PipelineModel):
pyspark_model.save(spark_model_save_loc)
else:
pyspark_model.save(sc, spark_model_save_loc)
except Exception as e:
print("Error saving spark model: %s" % e)
raise e
pyspark_container = "clipper/pyspark-container"
        # record the pyspark class name (something like
        # pyspark.mllib.classification.LogisticRegressionModel) so the model
        # container knows how to load the saved model
with open(os.path.join(serialization_dir, "metadata.json"),
"w") as metadata_file:
json.dump({"model_class": model_class}, metadata_file)
print("Spark model saved")
# Deploy model
deploy_result = self.deploy_model(name, version, serialization_dir,
pyspark_container, input_type,
labels, num_containers)
# Remove temp files
shutil.rmtree(serialization_dir)
return deploy_result
def deploy_predict_function(self,
name,
version,
predict_function,
input_type,
labels=DEFAULT_LABEL,
num_containers=1):
"""Deploy an arbitrary Python function to Clipper.
The function should take a list of inputs of the type specified by `input_type` and
return a Python or numpy array of predictions as strings. All dependencies for the function
must be installed with Anaconda or Pip and this function must be called from within an Anaconda
environment.
Parameters
----------
name : str
The name to assign this model.
version : Any object with a string representation (with __str__ implementation)
The version to assign this model.
predict_function : function
The prediction function. Any state associated with the function should be
captured via closure capture.
input_type : str
One of "integers", "floats", "doubles", "bytes", or "strings".
labels : list of str, optional
A list of strings annotating the model
num_containers : int, optional
The number of replicas of the model to create. More replicas can be
created later as well. Defaults to 1.
Returns
-------
bool
True if the model was successfully deployed. False otherwise.
Example
-------
Define a feature function ``center()`` and train a model on the featurized input::
def center(xs):
means = np.mean(xs, axis=0)
return xs - means
centered_xs = center(xs)
model = sklearn.linear_model.LogisticRegression()
model.fit(centered_xs, ys)
def centered_predict(inputs):
centered_inputs = center(inputs)
return model.predict(centered_inputs)
clipper.deploy_predict_function(
"example_model",
1,
centered_predict,
"doubles",
num_containers=1)
"""
default_python_container = "clipper/python-container"
serialization_dir = self._save_python_function(name, predict_function)
# Deploy function
deploy_result = self.deploy_model(name, version, serialization_dir,
default_python_container, input_type,
labels, num_containers)
# Remove temp files
shutil.rmtree(serialization_dir)
return deploy_result
def get_all_models(self, verbose=False):
"""Gets information about all models registered with Clipper.
Parameters
----------
verbose : bool
If set to False, the returned list contains the models' names.
If set to True, the list contains model info dictionaries.
Returns
-------
list
            Returns a list of information about all models registered to Clipper.
If no models are registered with Clipper, an empty list is returned.
"""
url = "http://%s:1338/admin/get_all_models" % self.host
req_json = json.dumps({"verbose": verbose})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
return r.json()
else:
print(r.text)
return None
def get_model_info(self, model_name, model_version):
"""Gets detailed information about a registered model.
Parameters
----------
model_name : str
The name of the model to look up
model_version : Any object with a string representation (with __str__ implementation)
The version of the model to look up
Returns
-------
dict
Returns a dictionary with the specified model's info.
If no model with name `model_name@model_version` is
registered with Clipper, None is returned.
"""
model_version = str(model_version)
url = "http://%s:1338/admin/get_model" % self.host
req_json = json.dumps({
"model_name": model_name,
"model_version": model_version
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
app_info = r.json()
if len(app_info) == 0:
return None
return app_info
else:
print(r.text)
return None
def get_all_containers(self, verbose=False):
"""Gets information about all containers registered with Clipper.
Parameters
----------
verbose : bool
            If set to False, the returned list contains the containers' names.
If set to True, the list contains container info dictionaries.
Returns
-------
list
            Returns a list of information about all containers registered to Clipper.
            If no containers are registered with Clipper, an empty list is returned.
"""
url = "http://%s:1338/admin/get_all_containers" % self.host
req_json = json.dumps({"verbose": verbose})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
return r.json()
else:
print(r.text)
return None
def get_container_info(self, model_name, model_version, replica_id):
"""Gets detailed information about a registered container.
Parameters
----------
model_name : str
The name of the container to look up
model_version : Any object with a string representation (with __str__ implementation)
The version of the container to look up
replica_id : int
The container replica to look up
Returns
-------
dict
A dictionary with the specified container's info.
If no corresponding container is registered with Clipper, None is returned.
"""
model_version = str(model_version)
url = "http://%s:1338/admin/get_container" % self.host
req_json = json.dumps({
"model_name": model_name,
"model_version": model_version,
"replica_id": replica_id,
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
app_info = r.json()
if len(app_info) == 0:
return None
return app_info
else:
print(r.text)
return None
def _inspect_selection_policy(self, app_name, uid):
# NOTE: This method is private (it's still functional, but it won't be documented)
# until Clipper supports different selection policies
"""Fetches a human-readable string with the current selection policy state.
Parameters
----------
app_name : str
The application whose policy state should be inspected.
uid : int
The user whose policy state should be inspected. The convention
in Clipper is to use 0 as the default user ID, but this may be
application specific.
Returns
-------
str
The string describing the selection state. Note that if the
policy state was not found, this string may contain an error
message from Clipper describing the problem.
"""
url = "http://%s:%d/admin/get_state" % (self.host,
CLIPPER_MANAGEMENT_PORT)
req_json = json.dumps({
"app_name": app_name,
"uid": uid,
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
return r.text
def _export_conda_env(self, environment_file_abs_path):
"""Returns true if attempt to export the current conda environment is successful
Parameters
----------
environment_file_abs_path : str
The desired absolute path for the exported conda environment file
"""
process = subprocess.Popen(
"PIP_FORMAT=legacy conda env export >> {environment_file_abs_path}".
format(environment_file_abs_path=environment_file_abs_path),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
process.wait()
return process.returncode == 0
def _check_and_write_dependencies(self, environment_path, directory,
conda_dep_fname, pip_dep_fname):
"""Returns true if the provided conda environment is compatible with the container os.
If packages listed in specified conda environment file have conflicting dependencies,
this function will warn the user and return False.
If there are no conflicting package dependencies, existence of the packages in the
container conda channel is tested. The user is warned about any missing packages.
All existing conda packages are written out to `conda_dep_fname` and pip packages
to `pip_dep_fname` in the given `directory`. This function then returns True.
Parameters
----------
environment_path : str
The path to the input conda environment file
directory : str
            The path to the directory containing the environment file
conda_dep_fname : str
The name of the output conda dependency file
pip_dep_fname : str
The name of the output pip dependency file
Returns
-------
bool
            Returns True if the packages specified in the environment file at `environment_path`
            are compatible with conda on the container os. Otherwise returns False.
"""
if "CONDA_PREFIX" not in os.environ:
print("No Anaconda environment found")
return False
root_prefix = os.environ["CONDA_PREFIX"].split("envs")[0]
py_path = os.path.join(root_prefix, "bin", "python")
process = subprocess.Popen(
"{py_path} {cur_dir}/check_and_write_deps.py {environment_path} {directory} {platform} {conda_dep_fname} {pip_dep_fname}".
format(
py_path=py_path,
cur_dir=cur_dir,
environment_path=environment_path,
directory=directory,
platform=CONTAINER_CONDA_PLATFORM,
conda_dep_fname=conda_dep_fname,
pip_dep_fname=pip_dep_fname),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = process.communicate()
print(out)
print(err)
return process.returncode == 0
def add_container(self, model_name, model_version):
"""Create a new container for an existing model.
Starts a new container for a model that has already been added to
Clipper. Note that models are uniquely identified by both name
and version, so this method will fail if you have not already called
`Clipper.deploy_model()` for the specified name and version.
Parameters
----------
model_name : str
The name of the model
model_version : Any object with a string representation (with __str__ implementation)
The version of the model
Returns
----------
bool
True if the container was added successfully and False
if the container could not be added.
"""
model_version = str(model_version)
with hide("warnings", "output", "running"):
# Look up model info in Redis
if self.redis_ip == DEFAULT_REDIS_IP:
redis_host = self.host
else:
redis_host = self.redis_ip
model_key = "{mn}:{mv}".format(mn=model_name, mv=model_version)
result = local(
"redis-cli -h {host} -p {redis_port} -n {db} hgetall {key}".
format(
host=redis_host,
redis_port=self.redis_port,
key=model_key,
db=REDIS_MODEL_DB_NUM),
capture=True)
print(result)
if "empty list or set" in result.stdout:
# Model not found
warn("Trying to add container but model {mn}:{mv} not in "
"Redis".format(mn=model_name, mv=model_version))
return False
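            # redis-cli hgetall prints each field and its value on alternating
            # lines; pair them up to rebuild the model metadata hash as a dict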
splits = result.stdout.split("\n")
model_metadata = dict([(splits[i].strip(), splits[i + 1].strip())
for i in range(0, len(splits), 2)])
image_name = model_metadata["container_name"]
model_data_path = model_metadata["model_data_path"]
model_input_type = model_metadata["input_type"]
restart_policy = 'always' if self.restart_containers else 'no'
if image_name != EXTERNALLY_MANAGED_MODEL:
# Start container
add_container_cmd = (
"docker run -d --network={nw} --restart={restart_policy} -v {path}:/model:ro "
"-e \"CLIPPER_MODEL_NAME={mn}\" -e \"CLIPPER_MODEL_VERSION={mv}\" "
"-e \"CLIPPER_IP=query_frontend\" -e \"CLIPPER_INPUT_TYPE={mip}\" -l \"{clipper_label}\" -l \"{mv_label}\" "
"{image}".format(
path=model_data_path,
nw=DOCKER_NW,
image=image_name,
mn=model_name,
mv=model_version,
mip=model_input_type,
clipper_label=CLIPPER_DOCKER_LABEL,
mv_label="%s=%s:%s" % (CLIPPER_MODEL_CONTAINER_LABEL,
model_name, model_version),
restart_policy=restart_policy))
result = self._execute_root(add_container_cmd)
return result.return_code == 0
else:
print("Cannot start containers for externally managed model %s"
% model_name)
return False
def get_clipper_logs(self):
"""Copies the logs from all Docker containers running on the host machine
that have been tagged with the Clipper label (ai.clipper.container.label) into
the local filesystem.
Returns
-------
list(str)
Returns a list of local filenames containing the Docker container log snapshots.
"""
container_ids = self._get_clipper_container_ids()
cur_time_logs_path = os.path.join(CLIPPER_LOGS_PATH,
time.strftime("%Y%m%d-%H%M%S"))
if not os.path.exists(cur_time_logs_path):
os.makedirs(cur_time_logs_path)
log_file_names = []
for container in container_ids:
output = self._execute_root(
"docker logs {container}".format(container=container))
cur_log_fname = os.path.join(cur_time_logs_path,
"%s-container.log" % container)
with open(cur_log_fname, "w") as f:
f.write(output)
log_file_names.append(cur_log_fname)
return log_file_names
def _get_clipper_container_ids(self):
"""
Gets the container IDs of all containers labeled with the clipper label
"""
containers = self._execute_root(
"docker ps -aq --filter label={clipper_label}".format(
clipper_label=CLIPPER_DOCKER_LABEL))
ids = [l.strip() for l in containers.split("\n")]
print("Clipper container IDS found: %s" % str(ids))
return ids
def inspect_instance(self):
"""Fetches metrics from the running Clipper instance.
Returns
-------
str
The JSON string containing the current set of metrics
for this instance. On error, the string will be an error message
(not JSON formatted).
"""
url = "http://%s:%d/metrics" % (self.host, CLIPPER_QUERY_PORT)
r = requests.get(url)
try:
s = r.json()
except TypeError:
s = r.text
return s
def set_model_version(self, model_name, model_version, num_containers=0):
"""Changes the current model version to `model_version`.
This method can be used to do model rollback and rollforward to
any previously deployed version of the model. Note that model
versions automatically get updated when `deploy_model()` is
called, so there is no need to manually update the version as well.
Parameters
----------
model_name : str
The name of the model
model_version : Any object with a string representation (with __str__ implementation)
The version of the model. Note that `model_version`
must be a model version that has already been deployed.
num_containers : int
The number of new containers to start with the newly
selected model version.
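        Example
        -------
        Roll back a hypothetical model ``example_model`` to version 1 and start
        two containers serving it::
            clipper.set_model_version("example_model", 1, num_containers=2)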
"""
model_version = str(model_version)
url = "http://%s:%d/admin/set_model_version" % (
self.host, CLIPPER_MANAGEMENT_PORT)
req_json = json.dumps({
"model_name": model_name,
"model_version": model_version
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
print(r.text)
        for _ in range(num_containers):
self.add_container(model_name, model_version)
def remove_inactive_containers(self, model_name):
"""Removes all containers serving stale versions of the specified model.
Parameters
----------
model_name : str
The name of the model whose old containers you want to clean.
"""
# Get all Docker containers tagged as model containers
num_containers_removed = 0
with hide("output", "warnings", "running"):
containers = self._execute_root(
"docker ps -aq --filter label={model_container_label}".format(
model_container_label=CLIPPER_MODEL_CONTAINER_LABEL))
if len(containers) > 0:
container_ids = [l.strip() for l in containers.split("\n")]
for container in container_ids:
# returns a string formatted as "<model_name>:<model_version>"
if self._host_is_local():
container_model_name_and_version = self._execute_root(
"docker inspect --format \"{{ index .Config.Labels \\\"%s\\\"}}\" %s"
% (CLIPPER_MODEL_CONTAINER_LABEL, container))
else:
container_model_name_and_version = self._execute_root(
"docker inspect --format \"{{ index .Config.Labels \\\\\"%s\\\\\"}}\" %s"
% (CLIPPER_MODEL_CONTAINER_LABEL, container))
splits = container_model_name_and_version.split(":")
container_model_name = splits[0]
container_model_version = int(splits[1])
if container_model_name == model_name:
# check if container_model_version is the currently deployed version
model_info = self.get_model_info(
container_model_name, container_model_version)
                        if model_info is None or not model_info["is_current_version"]:
self._execute_root("docker stop {container}".
format(container=container))
self._execute_root("docker rm {container}".format(
container=container))
num_containers_removed += 1
print("Removed %d inactive containers for model %s" %
(num_containers_removed, model_name))
return num_containers_removed
def stop_all(self):
"""Stops and removes all Clipper Docker containers on the host.
"""
print("Stopping Clipper and all running models...")
with hide("output", "warnings", "running"):
container_ids = self._get_clipper_container_ids()
container_id_str = " ".join(container_ids)
self._execute_root(
"docker stop {ids}".format(ids=container_id_str),
warn_only=True)
self._execute_root(
"docker rm {ids}".format(ids=container_id_str), warn_only=True)
def _publish_new_model(self, name, version, labels, input_type,
container_name, model_data_path):
url = "http://%s:%d/admin/add_model" % (self.host,
CLIPPER_MANAGEMENT_PORT)
req_json = json.dumps({
"model_name": name,
"model_version": version,
"labels": labels,
"input_type": input_type,
"container_name": container_name,
"model_data_path": model_data_path
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
return True
else:
print("Error publishing model: %s" % r.text)
return False
def _put_container_on_host(self, container_name):
"""Puts the provided container on the host.
Parameters
        ----------
container_name : str
The name of the container.
Notes
-----
This method will first check the host, then Docker Hub, then the local
machine to find the container.
This method is safe to call multiple times with the same container name.
Subsequent calls will detect that the container is already present on
the host and do nothing.
"""
with hide("output", "warnings", "running"):
# first see if container is already present on host
host_result = self._execute_root(
"docker images -q {cn}".format(cn=container_name))
if len(host_result.stdout) > 0:
print("Found %s on host" % container_name)
return True
# now try to pull from Docker Hub
hub_result = self._execute_root(
"docker pull {cn}".format(cn=container_name), warn_only=True)
if hub_result.return_code == 0:
print("Found %s in Docker hub" % container_name)
return True
# assume container_name refers to a local container and
# copy it to host
local_result = local(
"docker images -q {cn}".format(cn=container_name))
if len(local_result.stdout) > 0:
saved_fname = container_name.replace("/", "_")
subprocess.call("docker save -o /tmp/{fn}.tar {cn}".format(
fn=saved_fname, cn=container_name))
tar_loc = "/tmp/{fn}.tar".format(fn=saved_fname)
self._execute_put(tar_loc, tar_loc)
self._execute_root("docker load -i {loc}".format(loc=tar_loc))
# self._execute_root("docker tag {image_id} {cn}".format(
# image_id=image_id, cn=cn))
# now check to make sure we can access it
host_result = self._execute_root(
"docker images -q {cn}".format(cn=container_name))
if len(host_result.stdout) > 0:
print("Successfuly copied %s to host" % container_name)
return True
else:
warn("Problem copying container %s to host" %
container_name)
return False
# out of options
warn("Could not find %s, please try with a valid "
"container docker image")
return False
def deploy_R_model(self,
name,
version,
model_data,
labels=DEFAULT_LABEL,
num_containers=1):
"""Registers a model with Clipper and deploys instances of it in containers.
Parameters
----------
name : str
The name to assign this model.
version : int
The version to assign this model.
        model_data : rpy2.robjects.vectors.ListVector
            The trained R model to add to Clipper. rpy2 encapsulates any given R model
            as an rpy2.robjects.vectors.ListVector. The model will be loaded into the
            Clipper model container and provided as an argument to the predict function
            each time it is called. The container image is fixed to
            "clipper/r_python_container" and the input type to "strings" (from which
            model specific dataframes can be derived for carrying out predictions).
labels : list of str, optional
A set of strings annotating the model
num_containers : int, optional
The number of replicas of the model to create. More replicas can be
created later as well. Defaults to 1.
"""
# importing some R specific dependencies
import rpy2.robjects as ro
from rpy2.robjects.packages import importr
base = importr('base')
input_type="strings"
container_name = "clipper/r_python_container"
with hide("warnings", "output", "running"):
fname = name.replace("/", "_")
rds_path = '/tmp/%s/%s.rds' % (fname, fname)
model_data_path = "/tmp/%s" % fname
try:
os.mkdir(model_data_path)
except OSError:
pass
base.saveRDS(model_data, rds_path)
vol = "{model_repo}/{name}/{version}".format(
model_repo=MODEL_REPO, name=name, version=version)
# publish model to Clipper and verify success before copying model
# parameters to Clipper and starting containers
if not self._publish_new_model(
name, version, labels, input_type, container_name,
os.path.join(vol, os.path.basename(model_data_path))):
return False
print("Published model to Clipper")
# Put model parameter data on host
with hide("warnings", "output", "running"):
self._execute_standard("mkdir -p {vol}".format(vol=vol))
with hide("output", "running"):
self._execute_put(model_data_path, vol)
print("Copied model data to host")
# aggregate results of starting all containers
return all([
self.add_container(name, version)
for r in range(num_containers)
])
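# A minimal end-to-end sketch (not part of the library). It assumes Docker is
# running on localhost and that the Clipper images can be pulled; all names
# below are illustrative.
if __name__ == "__main__":
    clipper = Clipper("localhost")
    clipper.start()
    clipper.register_application(
        "example_app", "example_model", "doubles", "-1.0", 40000)
    print(clipper.get_all_apps(verbose=True))
    clipper.stop_all()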
# QMS Classes/Functions stored in this file
# Main methods are called on the Shipyard class
# (e.g. traverseGrabPackageInside uses traverseGrabPackage
# within the container class) to access container/package class methods.
#
# doubly linked list functions taken from Ron's lecture slides / DLListV2.py
# to hold the container nodes
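#
# Example usage (illustrative names; ids are generated when "None" is passed):
#     yard = Shipyard()
#     yard.createPackage("None", "BOOKS", "EDMONTON", 150)
#     yard.shippingManifestAll()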
class Shipyard :
# this Node class acts as the holder of each element of the linked list
    # each node holds references to the next and previous nodes; for the sentinel
    # header and trailer these references are None where no neighbour exists
class _Node :
def __init__(self, elem, next = None, back = None) :
self._elem = elem
self._next = next
self._back = back
    # on initialization the list is empty: the sentinel header and trailer point
    # at each other, and the size stays 0 until something is inserted
def __init__(self) :
self._header = self._Node(None) # prepares the sentinel node
self._trailer = self._Node(None) # prepares the sentinel node
self._header._next = self._trailer # prepares the sentinel node
self._trailer._back = self._header # prepares the sentinel node
self._size = 0 # initial size of doubly linked list
self.uniqueID = 0 # unique id generation for packages
self.uniqueContainerID = 0 # unique id generation for containers
        self.containers = ""            # pointer to the most recently created container
        # the current yard state can be saved to / loaded from a
        # yard data file via saveYardData() and loadYardData()
        self._destContainer = ""        # container currently targeted for package insertion
    # overload len so that len(yard) reports the number of containers in the shipyard
def __len__(self) :
return self._size # details how many containers there are in the shipyard
# create a container node holding pointer to container object
# elem = destination, id = container id
# they are alphabetically sorted
def createContainer(self, id, elem) :
        # when loading from a file, use the saved id read from the text file
if id != "None":
self.containers = self.Container(id, "Calgary")
        # when creating a container via the menu, the system generates an id
if id == "None":
self.uniqueContainerID += 1
testcid = self.traversePrint(self.uniqueContainerID)
# keep trying new container id's till one is available
while (testcid == "used"):
self.uniqueContainerID += 1
testcid = self.traversePrint(self.uniqueContainerID)
self.containers = self.Container(testcid, "Calgary")
# store the id as id for later
id = testcid
# sorted insert containers in alphabetical order
# increase size + 1
self._size += 1
        # duplicate destinations are allowed here; container id uniqueness is checked separately (see traversePrint)
insPoint = self._header
while (insPoint._next != self._trailer and elem >= insPoint._next._elem) :
insPoint = insPoint._next
postPoint = insPoint._next
newNode = self._Node(elem, postPoint, insPoint) # creation of new node
newNode._container = self.containers # contain the pointer to the created container
newNode._container._id = id # store container id
newNode._container._destination = elem # store destination information inside the container
insPoint._next = newNode # to allow sorted insert
postPoint._back = newNode # to allow sorted insert
return (self.containers)
# Packages are sorted by weight.
# Creates a package, checks if container is ready, makes one if it isn't
# then, inserts package into the container.
    # it checks whether adding the package would push the container over 2000 lbs;
    # if so, a new container is made.
    # a single package heavier than 2000 lbs is rejected by the menu,
    # so the system never accepts such a package here
def createPackage(self, id, name, destination, weight) :
# init vars
#packageholder holds pointer to created package object
packageholder = ""
#_destContainer holds pointer to container object
self._destContainer = ""
# traverse containers to check for matching destination + suitable weight
self._destContainer = self.traverseFindDest(destination, weight)
# if new container needed where no container made
# or not enough weight
if (self._destContainer == "new container needed"):
self._destContainer = self.createContainer("None", destination)
# if given id, use given id (when loading from file)
if id != "None":
packageholder = self.Package( str(id), name, weight, destination)
#save pointer to package in container node
self._destContainer.insertPackage(packageholder)
# increase number of packages by 1
# increase total weight of container
self._destContainer._numpkgs += 1
self._destContainer._weight += int(weight)
# when inserting package via menu with no id, system generates id
if id == "None":
self.uniqueID += 1
# make sure packageID is unique
checkPID = self.traverseGrabPackageInside(str(self.uniqueID))
# duplication check on unique id, increase by 1 if id found
while (checkPID[0] != "Package Not Found"):
self.uniqueID += 1
checkPID = self.traverseGrabPackageInside(str(self.uniqueID))
packageholder = self.Package( str(self.uniqueID), name, weight, destination)
#save pointer to package in container node
self._destContainer.insertPackage(packageholder)
# increase count of number of packages
self._destContainer._numpkgs += 1
            # increase total weight by the added package
self._destContainer._weight += int(weight)
# Store package into specified container
# This function is used when loading yard data with containers and packages
def loadStorePackage(self, id, name, destination, weight, cid) :
# init vars
packageholder = ""
self._destContainer = ""
# traverse containers to check for matching destination + suitable weight
self._destContainer = self.traverseFindByCID(cid)
# if new container needed where no container made
# or not enough weight
if (self._destContainer == "new container needed"):
self._destContainer = self.createContainer("None", destination)
# if given id, use given id (when loading from file)
if id != "None":
packageholder = self.Package( str(id), name, weight, destination)
#save pointer to package in container node
# increase container's number of package count
# increase total weight of container
self._destContainer.insertPackage(packageholder)
self._destContainer._numpkgs += 1
self._destContainer._weight += int(weight)
# when inserting package via menu with no id, system generates id
if id == "None":
self.uniqueID += 1
# make sure packageID is unique
checkPID = self.traverseGrabPackageInside(str(self.uniqueID))
# duplication check on unique id, increase by 1 if id found
while (checkPID[0] != "Package Not Found"):
self.uniqueID += 1
checkPID = self.traverseGrabPackageInside(str(self.uniqueID))
packageholder = self.Package( str(self.uniqueID), name, weight, destination)
#save pointer to package in container node
self._destContainer.insertPackage(packageholder)
# increase count of number of packages
self._destContainer._numpkgs += 1
            # increase total weight by the added package
self._destContainer._weight += int(weight)
# delete specific package by its package ID looking through containers
# it accesses the function deletePackageByID in the container class
# once the container containing the package is found
# otherwise it reports package not found
def deletePackageByIDInside(self, findpkgid) :
tmpRef = self._header._next
while (tmpRef != self._trailer):
findpkg = tmpRef._container.traverseGrabPackage(findpkgid)
if (findpkg[0] != "Package Not Found"):
tmpRef._container.deletePackageByID(int(findpkg[0]))
tmpRef._container._numpkgs -= 1
tmpRef._container._weight -= int(findpkg[3])
return ("Package Deleted From System")
tmpRef = tmpRef._next
return ("Package Not Found", "", "", "", "")
# Delete Containers based on destination which accesses container CID
# to delete specified container
# Not used, but it is a useful function.
# it can delete all the containers that have the same destination given.
def deleteContainersDest(self, dest) :
tmpRef = self._header._next
# keep doing till all containers with the destination are removed
while (tmpRef != self._trailer):
# if matching destination, delete container by matching CID
if (tmpRef._container._destination == dest):
self.deleteContainerByCID(tmpRef._container._id)
tmpRef = tmpRef._next
return ("Package Not Found", "", "", "", "")
# Delete based on container id
def deleteContainerByCID(self, elem) :
# This performs a sequential search so it can
# be used for any DLList().
        delPoint = self._header._next
        # when found, remove the element and update the doubly linked list
        while (delPoint != self._trailer and str(elem) != str(delPoint._container._id)) :
            delPoint = delPoint._next
        if (delPoint == self._trailer) :
            print ("No more containers to delete")
            return
        pred = delPoint._back
        succ = delPoint._next
        pred._next = succ
        succ._back = pred
        # only decrease the size once a container has actually been removed
        self._size -= 1
        return
# traversePrint is used to check container to make sure
# container id is unique when container is created
def traversePrint(self, cid) :
tmpRef = self._header._next
while (tmpRef != self._trailer) :
if tmpRef._container._id == cid:
return ("used")
tmpRef = tmpRef._next
# return container id because it is unused
return (cid)
# prints out shipping manifest of containers and its package contents with
# matching criteria for destination
# uses containerPackageInfo function to display package info of containers
def shippingManifestDest(self, destination) :
# Display headings
print ("---Shipping Manifest With Specific Destination---")
print ("-"*25)
tmpRef = self._header._next
# while traversing the nodes, detailing information for specific destination
while (tmpRef != self._trailer) :
if (tmpRef._container._destination.upper() == destination.upper()):
print("Container ID: " + str(tmpRef._container._id) + ", Destination: " + str(tmpRef._container._destination) + ", Total Weight: " + str(tmpRef._container._weight) + ", Number of Packages: " + str(tmpRef._container._numpkgs))
print ("[Package Contents]")
# print out package information for the container
tmpRef._container.containerPackageInfo()
print ("")
tmpRef = tmpRef._next
return
# prints out entire shipping yard manifest of containers and its package contents
# uses containerPackageInfo function to display package info of containers
def shippingManifestAll(self) :
# Display headings
print ("---Shipping Manifest With All Information---")
print ("-"*25)
tmpRef = self._header._next
while (tmpRef != self._trailer) :
# Display container info
print("Container ID: " + str(tmpRef._container._id) + ", Destination: " + str(tmpRef._container._destination) + ", Total Weight: " + str(tmpRef._container._weight) + ", Number of Packages: " + str(tmpRef._container._numpkgs))
print ("[Package Contents]")
# call container func to grab packages info
tmpRef._container.containerPackageInfo()
print ("") # new line spacing to space out each container
tmpRef = tmpRef._next
return
# Prints out a list of containers with their information, including
# capacity but not their contents.
def getContainersNoContents(self) :
# Display headings
print ("---Container Information With Capacity No Contents---")
print ("-"*25 + "\n")
tmpRef = self._header._next
while (tmpRef != self._trailer) :
# set up capacity variable since max load is 2000 lbs.
wgt = 2000 - int(tmpRef._container._weight)
# Display container info
print("Container ID: " + str(tmpRef._container._id) + ", Destination: " + str(tmpRef._container._destination) + ", Total Weight: " + str(tmpRef._container._weight) + ", Number of Packages: " + str(tmpRef._container._numpkgs) + ", Capacity: " + str(wgt))
tmpRef = tmpRef._next
return
# Search containers by container id
# this function is used for loading packages into specific containers
# determined by loading a Yard Data file
def traverseFindByCID(self, cid) :
        # walk the container list looking for a node whose container id
        # matches cid; returns None if no such container exists
tmpRef = self._header._next # copy the address of the head node into node
while (tmpRef != self._trailer):
if tmpRef._container._id == cid:
# return the container with matching container id
return (tmpRef._container)
tmpRef = tmpRef._next
return
# search containers to find destination
def traverseFindDest(self, dest, weight) :
        # walk the container list looking for a container bound for dest with
        # enough spare capacity; the caller creates a new container if none is found
tmpRef = self._header._next # copy the address of the head node into node
# traverse nodes
while (tmpRef != self._trailer):
# if destination matches, and weight is okay, return container that
# will fit the package
if tmpRef._container._destination == dest:
if (int(tmpRef._container._weight) + int(weight)) <= 2000:
return (tmpRef._container)
tmpRef = tmpRef._next
# new container needed if no container that can fit
return ("new container needed")
# This function tells how many containers are in the system.
# Unused, but useful function.
def checkContainerCount(self):
count=0
tmpRef = self._header._next
while (tmpRef != self._trailer) :
count+=1
tmpRef = tmpRef._next
return (count)
# It will ship out containers from the shipyard, removing the containers
# information from the system. Will report # of containers and total weight
# delivered.
def shipOutContainersDest(self, dest):
container_Count = 0
total_Weight = 0
# pass dest and 0 as weight because we are not using weight
theContainer = self.traverseFindDest(dest, 0)
if theContainer == "new container needed":
print ("\nNo containers to be shipped for this destination.")
container_Count = 0
total_Weight = 0
return
        # add the container's weight to the running total and
        # count each container being sent to the destination
total_Weight += int(theContainer._weight)
container_Count += 1
self.deleteContainerByCID(theContainer._id)
theContainer = self.traverseFindDest(dest, 0)
# while loop to delete all containers to the destination
# keep checking containers till no more containers with the destination
while (theContainer != "new container needed"):
container_Count += 1
total_Weight += int(theContainer._weight)
self.deleteContainerByCID(theContainer._id)
theContainer = self.traverseFindDest(dest, 0)
# display how many containers shipped and the weight
print (str(container_Count) + " Containers Shipped to " + str(dest))
print ("Total of " + str(total_Weight) + " lbs. shipped to " + str(dest))
container_Count = 0
total_Weight = 0
# It traverses the containers and searches for matching package id
# using the container method, traverseGrabPackage
def traverseGrabPackageInside(self, findpkgid) :
tmpRef = self._header._next
# traverse nodes
while (tmpRef != self._trailer):
# while package not found yet, keep cycling
findpkg = tmpRef._container.traverseGrabPackage(findpkgid)
if (findpkg[0] != "Package Not Found"):
findpkgupdate = (findpkg[0], findpkg[1], findpkg[2], findpkg[3], tmpRef._container._id)
# if found, return the values so it can be displayed in the menu
return (findpkgupdate)
tmpRef = tmpRef._next
# if package is not found return that it is not found
return ("Package Not Found", "", "", "", "")
# Function to save a state of the shipyard into a designated filename
# save current yard data to container and package format file
def saveYardData(self, filename) :
wholeString = ""
saveString = ""
print ("---Saving All Shipyard Data---")
print ("-"*25)
tmpRef = self._header._next
while (tmpRef != self._trailer) :
# to indicate it is the container
saveString = "% "
# join together save string data
saveString = saveString + str(tmpRef._container._id) + ", " + str(tmpRef._container._destination)
# call container func to grab packages info
saveString = tmpRef._container.containerSavePackageInfo(saveString)
wholeString = wholeString + saveString
tmpRef = tmpRef._next
        file = open(filename, "w")
        file.write(wholeString)
        file.close()
        print ("Yard Data Saved to " + str(filename))
return
# load Yard Data into system so the system can shutdown and resume where it left off
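    # Sketch of the expected file layout (hypothetical values), mirroring what
    # saveYardData/containerSavePackageInfo write: a '%' line introduces a
    # container as "% <container id>, <destination>", and each following line
    # is one package as "<package id>, <owner>, <weight>":
    #
    #   % 1, MIAMI
    #   PKG001, SMITH, 150
    #   PKG002, JONES, 320
    #   % 2, BOSTON
    #   PKG003, DOE, 75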
def loadYardData(self, filename) :
# set initial states for comma detection
contcomma = False
firstcomma = False
secondcomma = False
# initialize variables
box1 = ""
box2 = ""
box3 = ""
contbox1 = ""
contbox2 = ""
# open the file for reading
        file = open (filename)
        # store file data into readYardData, then release the file handle
        readYardData = file.readlines()
        file.close()
# deal with % meaning this is a container
# resets destination, contbox2 when a new container is found
for data in readYardData:
if data[0] == '%':
contbox1 = ""
contbox2 = ""
# organize data based on text file format
                # data is a single line, so one pass over its characters is enough
                for letter in data:
                    if letter != ',' and letter != '%' and contcomma == False:
                        contbox1 = contbox1 + letter
                    if letter == ',' and contcomma == False:
                        contcomma = True
                    if letter != ',' and contcomma == True:
                        contbox2 = contbox2 + letter
# strip whitespaces from beginning and end
contbox1 = contbox1.strip()
contbox2 = contbox2.strip().upper()
# create containers given the information
self.createContainer(int(contbox1), contbox2)
contcomma = False
# deal with packages portion of loading
if data[0] != '%':
for letter in data:
if letter != ',' and firstcomma == False and secondcomma == False:
box1 = box1 + letter
if letter ==',' and firstcomma == True and secondcomma == False:
secondcomma = True
if letter == ',' and firstcomma == False and secondcomma == False:
firstcomma = True
if letter != ',' and firstcomma == True and secondcomma == False:
box2 = box2 + letter
if letter != ',' and firstcomma == True and secondcomma == True:
box3 = box3 + letter
# strip white spaces from beginning and end
box1 = box1.strip()
box2 = box2.strip().upper()
box3 = box3.strip().upper()
# create packages, contbox2 = destination grabbed from above
self.loadStorePackage(box1, box2, contbox2, str(box3), int(contbox1))
# reset the box to grab next value
box1 = ""
box2 = ""
box3 = ""
# reset the comma checks
firstcomma = False
secondcomma = False
print ("Yard Data Loaded from " + str(filename))
# function to load package-only data into system ie. packages.txt
def loadPackagesFromFile(self, filename):
        # local accumulators for the three comma-separated fields
        box1 = ""
        box2 = ""
        box3 = ""
firstcomma = False
secondcomma = False
        file = open (filename)
        readYardData = file.readlines()
        file.close()
for entry in readYardData:
for letter in entry:
if letter != ',' and firstcomma == False and secondcomma == False:
box1 = box1 + letter
if letter ==',' and firstcomma == True and secondcomma == False:
secondcomma = True
if letter == ',' and firstcomma == False and secondcomma == False:
firstcomma = True
if letter != ',' and firstcomma == True and secondcomma == False:
box2 = box2 + letter
if letter != ',' and firstcomma == True and secondcomma == True:
box3 = box3 + letter
# set upper case to necessary values so it is easier to search
# strip is to strip the white spaces
box1 = box1.upper().strip()
box2 = box2.upper().strip()
box3 = box3.strip()
self.createPackage("None", box1, box2, int(box3))
box1 = ""
box2 = ""
box3 = ""
firstcomma = False
secondcomma = False
print ("Packages loaded from " + str(filename))
# Container class. A new container is created when a package has a new
# destination, or when the packages bound for an existing destination would
# exceed 2000 lbs. Packages inside a container are kept in a doubly linked list.
class Container :
def __init__(self, id, destination) :
self._header = self._Node(None) # sentinel node
self._trailer = self._Node(None) # sentinel node
self._header._next = self._trailer # set up linking
self._trailer._back = self._header # set up linking
self._size = 0 # initial size of doubly linked list
self._numpkgs = 0 # container variable for number of packages
self._weight = 0 # sum of weight of packages
self._destination = destination # destination
self._id = id # container id
# this Node class acts as the holder of each element of the linked list
# each node holds a value for the forward and backwards element unless
# it is a head or tail which it is then None.
class _Node :
# sentinel node declarations
def __init__(self, elem, next = None, back = None) :
self._elem = elem
self._next = next
self._back = back
# displays package contents info of container
def containerPackageInfo(self) :
tmpRef = self._header._next
while (tmpRef != self._trailer) :
print("Package ID: " + str(tmpRef._package._id) + ", Owner: " + str(tmpRef._package._name) + ", Weight: " + str(tmpRef._package._weight) + ", Destination: " + str(tmpRef._package._destination))
tmpRef = tmpRef._next
return
# used when saving yard data for the package information
def containerSavePackageInfo(self, saveString) :
saveString = saveString + ("\n")
tmpRef = self._header._next
while (tmpRef != self._trailer) :
saveString = saveString + str(tmpRef._package._id) + ", " + str(tmpRef._package._name) + ", " + str(tmpRef._package._weight) + "\n"
tmpRef = tmpRef._next
return (saveString)
# it traverses the container and searches for matching package id
def traverseGrabPackage(self, findpkgid) :
tmpRef = self._header._next
while (tmpRef != self._trailer):
# if matching, return package information to be used in the menu
if (tmpRef._package._id == findpkgid):
return (tmpRef._package._id, tmpRef._package._name, tmpRef._package._destination, tmpRef._package._weight)
tmpRef = tmpRef._next
# if it gets here, package wasn't found
return ("Package Not Found", "", "", "")
# inserts package pointer into container node
def insertPackage(self, packageholder) :
self._size += 1
insPoint = self._header
        # compare weights numerically so the list stays sorted by weight
        while (insPoint._next != self._trailer and int(packageholder._weight) >= int(insPoint._next._elem)) :
insPoint = insPoint._next
postPoint = insPoint._next
# create new node
newNode = self._Node(packageholder._weight, postPoint, insPoint)
newNode._package = packageholder # store pointer to package
insPoint._next = newNode # link next node
postPoint._back = newNode # link prev node
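    # Example for insertPackage (hypothetical weights, assuming numeric
    # weights as above): inserting packages weighing 12, 5 and 7 yields the
    # node order 5 -> 7 -> 12 between the sentinels, since each new node is
    # walked past lighter existing nodes and linked before the first heavier one.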
# Delete specific package by its package ID inside a container
def deletePackageByID(self, elem) :
# This performs a sequential search so it can
# be used for any DLList().
# If the value is not there, an error message is presented.
        delPoint = self._header._next
        # search for the node holding the matching package id
        while (delPoint != self._trailer and str(elem) != str(delPoint._package._id)) :
            delPoint = delPoint._next
        if (delPoint == self._trailer) :
            print ("Not in this container")
            return
        # only shrink the size once the package is known to be present
        self._size -= 1
# update doubly linked list pointers
pred = delPoint._back
succ = delPoint._next
pred._next = succ
succ._back = pred
return
    # Returns how many packages are in the container.
    # Not actually used; the container's _numpkgs attribute serves this purpose.
def checkPackageCount(self):
count=0
tmpRef = self._header._next
while (tmpRef != self._trailer) :
count+=1
tmpRef = tmpRef._next
return (count)
# Package class. Each package created is placed into a container and is
# eventually shipped out of the shipyard.
class Package :
def __init__(self, id, name, weight, destination) :
self._id = id # system generated package id
self._name = name # owner name
self._weight = weight # package weight in lbs.
self._destination = destination # destination
|
|
import logging
import time
import requests
import six.moves.urllib.parse as urlparse
from .. import SSS_VERSION
from .. import SSS_FORMAT
from .. import ACTION_PREFIX
from .. import client
from ..common import constants
from ..common import exceptions
from ..common import serializer
from ..common import utils
from ..i18n import _
from datetime import datetime
_logger = logging.getLogger(__name__)
def exception_handler_v10(status_code, error_content):
"""Exception handler for API v1.0 client.
This routine generates the appropriate SSS exception according to
the contents of the response body.
:param status_code: HTTP error status code
:param error_content: deserialized body of error response
"""
error_dict = None
if isinstance(error_content, dict):
error_dict = error_content.get('SSSError')
# Find real error type
bad_err_error_flag = False
if error_dict:
        # If the SSSError key is found, it should contain 'message' and
        # 'type' keys, but guard against malformed bodies just in case.
try:
error_type = error_dict['type']
error_message = error_dict['message']
            if error_dict.get('detail'):
error_message += "\n" + error_dict['detail']
except Exception:
bad_err_error_flag = True
if not bad_err_error_flag:
# If corresponding exception is defined, use it.
client_exc = getattr(exceptions, '%sClient' % error_type, None)
# Otherwise look up per status-code client exception
if not client_exc:
client_exc = exceptions.HTTP_EXCEPTION_MAP.get(status_code)
if client_exc:
raise client_exc(message=error_message,
status_code=status_code)
else:
raise exceptions.SSSClientException(
status_code=status_code, message=error_message)
else:
raise exceptions.SSSClientException(status_code=status_code,
message=error_dict)
else:
message = None
if isinstance(error_content, dict):
message = error_content.get('message')
if message:
raise exceptions.SSSClientException(status_code=status_code,
message=message)
        # If we end up here the exception was not an SSS error
msg = "%s-%s" % (status_code, error_content)
raise exceptions.SSSClientException(status_code=status_code,
message=msg)
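# Illustrative shape of the error body this handler expects (hypothetical
# values; field names taken from the parsing above):
#
#   {"SSSError": {"type": "TenantNotFound",
#                 "message": "Tenant could not be found",
#                 "detail": "No tenant with the given id"}}
#
# A matching TenantNotFoundClient exception is raised if one is defined in
# exceptions; otherwise the handler falls back to the per-status-code map and
# finally to SSSClientException.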
class APIParamsCall(object):
"""A Decorator to add support for format and tenant overriding and filters.
"""
def __init__(self, function):
self.function = function
def __get__(self, instance, owner):
def with_params(*args, **kwargs):
_format = instance.format
if 'format' in kwargs:
instance.format = kwargs['format']
ret = self.function(instance, *args, **kwargs)
instance.format = _format
return ret
return with_params
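# A minimal sketch of what the decorator enables (hypothetical call sites):
# a per-call 'format' override is applied for the duration of the wrapped
# call and then restored.
#
#   sss = Client(...)
#   sss.list_users()                # uses the client's default format
#   sss.list_users(format='json')   # temporarily switches instance.format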
class ClientBase(object):
"""Client for the OpenStack SSS v1.0 API.
:param string username: Username for authentication. (optional)
:param string user_id: User ID for authentication. (optional)
:param string password: Password for authentication. (optional)
:param string token: Token for authentication. (optional)
:param string tenant_name: Tenant name. (optional)
:param string tenant_id: Tenant id. (optional)
:param string auth_strategy: 'keystone' by default, 'noauth' for no
authentication against keystone. (optional)
:param string auth_url: Keystone service endpoint for authorization.
:param string service_type: Network service type to pull from the
keystone catalog (e.g. 'network') (optional)
:param string endpoint_type: Network service endpoint type to pull from the
keystone catalog (e.g. 'publicURL',
'internalURL', or 'adminURL') (optional)
:param string region_name: Name of a region to select when choosing an
endpoint from the service catalog.
    :param string endpoint_url: A user-supplied endpoint URL for the SSS
service. Lazy-authentication is possible for API
service calls if endpoint is set at
instantiation.(optional)
:param integer timeout: Allows customization of the timeout for client
http requests. (optional)
:param bool insecure: SSL certificate validation. (optional)
:param bool log_credentials: Allow for logging of passwords or not.
Defaults to False. (optional)
:param string ca_cert: SSL CA bundle file to use. (optional)
:param integer retries: How many times idempotent (GET, PUT, DELETE)
requests to SSS server should be retried if
they fail (default: 0).
:param bool raise_errors: If True then exceptions caused by connection
failure are propagated to the caller.
(default: True)
:param session: Keystone client auth session to use. (optional)
:param auth: Keystone auth plugin to use. (optional)
Example::
from sssclient.v1_0 import client
sss = client.Client(username=USER,
password=PASS,
tenant_name=TENANT_NAME,
auth_url=KEYSTONE_URL)
nets = sss.list_networks()
...
"""
# API has no way to report plurals, so we have to hard code them
# This variable should be overridden by a child class.
EXTED_PLURALS = {}
def __init__(self, **kwargs):
"""Initialize a new client for the SSS v1.0 API."""
super(ClientBase, self).__init__()
self.retries = kwargs.pop('retries', 0)
self.raise_errors = kwargs.pop('raise_errors', True)
self.httpclient = client.construct_http_client(**kwargs)
self.version = SSS_VERSION
self.format = SSS_FORMAT
self.action_prefix = ACTION_PREFIX
self.retry_interval = 1
def _handle_fault_response(self, status_code, response_body):
# Create exception with HTTP status code and message
_logger.debug("Error message: %s", response_body)
# Add deserialized error message to exception arguments
try:
des_error_body = self.deserialize(response_body, status_code)
except Exception:
            # If we are unable to deserialize the body it is probably not
            # an SSS error
des_error_body = {'message': response_body}
# Raise the appropriate exception
exception_handler_v10(status_code, des_error_body)
def do_request(self, method, action, body=None, headers=None, params=None):
# Add format and tenant_id
# action += ".%s" % self.format
if ACTION_PREFIX:
action = self.action_prefix + action
# action = self.action_prefix + action
if type(params) is dict and params:
params = utils.safe_encode_dict(params)
action += '?' + urlparse.urlencode(params, doseq=1)
if body:
body = self.serialize(body)
resp, replybody = self.httpclient.do_request(
action, method, body=body,
content_type=self.content_type())
status_code = resp.status_code
if status_code in (requests.codes.ok,
requests.codes.created,
requests.codes.accepted,
requests.codes.no_content):
return self.deserialize(replybody, status_code)
else:
if not replybody:
replybody = resp.reason
self._handle_fault_response(status_code, replybody)
def get_auth_info(self):
return self.httpclient.get_auth_info()
def serialize(self, data):
"""Serializes a dictionary into either XML or JSON.
A dictionary with a single key can be passed and it can contain any
structure.
"""
if data is None:
return None
elif type(data) is dict:
return serializer.Serializer(
self.get_attr_metadata()).serialize(data, self.content_type())
else:
raise Exception(_("Unable to serialize object of type = '%s'") %
type(data))
def deserialize(self, data, status_code):
"""Deserializes an XML or JSON string into a dictionary."""
if status_code == 204:
return data
return serializer.Serializer(self.get_attr_metadata()).deserialize(
data, self.content_type())['body']
def get_attr_metadata(self):
if self.format == 'json':
return {}
old_request_format = self.format
self.format = 'json'
exts = self.list_extensions()['extensions']
self.format = old_request_format
ns = dict([(ext['alias'], ext['namespace']) for ext in exts])
self.EXTED_PLURALS.update(constants.PLURALS)
return {'plurals': self.EXTED_PLURALS,
'xmlns': constants.XML_NS_V20,
constants.EXT_NS: ns}
def content_type(self, _format=None):
"""Returns the mime-type for either 'xml' or 'json'.
Defaults to the currently set format.
"""
_format = _format or self.format
return "application/%s" % (_format)
def retry_request(self, method, action, body=None,
headers=None, params=None):
"""Call do_request with the default retry configuration.
Only idempotent requests should retry failed connection attempts.
:raises: ConnectionFailed if the maximum # of retries is exceeded
"""
max_attempts = self.retries + 1
for i in range(max_attempts):
try:
return self.do_request(method, action, body=body,
headers=headers, params=params)
except exceptions.ConnectionFailed:
# Exception has already been logged by do_request()
if i < self.retries:
_logger.debug('Retrying connection to BHEC and Colocation service')
time.sleep(self.retry_interval)
elif self.raise_errors:
raise
if self.retries:
msg = (_("Failed to connect to SSS server after %d attempts")
% max_attempts)
else:
msg = _("Failed to connect SSS server")
raise exceptions.ConnectionFailed(reason=msg)
def delete(self, action, body=None, headers=None, params=None):
return self.retry_request("DELETE", action, body=body,
headers=headers, params=params)
def get(self, action, body=None, headers=None, params=None):
return self.retry_request("GET", action, body=body,
headers=headers, params=params)
def post(self, action, body=None, headers=None, params=None):
# Do not retry POST requests to avoid the orphan objects problem.
return self.do_request("POST", action, body=body,
headers=headers, params=params)
def put(self, action, body=None, headers=None, params=None):
return self.retry_request("PUT", action, body=body,
headers=headers, params=params)
def list(self, collection, path, retrieve_all=True, **params):
if retrieve_all:
res = []
for r in self._pagination(collection, path, **params):
res.extend(r[collection])
return {collection: res}
else:
return self._pagination(collection, path, **params)
def _pagination(self, collection, path, **params):
if params.get('page_reverse', False):
linkrel = 'previous'
else:
linkrel = 'next'
next = True
while next:
res = self.get(path, params=params)
yield res
next = False
try:
for link in res['%s_links' % collection]:
if link['rel'] == linkrel:
query_str = urlparse.urlparse(link['href']).query
params = urlparse.parse_qs(query_str)
next = True
break
except KeyError:
break
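# Pagination sketch (hypothetical collection name and href): _pagination keeps
# requesting pages as long as the response carries a matching link, e.g.
#
#   {"users": [...],
#    "users_links": [{"rel": "next",
#                     "href": "https://sss.example.com/users?marker=abc"}]}
#
# The query string of the 'next' (or 'previous', when page_reverse is set)
# link becomes the params for the following GET.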
class Client(ClientBase):
#
# Users
#
user_singular_path = "/users/%s" # {user_id} for Show, Update and Delete
user_list_path = "/users" # for List
user_create_path = "/users" # for Create
@APIParamsCall
def list_users(self, **_params):
"""Fetches a list of all Users of a certain contract_id in SSS."""
return self.get(self.user_list_path, params=_params)
@APIParamsCall
def show_user(self, user_id, **_params):
"""Fetche information of a certain user_id in SSS."""
return self.get(self.user_singular_path % (user_id), params=_params)
#@APIParamsCall
#def update_user(self, body=None, user_id="", *args, **_params):
# """Update information of a certain user_id in SSS."""
# return self.put(self.user_singular_path % (user_id), body=body)
@APIParamsCall
def delete_user(self, user_id, **_params):
"""Deletes a certain user in SSS."""
return self.delete(self.user_singular_path % (user_id), params=_params)
@APIParamsCall
def create_user(self, body=None, *args, **_params):
"""Creates a certain user in SSS.."""
return self.post(self.user_create_path, body=body)
#
# Tenants
#
    tenant_singular_path = "/tenants/%s" # {tenant_id} for Show, Update and Delete
tenant_list_path = "/tenants" # for List
tenant_create_path = "/tenants" # for Create
@APIParamsCall
def list_tenants(self, **_params):
"""Fetches a list of all Tenants of a certain contract_id in SSS."""
return self.get(self.tenant_list_path, params=_params)
@APIParamsCall
def show_tenant(self, tenant_id, **_params):
"""Fetche information of a certain tenant_id in SSS."""
return self.get(self.tenant_singular_path % (tenant_id), params=_params)
@APIParamsCall
def delete_tenant(self, tenant_id, **_params):
"""Deletes a certain tenant in SSS."""
return self.delete(self.tenant_singular_path % (tenant_id), params=_params)
@APIParamsCall
def create_tenant(self, body=None, *args, **_params):
"""Creates a certain tenant in SSS.."""
return self.post(self.tenant_create_path, body=body)
#
# Roles
#
role_create_path = "/roles" # for Create
role_delete_path = "/roles/tenants/%s/users/%s" # {tenant_id}, {user_id} for Delete
@APIParamsCall
def delete_role(self, tenant_id, user_id, **params):
"""Deletes a certain role in SSS."""
return self.delete(self.role_delete_path % (tenant_id, user_id))
@APIParamsCall
def create_role(self, body=None, *args, **_params):
"""Creates a certain role in SSS.."""
return self.post(self.role_create_path, body=body)
#
# API Keypair
#
api_keypair_path = "/keys/%s" # {user_id} for Update
@APIParamsCall
def set_api_keypair(self, user_id, *args, **_params):
"""Sets a certain API keypair in SSS."""
return self.put(self.api_keypair_path % (user_id))
#
# Channel
#
    channel_path = "/channels?get_contracts=%s" # {get_contracts} flag for List
@APIParamsCall
def list_channels(self, get_contracts, *args, **_params):
"""List channels in SSS."""
return self.get(self.channel_path % (get_contracts))
#
# Contract
#
contract_show_path = "/contracts/%s" # {contract_id} for Show, Delete
contract_list_path = "/contracts?channel_id=%s" # {channel_id} for List
contract_create_path = "/contracts" # for Create
billing_show_path = "/contracts/%s/billing/%s" # for Show
with_target_contract = "%s/target_contract/%s" # for Show billing of each contract
@APIParamsCall
def list_contracts(self, channel_id, **_params):
"""Fetches a list of all contracts of a certain channel_id in SSS."""
return self.get(self.contract_list_path %(channel_id), params=_params)
@APIParamsCall
def show_contract(self, contract_id, **_params):
"""Fetches information of a certain contract_id in SSS."""
return self.get(self.contract_show_path % (contract_id), params=_params)
@APIParamsCall
def delete_contract(self, contract_id, **params):
"""Deletes a certain contract in SSS."""
return self.delete(self.contract_show_path % (contract_id))
@APIParamsCall
def create_contract(self, body=None, *args, **_params):
"""Creates a certain contract in SSS.."""
return self.post(self.contract_create_path, body=body)
@APIParamsCall
def show_billing(self, contract_id, target_month, **_params):
"""Fetches information of a certain contract_id in SSS."""
billing_action = self.billing_show_path % (contract_id, target_month)
return self.get(billing_action, params=_params)
#
# IAM Endpoints
#
iam_group_list_path = "/iam/groups"
iam_group_create_path = "/iam/groups"
iam_group_delete_path = "/iam/groups/%s"
iam_group_attach_role_path = "/iam/groups/%s/roles/%s"
iam_group_attach_user_path = "/iam/groups/%s/users/%s"
iam_group_detach_role_path = "/iam/groups/%s/roles/%s"
iam_group_detach_user_path = "/iam/groups/%s/users/%s"
iam_role_list_path = "/iam/roles"
iam_role_create_path = "/iam/roles"
iam_role_show_path = "/iam/roles/%s"
iam_role_delete_path = "/iam/roles/%s"
iam_user_list_path = "/iam/groups/%s/users"
@APIParamsCall
def iam_group_list(self, contract_id=None):
url = self.iam_group_list_path
if contract_id:
url += "?contract_id=" + contract_id
return self.get(url)
@APIParamsCall
def iam_group_create(self, iam_group_name=None, contract_id=None, description=None):
body = {"iam_group_name":iam_group_name,
"contract_id":contract_id,
"description":description}
return self.post(self.iam_group_create_path, body=body)
@APIParamsCall
def iam_group_delete(self, iam_group_id=None):
return self.delete(self.iam_group_delete_path % (iam_group_id))
@APIParamsCall
def iam_group_attach_user(self, iam_group_id=None, iam_user_id=None):
return self.put(self.iam_group_attach_user_path % (iam_group_id,iam_user_id))
@APIParamsCall
def iam_group_detach_user(self, iam_group_id=None, iam_user_id=None):
return self.delete(self.iam_group_detach_user_path % (iam_group_id,iam_user_id))
@APIParamsCall
def iam_group_attach_role(self, iam_group_id=None, iam_role_id=None):
return self.put(self.iam_group_attach_role_path % (iam_group_id,iam_role_id))
@APIParamsCall
def iam_group_detach_role(self, iam_group_id=None, iam_role_id=None):
return self.delete(self.iam_group_attach_role_path % (iam_group_id,iam_role_id))
@APIParamsCall
def iam_role_list(self, contract_id=None):
url = self.iam_role_list_path
if contract_id:
url += "?contract_id=" + contract_id
return self.get(url)
def iam_role_show(self, iam_role_id=None):
return self.get(self.iam_role_show_path % (iam_role_id))
@APIParamsCall
def iam_role_create(self, iam_role_name=None, contract_id=None,
description=None, resources=None):
body = {"iam_role_name":iam_role_name,
"contract_id":contract_id,
"description":description,
"resources":resources}
return self.post(self.iam_role_create_path, body=body)
@APIParamsCall
def iam_role_delete(self, iam_role_id=None):
return self.delete(self.iam_role_delete_path % (iam_role_id))
@APIParamsCall
def iam_user_list(self, iam_group_id=None):
return self.get(self.iam_user_list_path % (iam_group_id))
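# Example usage of the IAM endpoints above (hypothetical ids/names, not real
# contract or group identifiers):
#
#   sss = Client(username=USER, password=PASS,
#                tenant_name=TENANT_NAME, auth_url=KEYSTONE_URL)
#   sss.iam_group_create(iam_group_name='operators',
#                        contract_id='econ0000001',
#                        description='ops team')
#   sss.iam_group_attach_user(iam_group_id='...', iam_user_id='...')
#   sss.iam_group_list(contract_id='econ0000001')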
|
|
# CSH Touchscreen Software
# Author: Angelo DiNardi (adinardi@csh.rit.edu)
# Beginnings: Nov 21, 2008
import pygame
from pygame.locals import *
import drinkAPI
import math
import time
import sys
from threading import Thread
import signal
pygame.init()
pygame.mouse.set_visible(False)
font = pygame.font.Font(None, 36)
BUTTONS = {}
MACHINES = {}
IMG_SURFACES = {}
IBUTTONREADER = None
INFOSYS = None
if (len(sys.argv) > 1 and sys.argv[1] == 'ld'):
MACHINES = {'Little Drink': 'ld'}
else:
MACHINES = {'Big Drink': 'd', 'Snack': 's'}
def endapp(s1, s2):
global IBUTTONREADER, INFOSYS
    try:
        IBUTTONREADER.end = True
    except:
        pass
    try:
        INFOSYS.end = True
    except:
        pass
signal.signal(signal.SIGINT, endapp)
def main():
global BUTTONS, IBUTTONREADER, INFOSYS
clock = pygame.time.Clock()
ibuttonreader = checkibutton()
IBUTTONREADER = ibuttonreader
ibuttonreader.start()
infosys = readinfosys()
INFOSYS = infosys
infosys.start()
# State
    # 1 = Unauthenticated
# 2 = Select Machine
# 3 = Select Drink
# 4 = Dropping
drink_state = 1
# Machine we're using
current_machine = None
last_ibutton = ''
current_ibutton = ''
# Initialise screen
screen = pygame.display.set_mode((800, 600))
pygame.display.set_caption('Basic Pygame program')
# Fill background
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((250, 250, 250))
# Display some text
#text = font.render("Hello There", 1, (10, 10, 10))
#textpos = text.get_rect()
#textpos.centerx = background.get_rect().centerx
#background.blit(text, textpos)
# Blit everything to the screen
screen.blit(background, (0, 0))
pygame.display.flip()
CHANGE = 1
PROCESSING = 0
csh_net = drinkAPI.Network('csh.rit.edu')
user = None
auth_ibutton = None
ready_to_auth = 0
infosys_message = ''
# Event loop
while 1:
if ibuttonreader.ibutton != last_ibutton and ibuttonreader.ibutton != '':
if len(MACHINES) == 1:
drink_state = 2.5
for i in MACHINES:
current_machine = MACHINES[i]
else:
drink_state = 2
CHANGE = 1
PROCESSING = 1
auth_ibutton = ibuttonreader.ibutton
ibuttonreader.ibutton = ''
last_ibutton = auth_ibutton
current_ibutton = last_ibutton
if infosys.message != '':
infosys_message = infosys.message
CHANGE = 1
for event in pygame.event.get():
if event.type == QUIT:
return
if event.type == MOUSEBUTTONDOWN:
#print 'buttons: ' + str(BUTTONS)
testrect = pygame.Rect(event.pos, (0, 0))
#print 'test rect: ' + str(testrect)
clicked_button = testrect.collidedict(BUTTONS)
#print 'clicked button: ' + str(clicked_button)
if clicked_button is None:
continue
elif clicked_button[1]['type'] == 'logout':
# Reset everything!
drink_state = 1
CHANGE = 1
PROCESSING = 0
auth_ibutton = ''
current_ibutton = ''
last_ibutton = ''
elif clicked_button[1]['type'] == 'switch':
drink_state = 2
CHANGE = 1
PROCESSING = 1
# Set us up to get authenticated again
auth_ibutton = current_ibutton
elif drink_state == 1:
if clicked_button[1]['type'] == 'login':
drink_state = 2
CHANGE = 1
PROCESSING = 1
elif drink_state == 2:
if clicked_button[1]['type'] == 'machine':
current_machine = clicked_button[1]['item']
CHANGE = 1
drink_state = 2.5
PROCESSING = 1
elif drink_state == 3:
clicked_key = clicked_button[1]
print "checking clicked key: " + str(clicked_key)
if clicked_key['type'] == 'item':
print "got item click: " + str(clicked_key['item'])
drink_state = 4
CHANGE = 1
PROCESSING = 1
item_to_drop = clicked_key['item']
if CHANGE == 1:
CHANGE = 0
background.fill((255, 255, 255))
# Redrawing the interface, so kill the stashed buttons
BUTTONS = {}
if drink_state == 1:
#login_btn = pygame.Surface((200, 50))
#login_btn_rect = login_btn.get_rect()
#login_btn_rect.left = 20
#login_btn_rect.top = 550
#text = font.render("Login", 1, (255, 255, 255))
#login_btn.blit(text, (10, 10))
# background.blit(login_btn, login_btn_rect)
#BUTTONS[tuple(login_btn_rect)] = {'type': 'login'}
render_message(background, "Touch iButton to continue...\n\n" + infosys_message)
            elif drink_state == 2:
render_machine_choices(background)
render_logout_btn(background)
            elif drink_state == 2.5:
render_message(background, "Authenticating...")
elif drink_state == 3:
render_switch_btn(background)
render_user_info(background, user)
render_logout_btn(background)
inv = system.inventory
render_drink_choices(background, inv, user)
            elif drink_state == 4:
render_message(background, "Dropping...")
screen.blit(background, (0, 0))
pygame.display.flip()
if PROCESSING == 1:
PROCESSING = 0
if drink_state == 2.5 and auth_ibutton is not None:
# user = drinkAPI.User('', '3b00000e4bbC9301')
user = drinkAPI.User('', auth_ibutton)
auth_ibutton = None
last_ibutton = ''
system = drinkAPI.System('drink', current_machine)
authed = user.connect_to_system(system, csh_net)
if authed:
drink_state = 3
CHANGE = 1
PROCESSING = 0
elif drink_state == 4:
print 'dropping ' + str(item_to_drop)
user.purchase(item_to_drop, 0)
time.sleep(5)
PROCESSING = 0
CHANGE = 1
drink_state = 1
clock.tick(20)
def render_message(bg, message):
fonting = pygame.font.Font(None, 30)
msg = pygame.Surface((800, 600))
msg.fill((255, 255, 255))
rect = msg.get_rect()
rect.left = 20
rect.top = 50
lines = message.splitlines()
c = 0
for line in lines:
text = fonting.render(line, 1, (0, 0, 0))
msg.blit(text, (0, c * 20))
c = c + 1
bg.blit(msg, rect)
return rect
def render_switch_btn(bg):
switch_btn = pygame.Surface((200, 50))
rect = switch_btn.get_rect()
rect.left = 20
rect.top = 550
BUTTONS[tuple(rect)] = {'type': 'switch'}
text = font.render("Switch", 1, (255, 255, 255))
tr = text.get_rect()
tr.centerx = switch_btn.get_rect().centerx
tr.centery = switch_btn.get_rect().centery
switch_btn.blit(text, tr)
bg.blit(switch_btn, rect)
return rect
def render_logout_btn(bg):
logout = pygame.Surface((200, 50))
rect = logout.get_rect()
rect.left = 580
rect.top = 550
BUTTONS[tuple(rect)] = {'type': 'logout'}
text = font.render("Logout", 1, (255, 255, 255))
tr = text.get_rect()
tr.centerx = logout.get_rect().centerx
tr.centery = logout.get_rect().centery
logout.blit(text, tr)
bg.blit(logout, rect)
return rect
def render_user_info(bg, user):
user_info = pygame.Surface((200, 50))
user_info.fill((255, 255, 255))
rect = user_info.get_rect()
#rect.left = 220
rect.centerx = bg.get_rect().centerx
rect.top = 550
font = pygame.font.Font(None, 25)
text = font.render("User: " + user.name, 1, (0, 0, 0))
user_info.blit(text, (10, 0))
text2 = font.render("Credits: " + str(user.get_credits_balance()), 1, (0, 0, 0))
user_info.blit(text2, (10, 20))
bg.blit(user_info, rect)
return rect
def render_drink_choices(bg, choices, user):
global IMG_SURFACES
#inv = pygame.Surface((800, 500))
#inv.fill((255, 255, 255))
#rect = inv.get_rect()
#rect.left = 0
#rect.top = 50
font = pygame.font.Font(None, 20)
item_count = len(choices)
width = (770 / math.ceil(item_count / 4.0))
cur_item_on_row = 0
items_per_row = math.ceil(770 / width) - 1
cur_row = 0
user_credits = user.get_credits_balance()
for item in choices:
enabled = 1
text_color = (255, 255, 255)
bg_color = (102, 0, 102)
if (item.quantity < 1 or item.price > user_credits):
enabled = 0
text_color = (187, 187, 187)
bg_color = (51, 51, 51)
s = pygame.Surface((width - 2, 98))
s.fill(bg_color)
NO_IMG = False
img_name = item.name.lower().replace(' ', '').replace('\'', '')
if (img_name in IMG_SURFACES):
img = IMG_SURFACES[img_name]
else:
try:
img = pygame.image.load("images/" + img_name + ".png")
img = pygame.transform.smoothscale(img, (80, 80))
except:
try:
img = pygame.image.load("images/unknown.png")
img = pygame.transform.smoothscale(img, (80, 80))
except:
NO_IMG = True
            if (NO_IMG == False):
                # only cache the surface when an image actually loaded
                IMG_SURFACES[img_name] = img
        if (NO_IMG == False):
            s.blit(img, (10, 10))
text = font.render(item.name, 1, text_color)
s.blit(text, (100, 10))
text = font.render(str(item.price), 1, text_color)
s.blit(text, (100, 25))
if (cur_item_on_row > items_per_row):
# reset us to the next row, pos 1
cur_item_on_row = 0
cur_row = cur_row + 1
sr = s.get_rect()
sr.left = (cur_item_on_row * width) + 10
sr.top = (cur_row * 100) + 50
cur_item_on_row = cur_item_on_row + 1
#inv.blit(s, sr)
if enabled:
BUTTONS[tuple(sr)] = {'type': 'item', 'item': item}
bg.blit(s, sr)
#bg.blit(inv, rect)
def render_machine_choices(bg):
cur_machine = 0
for m in MACHINES:
ms = pygame.Surface((200, 100))
ms.fill((102, 0, 102))
text = font.render(m, 1, (255, 255, 255))
tr = text.get_rect()
tr.centerx = ms.get_rect().centerx
tr.centery = ms.get_rect().centery
ms.blit(text, tr)
rect = ms.get_rect()
rect.centerx = bg.get_rect().centerx
rect.top = 100 + (200*cur_machine)
bg.blit(ms, rect)
cur_machine = cur_machine + 1
BUTTONS[tuple(rect)] = {'type': 'machine', 'item': MACHINES[m]}
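# Note on the BUTTONS registry used by the render_* helpers above: each helper
# stores {'type': ..., 'item': ...} keyed by the button's rect tuple, and the
# event loop in main() resolves clicks with a zero-size Rect at the click
# position, e.g.
#
#   testrect = pygame.Rect(event.pos, (0, 0))
#   clicked = testrect.collidedict(BUTTONS)   # -> (rect_tuple, info) or None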
class readinfosys(Thread):
def __init__ (self):
Thread.__init__(self)
self.message = ''
self.end = False
def run(self):
while(self.end == False):
time.sleep(60)
try:
iffile = open('/tmp/infosysdata', 'r')
print 'checking infosys file'
if iffile is not None:
data = ''
line = None
line = iffile.readline()
while(line):
data = data + line
line = iffile.readline()
iffile.close()
self.message = data
            except:
                pass
class checkibutton(Thread):
def __init__ (self):
Thread.__init__(self)
self.ibutton = ''
self.end = False
def run(self):
while(self.end == False):
time.sleep(1)
try:
ibfile = open('/tmp/ibutton', "r")
print 'checking file'
if ibfile is not None:
line = ibfile.readline()
print 'got: "' + line + '"'
nibutton = line
if nibutton != '':
self.ibutton = nibutton
ibfile.close()
if (self.ibutton != ''):
ibfile = open('/tmp/ibutton', "w")
ibfile.write("")
ibfile.close()
            except:
                # ignore errors (e.g. missing /tmp/ibutton) so the polling loop keeps running
                pass
if __name__ == '__main__': main()
|
|
'''
'
' Spotpost serverside. Provides all functionality for the website and application.
' Allows for location based notes to be viewed and made by users.
'
' @author Jakub Wlodarczyk
' @author Brian Truong
' @author Nate Norgaard
' @author Adam Davis
'
'''
from flask import Flask, session, request, abort, render_template, redirect, url_for, escape
from passlib.hash import sha256_crypt
from resource.dbmanager import DBManager
#from comments import add_comment
import os
import sqlite3
import math
# For decoding JSON request data. Strings come in unicode format. Possibly useless :(.
from unidecode import unidecode
# If Python2.6, simplejson is better than json, but in Python2.7, simplejson became json
try: import simplejson as json
except ImportError: import json
manager = DBManager()
app = Flask(__name__)
# For logging to a file called "spotpost_log"
import logging
from logging import FileHandler
file_handler = FileHandler("spotpost_log")
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
# how to log manually:
# app.logger.warning("%s", "hello!")
###
# Equation for scoring posts.
# (Placeholder: the intended scoring equation, based on delta_lon, was never
#  written out here.)
###
###
#
# Calculates bounding latitude and longitude.
# Bounds by approximation using a square.
#
# @param lon = longitude of center point.
# @param lat = latitude of center point.
# @param radius = radius in meters of circle contained within square.
#
# @return maximum longitude in bounding square.
# @return maximum latitude in bounding square.
# @return minimum longitude in bounding square.
# @return minimum latitude in bounding square.
#
###
def calc_bounding_coords(lon, lat, radius):
km_radius = radius / 1000
km_per_long_deg = 111.320 * math.cos(lat / 180.0 * math.pi)
deltaLat = km_radius / 111.1
deltaLong = km_radius / km_per_long_deg
min_lat = lat - deltaLat
max_lat = lat + deltaLat
min_long = lon - deltaLong
max_long = lon + deltaLong
return max_long, max_lat, min_long, min_lat
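# Worked example (hypothetical point): for lat = 45.0, lon = -122.0 and
# radius = 1000 m, km_radius = 1.0, so deltaLat ~= 1/111.1 ~= 0.0090 deg and
# km_per_long_deg ~= 111.320 * cos(45 deg) ~= 78.7, giving
# deltaLong ~= 1/78.7 ~= 0.0127 deg. The square therefore spans roughly
# lat 44.991..45.009 and lon -122.0127..-121.9873.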
###
#
# Returns a JSON containing an array of users who are following
# the user with username.
#
# @TODO CURRENTLY ONLY RETURNS LIST OF USERNAMES MODIFY TO RETURN LIST OF USER ARRAYS.
#
# @param username = username of followee
#
###
@app.route('/followerlist/<username>')
def get_follower_list(username):
data = manager.get_list_of_followers(username)
return json.dumps(data)
###
#
# Allows a user to follow another user.
# @TODO ERROR CHECKING FOR THIS.
#
###
@app.route('/_follow/<username>')
def follow_user(username):
curr_user = session['username']
manager.insert_follow_relation(curr_user, username)
#TEMP MUST REPLACE WITH REAL REDIRECT
return redirect(url_for('index'))
###
#
# Allows a user to unfollow another user.
# @TODO ERROR CHECKING FOR THIS.
#
###
@app.route('/_unfollow/<username>')
def unfollow_user(username):
curr_user = session['username']
manager.delete_follow_relation(curr_user, username)
return redirect(url_for('index'))
###
#
# Allows clientside to make a POST request to add data to the server database.
# SpotPosts(id, content, title, reputation, longitude, latitude, username, time)
#
# JSON must be constructed following convention below (REQUIRED DATA IS DENOTED WITH A *):
# * "content" : "text of spotpost"
# * "title" : "title of spotpost"
# "username" : "username of person making spotpost" NOTE: MAY BE DEPRECEATED IN FUTURE VERSIONING
# * "latitude" : "latitude of spotpost"
# * "longitude" : "longitude of spotpost"
# "reputation" : "custom starting reputation" NOTE: WILL BE DEPRECEATED IN FUTURE VERSIONING.
#
###
@app.route('/spotpost/_post', methods = ['POST'])
def post_spotpost():
data = request.data
decoded_data = json.loads(data)
if 'username' in session.keys():
username = session['username']
error_dict = manager.insert_spotpost(decoded_data, username)
else:
error_dict = manager.insert_spotpost(decoded_data, None)
return json.dumps(error_dict)
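# Example request body for /spotpost/_post (hypothetical values, matching the
# convention documented above):
#
#   {"content": "Great coffee here",
#    "title": "Coffee stop",
#    "latitude": 47.6062,
#    "longitude": -122.3321}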
###
#
# Adds a comment to the database.
#
# Provided JSON must follow this format (REQUIRED DATA IS DENOTED WITH A *)
# * "message_id" : "id of spotpost this comment is from"
# * "content" : "content of comment"
# "username" : "username of user who posted comment, Optional will normally be current user." NOTE: WILL BE DEPREACTED ONCE EVERYTHING IS CONFIRMED FUNCTIONING.
# "reputation" : "custom starting reputation" NOTE: WILL BE DEPRECEATED IN FUTURE VERSIONING.
#
###
@app.route('/comment/_post', methods = ['POST'])
def post_comment():
data = request.data
decoded_data = json.loads(data)
error_dict = manager.insert_comment(decoded_data, session['username'])
return json.dumps(error_dict)
###
#
# Allows clientside to make a GET request to get spotposts around a center latitude longitude point.
# Gets best top_count amount of spotposts. 10 default.
# Uses a radius provided, 100 meters default.
#
# URL must be constructed following convention below. Latitude and longitude are required:
# URL?/&latitude = latitude of center point of bounding square.
# URL&longitude = longitude of center point of bounding square.
# URL?/&radius = "radius" of bounding square.
#
###
@app.route('/spotpost/_getlocation')
def get_spotpost_by_location():
error_dict = {}
latitude = request.args.get('latitude')
longitude = request.args.get('longitude')
radius = request.args.get('radius')
top_count = request.args.get('top_count')
unlock_posts = request.args.get('unlock_posts')
if not radius:
radius = 100
if not top_count:
top_count = 10
if not latitude or not longitude:
error_dict['error'] = {"code" : "1092", "message" : "Location not provided."}
return json.dumps(error_dict)
max_long, max_lat, min_long, min_lat = calc_bounding_coords(float(longitude), float(latitude), float(radius))
data = manager.location_search_spotpost(min_lat, max_lat, min_long, max_long, top_count)
username = None
if 'username' in session:
username = session['username']
if unlock_posts and username:
unlock_posts = int(unlock_posts)
if unlock_posts:
#data is an array of dictionaries.
for row in data:
unlock_id = row['id']
manager.insert_unlock_relation(username, unlock_id)
return json.dumps(data)
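# Example query (hypothetical coordinates): GET
#   /spotpost/_getlocation?latitude=47.6062&longitude=-122.3321&radius=200
# returns up to top_count (default 10) posts inside the bounding square for a
# 200 m radius; adding &unlock_posts=1 also records an unlock relation for the
# logged-in user.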
###
#
# Allows clientside to make a GET request to get spotposts from the server database.
#
# NOTE: TO ADD MORE ARGUMENTS THE CONVENTION ?min_reputation=10&max_reputation=100 MUST BE FOLLOWED.
# NOTE: TO FURTHER THE POINT ABOVE I HAVE WRITTEN ?/& TO SHOW THAT ITS ONE OR THE OTHER DEPENDING ON PREVIOUS DATA.
#
# URL must be constructed following convention below (NOT ALL DATA IS REQUIRED):
# URL?min_reputation = minimum reputation to search for.
# URL?/&max_reputation = maximum reputation to search for.
# URL?/&id = desired spotpost ID.
# URL?/&latitude = latitude of center point of bounding square. NOTE: ALL 3 VARIABLES MUST BE PROVIDED TO USE BOUNDING SQUARE. OTHERWISE SEARCH IGNORES IT.
# URL&longitude = longitude of center point of bounding square.
# URL?/&radius = "radius" of bounding square.
# URL?/&lock_value = Lock status of spotposts. (0 = All posts locked or unlocked, 1 = All locked posts, 2 = All unlocked posts).
# URL?/&unlock_posts = Unlock all returned posts for the user. 0 or nothing = do not unlock posts, everything else = unlock posts.
#
# SEPARATE OUT FUNCTIONALITY.
###
@app.route('/spotpost/_get')
def get_spotpost():
min_reputation = request.args.get('min_reputation')
max_reputation = request.args.get('max_reputation')
username = request.args.get('username')
post_id = request.args.get('id')
lock_value = request.args.get('lock_value')
unlock_posts = request.args.get('unlock_posts')
data = manager.select_spotpost(min_reputation, max_reputation, username, post_id, lock_value)
if not username and 'username' in session:
username = session['username']
if unlock_posts and username:
unlock_posts = int(unlock_posts)
if unlock_posts:
#data is an array of dictionaries.
for row in data:
unlock_id = row['id']
manager.insert_unlock_relation(username, unlock_id)
return json.dumps(data)
###
#
# Upvotes a given comment.
#
# @param id = id of comment.
#
###
@app.route('/comment/_upvote/<id>')
def upvote_comment(id):
if 'username' in session:
return manager.rate_comment(1, id, session['username'])
else:
return "MUST BE LOGGED IN TO UPVOTE."
###
#
# Downvotes a given comment.
#
# @param id = id of comment.
#
###
@app.route('/comment/_downvote/<id>')
def downvote_comment(id):
if 'username' in session:
return manager.rate_comment(-1, id, session['username'])
else:
return "MUST BE LOGGED IN TO DOWNVOTE."
###
#
# Upvotes a given spotpost.
#
# @param id = id of SpotPost.
#
###
@app.route('/spotpost/_upvote/<id>')
def upvote_spotpost(id):
return manager.rate_post(1, id, session['username'])
###
#
# Downvotes a given spotpost.
#
# @param id = id of SpotPost.
#
###
@app.route('/spotpost/_downvote/<id>')
def downvote_spotpost(id):
return manager.rate_post(-1, id, session['username'])
###
#
# Deletes a given spotpost. Must be logged in as Admin
#
# @param id = id of SpotPost.
#
###
@app.route('/spotpost/_delete/<id>')
def delete_spotpost(id):
error_dict = {}
if 'privilege' in session:
manager.delete_post(id)
error_dict['error'] = {"code" : "1000", "message" : "Success."}
        return json.dumps(error_dict)
else:
error_dict['error'] = {"code" : "1032", "message" : "Admin privileges required."}
        return json.dumps(error_dict)
###
#
# Updates a given spotpost with new values. Must be logged in as Admin.
#
# JSON must be constructed following convention below (REQUIRES AT LEAST ONE TO BE ENTERED):
# id field is REQUIRED.
#
# "id" : "id of spotpost"
# "content" : "text of spotpost"
# "username" : "username of person making spotpost" NOTE: MAY BE DEPRECEATED IN FUTURE VERSIONING
# "latitude" : "latitude of spotpost"
# "longitude" : "longitude of spotpost"
# "reputation" : "custom starting reputation" NOTE: WILL BE DEPRECEATED IN FUTURE VERSIONING.
#
###
@app.route('/spotpost/_update', methods = ['POST'])
def update_spotpost():
error_dict = {}
data = request.data
decoded_data = json.loads(data)
if 'privilege' in session.keys() and 'id' in decoded_data.keys():
manager.update_post(decoded_data)
error_dict['error'] = {"code" : "1000", "message" : "Success."}
        return json.dumps(error_dict)
else:
error_dict['error'] = {"code" : "1032", "message" : "Admin privileges required."}
        return json.dumps(error_dict)
###
#
# Logs the user in if the user exists and the password is correct.
#
###
@app.route('/login', methods =['GET', 'POST'])
def login():
if request.method == 'POST':
data = request.data
decoded_data = json.loads(data)
username = decoded_data['username']
password = decoded_data['password']
valid_login = manager.validate_user(username, password)
if valid_login:
session['username'] = username
session['privilege'] = manager.get_privilege(username)
error_dict = {"code" : "1000", "message" : "Success."}
return json.dumps(error_dict)
else:
error_dict = {"code" : "1050", "message" : "Invalid login information."}
return json.dumps(error_dict)
return render_template('login.html')
###
#
# Registers the user into the Database.
#
###
@app.route('/_register', methods =['POST'])
def register():
data = request.data
decoded_data = json.loads(data)
username = decoded_data['username']
password = decoded_data['password']
retval = manager.insert_user(username, password)
#Log user in.
session['privilege'] = manager.get_privilege(username)
session['username'] = username
return json.dumps(retval)
###
#
# Promotes a user to an admin.
# @param username = user to be promoted.
#
###
@app.route('/promote/<username>')
def promote_user(username):
error_dict = {}
    curr_user = None
    if 'username' in session:
        curr_user = session['username']
else:
error_dict['error'] = {"code" : "1087", "message" : "Not logged in."}
if curr_user:
privilege = manager.get_privilege(curr_user)
        if privilege:
            manager.update_privilege(username, 1)
            error_dict['error'] = {"code" : "1000", "message" : "Success."}
else:
error_dict['error'] = {"code" : "1032", "message" : "Admin privileges required."}
return json.dumps(error_dict)
###
#
# Quick debugging fix to check if there is a user or not.
#
###
@app.route('/_userstatus')
def is_logged_in():
data = {}
if 'username' in session:
data = manager.get_user(session['username'])
data['error'] = {"code" : "1000", "message" : "Success."}
else:
data['error'] = {"code" : "1087", "message" : "Not logged in."}
return json.dumps(data)
###
#
# Logs the user out.
#
###
@app.route('/_logout')
def logout():
session.pop('username', None)
session.pop('privilege', None)
return redirect(url_for('index'))
###
#
# Shows homepage, simply serves as a way to get to other pages.
#
###
@app.route('/')
def index():
#if 'username' in session:
# return 'Logged in as %s' % escape(session['username'])
#return 'You are not logged in'
if 'username' in session:
return render_template('index.html')
else:
return redirect(url_for('login'))
if __name__ == '__main__':
# Runs on port 5000 by default
# url: "localhost:5000"
# Secret Key for sessions.
# @TODO Change to random key.
app.secret_key = 'Bv`L>?h^`qeQr6f7c$DK.E-gvMXZR+'
app.run(host="0.0.0.0")
manager.close_connection()
|
|
from __future__ import unicode_literals
import itertools
import os
import random
import unicodedata
from collections import defaultdict
from django.conf import settings
from django.core.files import File
from django.template.defaultfilters import slugify
from django.utils.six import moves
from faker import Factory
from faker.providers import BaseProvider
from payments import PaymentStatus
from prices import Price
from ...discount.models import Sale, Voucher
from ...order import OrderStatus
from ...order.models import DeliveryGroup, Order, OrderedItem, Payment
from ...product.models import (AttributeChoiceValue, Category, Product,
ProductAttribute, ProductClass, ProductImage,
ProductVariant, Stock, StockLocation)
from ...shipping.models import ANY_COUNTRY, ShippingMethod
from ...userprofile.models import Address, User
from ...userprofile.utils import store_user_address
fake = Factory.create()
STOCK_LOCATION = 'default'
DEFAULT_CATEGORY = 'Default'
DELIVERY_REGIONS = [ANY_COUNTRY, 'TT', 'GY', 'SUR', 'BAR']
# DEFAULT_SCHEMA = {
# 'T-Shirt': {
# 'category': 'Apparel',
# 'product_attributes': {
# 'Color': ['Blue', 'White'],
# 'Collar': ['Round', 'V-Neck', 'Polo'],
# 'Brand': ['Saleor']
# },
# 'variant_attributes': {
# 'Size': ['XS', 'S', 'M', 'L', 'XL', 'XXL']
# },
# 'images_dir': 't-shirts/',
# 'is_shipping_required': True
# },
# 'Mugs': {
# 'category': 'Accessories',
# 'product_attributes': {
# 'Brand': ['Saleor']
# },
# 'variant_attributes': {},
# 'images_dir': 'mugs/',
# 'is_shipping_required': True
# },
# 'Coffee': {
# 'category': 'Groceries',
# 'product_attributes': {
# 'Coffee Genre': ['Arabica', 'Robusta'],
# 'Brand': ['Saleor']
# },
# 'variant_attributes': {
# 'Box Size': ['100g', '250g', '500g', '1kg']
# },
# 'different_variant_prices': True,
# 'images_dir': 'coffee/',
# 'is_shipping_required': True
# },
# 'Candy': {
# 'category': 'Groceries',
# 'product_attributes': {
# 'Flavor': ['Sour', 'Sweet'],
# 'Brand': ['Saleor']
# },
# 'variant_attributes': {
# 'Candy Box Size': ['100g', '250g', '500g']
# },
# 'images_dir': 'candy/',
# 'different_variant_prices': True,
# 'is_shipping_required': True
# },
# 'E-books': {
# 'category': 'Books',
# 'product_attributes': {
# 'Author': ['John Doe', 'Milionare Pirate'],
# 'Publisher': ['Mirumee Press', 'Saleor Publishing'],
# 'Language': ['English', 'Pirate']
# },
# 'variant_attributes': {},
# 'images_dir': 'books/',
# 'is_shipping_required': False
# },
# 'Books': {
# 'category': 'Books',
# 'product_attributes': {
# 'Author': ['John Doe', 'Milionare Pirate'],
# 'Publisher': ['Mirumee Press', 'Saleor Publishing'],
# 'Language': ['English', 'Pirate']
# },
# 'variant_attributes': {
# 'Cover': ['Soft', 'Hard']
# },
# 'images_dir': 'books/',
# 'different_variant_prices': True,
# 'is_shipping_required': True
# }
# }
DEFAULT_SCHEMA = {
# Love and Romance
# 'Roses': {
# 'category': 'Love & Romance',
# 'product_attributes': {
# 'Color': ['Red', 'Yellow', 'White', 'Pink']
# },
# 'variant_attributes': {
# 'Vase': ['Y', 'N'],
# 'Amount':['1', '5', '10']
# },
# 'images_dir': 'lr_roses/',
# 'different_variant_prices': True,
# 'is_shipping_required': True
# },
# 'Lilies': {
# 'category': 'Love & Romance',
# 'product_attributes': {
# 'Color': ['Red', 'Yellow', 'White']
# },
# 'variant_attributes': {
# 'Vase': ['Y', 'N'],
# 'Amount':['1', '5', '10']
# },
# 'images_dir': 'lr_lilies/',
# 'different_variant_prices': True,
# 'is_shipping_required': True
# },
# 'Carnations': {
# 'category': 'Love & Romance',
# 'product_attributes': {
# 'Color': ['Red', 'Yellow', 'White']
# },
# 'variant_attributes': {
# 'Vase': ['Y', 'N'],
# 'Amount':['1', '5', '10']
# },
# 'images_dir': 'lr_carnations/',
# 'different_variant_prices': True,
# 'is_shipping_required': True
# },
# Sympathy
'Roses': {
'category': 'Sympathy',
'product_attributes': {
'Color': ['Red', 'Yellow', 'White', 'Pink', 'Purple']
},
'variant_attributes': {
'Vase': ['Y', 'N'],
'Amount':['1', '5', '10']
},
'images_dir': 'sym_roses/',
'different_variant_prices': True,
'is_shipping_required': True
},
'Lilies': {
'category': 'Sympathy',
'product_attributes': {
'Color': ['Red', 'Yellow', 'White']
},
'variant_attributes': {
'Vase': ['Y', 'N'],
'Amount':['1', '5', '10']
},
'images_dir': 'sym_lilies/',
'different_variant_prices': True,
'is_shipping_required': True
} #,
# # Anniversary
# 'Roses': {
# 'category': 'Anniversary',
# 'product_attributes': {
# 'Color': ['Red', 'Yellow', 'White', 'Pink']
# },
# 'variant_attributes': {
# 'Vase': ['Y', 'N'],
# 'Amount':['1', '5', '10']
# },
# 'images_dir': 'ann_roses/',
# 'different_variant_prices': True,
# 'is_shipping_required': True
# },
# 'Lilies': {
# 'category': 'Anniversary',
# 'product_attributes': {
# 'Color': ['Red', 'Yellow', 'White']
# },
# 'variant_attributes': {
# 'Vase': ['Y', 'N'],
# 'Amount':['1', '5', '10']
# },
# 'images_dir': 'ann_lilies/',
# 'different_variant_prices': True,
# 'is_shipping_required': True
# }
}
def create_attributes_and_values(attribute_data):
attributes = []
for attribute_name, attribute_values in attribute_data.items():
attribute = create_attribute(
slug=slugify(attribute_name), name=attribute_name)
for value in attribute_values:
create_attribute_value(attribute, name=value)
attributes.append(attribute)
return attributes
def create_product_class_with_attributes(name, schema):
product_attributes_schema = schema.get('product_attributes', {})
variant_attributes_schema = schema.get('variant_attributes', {})
is_shipping_required = schema.get('is_shipping_required', True)
product_class = get_or_create_product_class(
name=name, is_shipping_required=is_shipping_required)
product_attributes = create_attributes_and_values(
product_attributes_schema)
variant_attributes = create_attributes_and_values(
variant_attributes_schema)
product_class.product_attributes.add(*product_attributes)
product_class.variant_attributes.add(*variant_attributes)
return product_class
def create_product_classes_by_schema(root_schema):
results = []
for product_class_name, schema in root_schema.items():
product_class = create_product_class_with_attributes(
product_class_name, schema)
results.append((product_class, schema))
return results
def set_product_attributes(product, product_class):
attr_dict = {}
for product_attribute in product_class.product_attributes.all():
value = random.choice(product_attribute.values.all())
attr_dict[str(product_attribute.pk)] = str(value.pk)
product.attributes = attr_dict
product.save(update_fields=['attributes'])
def set_variant_attributes(variant, product_class):
attr_dict = {}
existing_variants = variant.product.variants.values_list('attributes',
flat=True)
existing_variant_attributes = defaultdict(list)
for variant_attrs in existing_variants:
for attr_id, value_id in variant_attrs.items():
existing_variant_attributes[attr_id].append(value_id)
for product_attribute in product_class.variant_attributes.all():
available_values = product_attribute.values.exclude(
pk__in=[int(pk) for pk
in existing_variant_attributes[str(product_attribute.pk)]])
if not available_values:
return
value = random.choice(available_values)
attr_dict[str(product_attribute.pk)] = str(value.pk)
variant.attributes = attr_dict
variant.save(update_fields=['attributes'])
def get_variant_combinations(product):
# Returns all possible variant combinations
# For example: product class has two variant attributes: Size, Color
# Size has available values: [S, M], Color has values [Red, Green]
# All combinations will be generated (S, Red), (S, Green), (M, Red),
# (M, Green)
    # Output is a list of dicts, where each key is a product attribute id and
    # each value is an attribute value id, both cast to strings.
variant_attr_map = {attr: attr.values.all()
for attr
in product.product_class.variant_attributes.all()}
all_combinations = itertools.product(*variant_attr_map.values())
return [{str(attr_value.attribute.pk): str(attr_value.pk)
for attr_value in combination}
for combination in all_combinations]
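# Illustrative output (hypothetical attribute/value pks): for a class with
# variant attributes Size (pk 1, values S=10, M=11) and Color (pk 2, values
# Red=20, Green=21), get_variant_combinations(product) returns
#
#   [{'1': '10', '2': '20'}, {'1': '10', '2': '21'},
#    {'1': '11', '2': '20'}, {'1': '11', '2': '21'}]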
def get_price_override(schema, combinations_num, current_price):
prices = []
if schema.get('different_variant_prices'):
prices = sorted(
[current_price + fake.price() for _ in range(combinations_num)],
reverse=True)
return prices
def create_products_by_class(product_class, schema,
placeholder_dir, how_many=10, create_images=True,
stdout=None):
category_name = schema.get('category') or DEFAULT_CATEGORY
category = get_or_create_category(category_name)
for dummy in range(how_many):
product = create_product(product_class=product_class)
set_product_attributes(product, product_class)
product.categories.add(category)
if create_images:
class_placeholders = os.path.join(
placeholder_dir, schema['images_dir'])
create_product_images(
product, random.randrange(1, 5), class_placeholders)
variant_combinations = get_variant_combinations(product)
prices = get_price_override(
schema, len(variant_combinations), product.price)
variants_with_prices = moves.zip_longest(
variant_combinations, prices)
for i, variant_price in enumerate(variants_with_prices, start=1337):
attr_combination, price = variant_price
sku = '%s-%s' % (product.pk, i)
create_variant(
product, attributes=attr_combination, sku=sku,
price_override=price)
if not variant_combinations:
            # Create at least one variant for products without variant-level attrs
sku = '%s-%s' % (product.pk, fake.random_int(1000, 100000))
create_variant(product, sku=sku)
if stdout is not None:
stdout.write('Product: %s (%s), %s variant(s)' % (
product, product_class.name, len(variant_combinations) or 1))
def create_products_by_schema(placeholder_dir, how_many, create_images,
stdout=None, schema=DEFAULT_SCHEMA):
for product_class, class_schema in create_product_classes_by_schema(schema):
create_products_by_class(
product_class, class_schema, placeholder_dir,
how_many=how_many, create_images=create_images, stdout=stdout)
class SaleorProvider(BaseProvider):
def price(self):
return Price(fake.pydecimal(2, 2, positive=True),
currency=settings.DEFAULT_CURRENCY)
def delivery_region(self):
return random.choice(DELIVERY_REGIONS)
def shipping_method(self):
return random.choice(ShippingMethod.objects.all())
fake.add_provider(SaleorProvider)
def get_email(first_name, last_name):
_first = unicodedata.normalize('NFD', first_name).encode('ascii', 'ignore')
_last = unicodedata.normalize('NFD', last_name).encode('ascii', 'ignore')
return '%s.%s@example.com' % (
_first.lower().decode('utf-8'), _last.lower().decode('utf-8'))
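# For example (relying on unicodedata NFD decomposition to drop accents):
#   get_email('José', 'Müller') -> 'jose.muller@example.com'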
def get_or_create_category(name, **kwargs):
defaults = {
'description': fake.text()}
defaults.update(kwargs)
defaults['slug'] = fake.slug(name)
return Category.objects.get_or_create(name=name, defaults=defaults)[0]
def get_or_create_product_class(name, **kwargs):
return ProductClass.objects.get_or_create(name=name, defaults=kwargs)[0]
def create_product(**kwargs):
defaults = {
'name': fake.company(),
'price': fake.price(),
'description': '\n\n'.join(fake.paragraphs(5))}
defaults.update(kwargs)
return Product.objects.create(**defaults)
def create_stock(variant, **kwargs):
default_location = StockLocation.objects.get_or_create(
name=STOCK_LOCATION)[0]
defaults = {
'variant': variant,
'location': default_location,
'quantity': fake.random_int(1, 50)}
defaults.update(kwargs)
return Stock.objects.create(**defaults)
def create_variant(product, **kwargs):
defaults = {
'product': product}
defaults.update(kwargs)
variant = ProductVariant.objects.create(**defaults)
create_stock(variant)
return variant
def create_product_image(product, placeholder_dir):
placeholder_root = os.path.join(settings.PROJECT_ROOT, placeholder_dir)
img_path = '%s/%s' % (placeholder_dir,
random.choice(os.listdir(placeholder_root)))
    # ProductImage.save() returns None, so build the instance first and
    # return it after saving.
    image = ProductImage(
        product=product,
        image=File(open(img_path, 'rb')))
    image.save()
    return image
def create_attribute(**kwargs):
slug = fake.word()
defaults = {
'slug': slug,
'name': slug.title()}
defaults.update(kwargs)
attribute = ProductAttribute.objects.get_or_create(**defaults)[0]
return attribute
def create_attribute_value(attribute, **kwargs):
name = fake.word()
defaults = {
'attribute': attribute,
'name': name}
defaults.update(kwargs)
defaults['slug'] = slugify(defaults['name'])
attribute_value = AttributeChoiceValue.objects.get_or_create(**defaults)[0]
return attribute_value
def create_product_images(product, how_many, placeholder_dir):
for dummy in range(how_many):
create_product_image(product, placeholder_dir)
def create_address():
address = Address.objects.create(
first_name=fake.first_name(),
last_name=fake.last_name(),
street_address_1=fake.street_address(),
city=fake.city(),
postal_code=fake.postcode(),
country=fake.country_code())
return address
def create_fake_user():
address = create_address()
email = get_email(address.first_name, address.last_name)
user = User.objects.create_user(email=email, password='password')
user.addresses.add(address)
user.default_billing_address = address
user.default_shipping_address = address
user.is_active = True
user.save()
return user
def create_payment(delivery_group):
order = delivery_group.order
status = random.choice(
[PaymentStatus.WAITING, PaymentStatus.PREAUTH, PaymentStatus.CONFIRMED])
payment = Payment.objects.create(
order=order,
status=status,
variant='default',
transaction_id=str(fake.random_int(1, 100000)),
currency=settings.DEFAULT_CURRENCY,
total=order.get_total().gross,
delivery=delivery_group.shipping_price.gross,
customer_ip_address=fake.ipv4(),
billing_first_name=order.billing_address.first_name,
billing_last_name=order.billing_address.last_name,
billing_address_1=order.billing_address.street_address_1,
billing_city=order.billing_address.city,
billing_postcode=order.billing_address.postal_code,
billing_country_code=order.billing_address.country)
if status == PaymentStatus.CONFIRMED:
payment.captured_amount = payment.total
payment.save()
return payment
def create_delivery_group(order):
region = order.shipping_address.country
if region not in DELIVERY_REGIONS:
region = ANY_COUNTRY
shipping_method = fake.shipping_method()
shipping_country = shipping_method.price_per_country.get_or_create(
country_code=region, defaults={'price': fake.price()})[0]
delivery_group = DeliveryGroup.objects.create(
status=random.choice(['new', 'shipped']),
order=order,
shipping_method_name=str(shipping_country),
shipping_price=shipping_country.price)
return delivery_group
def create_order_line(delivery_group):
product = Product.objects.all().order_by('?')[0]
variant = product.variants.all()[0]
return OrderedItem.objects.create(
delivery_group=delivery_group,
product=product,
product_name=product.name,
product_sku=variant.sku,
quantity=random.randrange(1, 5),
unit_price_net=product.price.net,
unit_price_gross=product.price.gross)
def create_order_lines(delivery_group, how_many=10):
for dummy in range(how_many):
yield create_order_line(delivery_group)
def create_fake_order():
user = random.choice([None, User.objects.filter(
is_superuser=False).order_by('?').first()])
if user:
user_data = {
'user': user,
'billing_address': user.default_billing_address,
'shipping_address': user.default_shipping_address}
else:
address = create_address()
user_data = {
'billing_address': address,
'shipping_address': address,
'user_email': get_email(
address.first_name, address.last_name)}
order = Order.objects.create(**user_data)
order.change_status(OrderStatus.PAYMENT_PENDING)
delivery_group = create_delivery_group(order)
lines = create_order_lines(delivery_group, random.randrange(1, 5))
order.total = sum(
[line.get_total() for line in lines], delivery_group.shipping_price)
order.save()
payment = create_payment(delivery_group)
if payment.status == PaymentStatus.CONFIRMED:
order.change_status(OrderStatus.FULLY_PAID)
if random.choice([True, False]):
order.change_status(OrderStatus.SHIPPED)
return order
def create_fake_sale():
sale = Sale.objects.create(
name='Happy %s day!' % fake.word(),
type=Sale.PERCENTAGE,
value=random.choice([10, 20, 30, 40, 50]))
for product in Product.objects.all().order_by('?')[:4]:
sale.products.add(product)
return sale
def create_users(how_many=10):
for dummy in range(how_many):
user = create_fake_user()
yield 'User: %s' % (user.email,)
def create_orders(how_many=10):
for dummy in range(how_many):
order = create_fake_order()
yield 'Order: %s' % (order,)
def create_product_sales(how_many=5):
for dummy in range(how_many):
sale = create_fake_sale()
yield 'Sale: %s' % (sale,)
def create_shipping_methods():
shipping_method = ShippingMethod.objects.create(name='UPC')
shipping_method.price_per_country.create(price=fake.price())
yield 'Shipping method #%d' % shipping_method.id
shipping_method = ShippingMethod.objects.create(name='DHL')
shipping_method.price_per_country.create(price=fake.price())
yield 'Shipping method #%d' % shipping_method.id
def create_vouchers():
voucher, created = Voucher.objects.get_or_create(
code='FREESHIPPING', defaults={
'type': Voucher.SHIPPING_TYPE,
'name': 'Free shipping',
'discount_value_type': Voucher.DISCOUNT_VALUE_PERCENTAGE,
'discount_value': 100})
if created:
yield 'Voucher #%d' % voucher.id
else:
yield 'Shipping voucher already exists'
voucher, created = Voucher.objects.get_or_create(
code='DISCOUNT', defaults={
'type': Voucher.VALUE_TYPE,
'name': 'Big order discount',
'discount_value_type': Voucher.DISCOUNT_VALUE_FIXED,
'discount_value': 25,
'limit': 200})
if created:
yield 'Voucher #%d' % voucher.id
else:
yield 'Value voucher already exists'
def set_featured_products(how_many=8):
pks = Product.objects.order_by('?')[:how_many].values_list('pk', flat=True)
Product.objects.filter(pk__in=pks).update(is_featured=True)
yield 'Featured products created'
def add_address_to_admin(email):
address = create_address()
user = User.objects.get(email=email)
store_user_address(user, address, True, True)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Quotas for instances, volumes, and floating ips."""
import sys
from oslo.config import cfg
import webob
from neutron.common import exceptions
from neutron.common import legacy
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
QUOTA_DB_MODULE = 'neutron.db.quota_db'
QUOTA_DB_DRIVER = 'neutron.db.quota_db.DbQuotaDriver'
QUOTA_CONF_DRIVER = 'neutron.quota.ConfDriver'
quota_opts = [
cfg.ListOpt('quota_items',
default=['network', 'subnet', 'port'],
help=_('Resource name(s) that are supported in quota '
'features')),
cfg.IntOpt('default_quota',
default=-1,
               help=_('Default number of resources allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_network',
default=10,
               help=_('Number of networks allowed per tenant. '
                      'A negative value means unlimited.')),
cfg.IntOpt('quota_subnet',
default=10,
               help=_('Number of subnets allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_port',
default=50,
help=_('Number of ports allowed per tenant. '
'A negative value means unlimited.')),
cfg.StrOpt('quota_driver',
default=QUOTA_DB_DRIVER,
help=_('Default driver to use for quota checks')),
]
# Register the configuration options
cfg.CONF.register_opts(quota_opts, 'QUOTAS')
legacy.override_config(cfg.CONF, [('QUOTAS', 'quota_driver')])
class ConfDriver(object):
"""Configuration driver.
Driver to perform necessary checks to enforce quotas and obtain
quota information. The default driver utilizes the default values
in neutron.conf.
"""
def _get_quotas(self, context, resources, keys):
"""Get quotas.
A helper method which retrieves the quotas for the specific
resources identified by keys, and which apply to the current
context.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param keys: A list of the desired quotas to retrieve.
"""
# Filter resources
desired = set(keys)
sub_resources = dict((k, v) for k, v in resources.items()
if k in desired)
# Make sure we accounted for all of them...
if len(keys) != len(sub_resources):
unknown = desired - set(sub_resources.keys())
raise exceptions.QuotaResourceUnknown(unknown=sorted(unknown))
quotas = {}
for resource in sub_resources.values():
quotas[resource.name] = resource.default
return quotas
def limit_check(self, context, tenant_id,
resources, values):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
synchronization function--this method checks that a set of
proposed values are permitted by the limit restriction.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it is not a simple limit
resource.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns
nothing.
:param context: The request context, for access checks.
        :param tenant_id: The tenant_id to check quota.
:param resources: A dictionary of the registered resources.
:param values: A dictionary of the values to check against the
quota.
"""
# Ensure no value is less than zero
unders = [key for key, val in values.items() if val < 0]
if unders:
raise exceptions.InvalidQuotaValue(unders=sorted(unders))
# Get the applicable quotas
quotas = self._get_quotas(context, resources, values.keys())
# Check the quotas and construct a list of the resources that
# would be put over limit by the desired values
overs = [key for key, val in values.items()
if quotas[key] >= 0 and quotas[key] < val]
if overs:
raise exceptions.OverQuota(overs=sorted(overs), quotas=quotas,
usages={})
@staticmethod
def get_tenant_quotas(context, resources, tenant_id):
quotas = {}
sub_resources = dict((k, v) for k, v in resources.items())
for resource in sub_resources.values():
quotas[resource.name] = resource.default
return quotas
@staticmethod
def get_all_quotas(context, resources):
return []
@staticmethod
def delete_tenant_quota(context, tenant_id):
msg = _('Access to this resource was denied.')
raise webob.exc.HTTPForbidden(msg)
@staticmethod
def update_quota_limit(context, tenant_id, resource, limit):
msg = _('Access to this resource was denied.')
raise webob.exc.HTTPForbidden(msg)
class BaseResource(object):
"""Describe a single resource for quota checking."""
def __init__(self, name, flag):
"""Initializes a resource.
:param name: The name of the resource, i.e., "instances".
:param flag: The name of the flag or configuration option
"""
self.name = name
self.flag = flag
@property
def default(self):
"""Return the default value of the quota."""
return getattr(cfg.CONF.QUOTAS,
self.flag,
cfg.CONF.QUOTAS.default_quota)
class CountableResource(BaseResource):
"""Describe a resource where the counts are determined by a function."""
def __init__(self, name, count, flag=None):
"""Initializes a CountableResource.
Countable resources are those resources which directly
        correspond to objects in the database, i.e., network, subnet,
        etc. A CountableResource must be constructed with a counting
function, which will be called to determine the current counts
of the resource.
The counting function will be passed the context, along with
the extra positional and keyword arguments that are passed to
Quota.count(). It should return an integer specifying the
count.
:param name: The name of the resource, i.e., "instances".
:param count: A callable which returns the count of the
resource. The arguments passed are as described
above.
:param flag: The name of the flag or configuration option
which specifies the default value of the quota
for this resource.
"""
super(CountableResource, self).__init__(name, flag=flag)
self.count = count
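# A minimal sketch of the counting-function contract described in the
# docstring above; it mirrors the signature of _count_resource at the bottom
# of this module. The 'get_networks' plugin call is illustrative only; the
# real registration path is register_resources_from_config(), which wires
# every configured resource to _count_resource.
def _example_count_networks(context, plugin, resources, tenant_id):
    # Must return an integer count of the tenant's networks.
    networks = plugin.get_networks(context,
                                   filters={'tenant_id': [tenant_id]})
    return len(networks)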
class QuotaEngine(object):
"""Represent the set of recognized quotas."""
def __init__(self, quota_driver_class=None):
"""Initialize a Quota object."""
self._resources = {}
self._driver = None
self._driver_class = quota_driver_class
def get_driver(self):
if self._driver is None:
_driver_class = (self._driver_class or
cfg.CONF.QUOTAS.quota_driver)
if (_driver_class == QUOTA_DB_DRIVER and
QUOTA_DB_MODULE not in sys.modules):
# If quotas table is not loaded, force config quota driver.
_driver_class = QUOTA_CONF_DRIVER
LOG.info(_("ConfDriver is used as quota_driver because the "
"loaded plugin does not support 'quotas' table."))
if isinstance(_driver_class, basestring):
_driver_class = importutils.import_object(_driver_class)
self._driver = _driver_class
LOG.info(_('Loaded quota_driver: %s.'), _driver_class)
return self._driver
def __contains__(self, resource):
return resource in self._resources
def register_resource(self, resource):
"""Register a resource."""
if resource.name in self._resources:
LOG.warn(_('%s is already registered.'), resource.name)
return
self._resources[resource.name] = resource
def register_resource_by_name(self, resourcename):
"""Register a resource by name."""
resource = CountableResource(resourcename, _count_resource,
'quota_' + resourcename)
self.register_resource(resource)
def register_resources(self, resources):
"""Register a list of resources."""
for resource in resources:
self.register_resource(resource)
def count(self, context, resource, *args, **kwargs):
"""Count a resource.
For countable resources, invokes the count() function and
returns its result. Arguments following the context and
resource are passed directly to the count function declared by
the resource.
:param context: The request context, for access checks.
:param resource: The name of the resource, as a string.
"""
# Get the resource
res = self._resources.get(resource)
if not res or not hasattr(res, 'count'):
raise exceptions.QuotaResourceUnknown(unknown=[resource])
return res.count(context, *args, **kwargs)
def limit_check(self, context, tenant_id, **values):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
synchronization function--this method checks that a set of
proposed values are permitted by the limit restriction. The
values to check are given as keyword arguments, where the key
identifies the specific quota limit to check, and the value is
the proposed value.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it is not a simple limit
resource.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns
nothing.
:param context: The request context, for access checks.
"""
return self.get_driver().limit_check(context, tenant_id,
self._resources, values)
@property
def resources(self):
return self._resources
QUOTAS = QuotaEngine()
def _count_resource(context, plugin, resources, tenant_id):
count_getter_name = "get_%s_count" % resources
# Some plugins support a count method for particular resources,
# using a DB's optimized counting features. We try to use that one
# if present. Otherwise just use regular getter to retrieve all objects
# and count in python, allowing older plugins to still be supported
try:
obj_count_getter = getattr(plugin, count_getter_name)
return obj_count_getter(context, filters={'tenant_id': [tenant_id]})
except (NotImplementedError, AttributeError):
obj_getter = getattr(plugin, "get_%s" % resources)
obj_list = obj_getter(context, filters={'tenant_id': [tenant_id]})
return len(obj_list) if obj_list else 0
def register_resources_from_config():
resources = []
for resource_item in cfg.CONF.QUOTAS.quota_items:
resources.append(CountableResource(resource_item, _count_resource,
'quota_' + resource_item))
QUOTAS.register_resources(resources)
register_resources_from_config()
|
|
#!/usr/bin/env python
from __future__ import print_function
import sys, os, datetime, time, shutil, tarfile, tempfile, subprocess, os.path
from optparse import OptionParser
from ._backup import *
info = "'rethinkdb dump' creates an archive of data from a RethinkDB cluster"
usage = "rethinkdb dump [-c HOST:PORT] [-p] [--password-file FILENAME] [--tls-cert FILENAME] [-f FILE] [--clients NUM] [-e (DB | DB.TABLE)]..."
def print_dump_help():
print(info)
print(usage)
print("")
print(" -h [ --help ] print this help")
print(" -c [ --connect ] HOST:PORT host and client port of a rethinkdb node to connect")
print(" to (defaults to localhost:28015)")
print(" --tls-cert FILENAME certificate file to use for TLS encryption.")
print(" -p [ --password ] interactively prompt for a password required to connect.")
print(" --password-file FILENAME read password required to connect from file.")
print(" -f [ --file ] FILE file to write archive to (defaults to")
print(" rethinkdb_dump_DATE_TIME.tar.gz);")
print(" if FILE is -, use standard output (note that")
print(" intermediate files will still be written to")
print(" the --temp-dir directory)")
print(" -e [ --export ] (DB | DB.TABLE) limit dump to the given database or table (may")
print(" be specified multiple times)")
print(" --clients NUM_CLIENTS number of tables to export simultaneously (defaults")
print(" to 3)")
print(" --temp-dir DIRECTORY the directory to use for intermediary results")
print(" --overwrite-file don't abort when file given via --file already exists")
print(" -q [ --quiet ] suppress non-error messages")
print("")
print("EXAMPLES:")
print("rethinkdb dump -c mnemosyne:39500")
print(" Archive all data from a cluster running on host 'mnemosyne' with a client port at 39500.")
print("")
print("rethinkdb dump -e test -f rdb_dump.tar.gz")
print(" Archive only the 'test' database from a local cluster into a named file.")
print("")
print("rethinkdb dump -c hades -e test.subscribers -p")
print(" Archive a specific table from a cluster running on host 'hades' which requires a password.")
def parse_options():
parser = OptionParser(add_help_option=False, usage=usage)
parser.add_option("-c", "--connect", dest="host", metavar="host:port", default="localhost:28015", type="string")
parser.add_option("-f", "--file", dest="out_file", metavar="file", default=None, type="string")
parser.add_option("-e", "--export", dest="tables", metavar="(db | db.table)", default=[], action="append", type="string")
parser.add_option("--tls-cert", dest="tls_cert", metavar="TLS_CERT", default="", type="string")
parser.add_option("--temp-dir", dest="temp_dir", metavar="directory", default=None, type="string")
parser.add_option("--overwrite-file", dest="overwrite_file", default=False, action="store_true")
parser.add_option("--clients", dest="clients", metavar="NUM", default=3, type="int")
parser.add_option("-q", "--quiet", dest="quiet", default=False, action="store_true")
parser.add_option("--debug", dest="debug", default=False, action="store_true")
parser.add_option("-h", "--help", dest="help", default=False, action="store_true")
parser.add_option("-p", "--password", dest="password", default=False, action="store_true")
parser.add_option("--password-file", dest="password_file", default=None, type="string")
(options, args) = parser.parse_args()
# Check validity of arguments
if len(args) != 0:
raise RuntimeError("Error: No positional arguments supported. Unrecognized option '%s'" % args[0])
if options.help:
print_dump_help()
exit(0)
res = {}
# Verify valid host:port --connect option
(res["host"], res["port"]) = parse_connect_option(options.host)
res["tls_cert"] = options.tls_cert
# Verify valid output file
if sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
res["temp_filename"] = "rethinkdb_dump_%s" % datetime.datetime.today().strftime("%Y-%m-%dT%H-%M-%S")
else:
res["temp_filename"] = "rethinkdb_dump_%s" % datetime.datetime.today().strftime("%Y-%m-%dT%H:%M:%S")
if options.out_file == "-":
res["out_file"] = sys.stdout
else:
# The output file is a real file in the file system
if options.out_file is None:
res["out_file"] = os.path.abspath("./" + res["temp_filename"] + ".tar.gz")
else:
res["out_file"] = os.path.abspath(options.out_file)
if os.path.exists(res["out_file"]) and not options.overwrite_file:
raise RuntimeError("Error: Output file already exists: %s" % res["out_file"])
# Verify valid client count
if options.clients < 1:
raise RuntimeError("Error: invalid number of clients (%d), must be greater than zero" % options.clients)
res["clients"] = options.clients
# Make sure the temporary directory exists and is accessible
res["temp_dir"] = options.temp_dir
if res["temp_dir"] is not None:
if not os.path.isdir(res["temp_dir"]):
raise RuntimeError("Error: Temporary directory doesn't exist or is not a directory: %s" % res["temp_dir"])
if not os.access(res["temp_dir"], os.W_OK):
raise RuntimeError("Error: Temporary directory inaccessible: %s" % res["temp_dir"])
res["tables"] = options.tables
res["quiet"] = True if res["out_file"] is sys.stdout else options.quiet
res["debug"] = options.debug
res["password"] = options.password
res["password-file"] = options.password_file
return res
def do_export(temp_dir, options):
if not options["quiet"]:
print("Exporting to directory...")
export_args = ["rethinkdb-export"]
export_args.extend(["--connect", "%s:%s" % (options["host"], options["port"])])
export_args.extend(["--directory", os.path.join(temp_dir, options["temp_filename"])])
if options["password"]:
export_args.append("--password")
if options["password-file"]:
export_args.extend(["--password-file", options["password-file"]])
export_args.extend(["--clients", str(options["clients"])])
export_args.extend(["--tls-cert", options["tls_cert"]])
for table in options["tables"]:
export_args.extend(["--export", table])
if options["debug"]:
export_args.extend(["--debug"])
if options["quiet"]:
export_args.extend(["--quiet"])
res = subprocess.call(export_args)
if res != 0:
raise RuntimeError("Error: rethinkdb-export failed")
# 'Done' message will be printed by the export script (unless options["quiet"])
def do_zip(temp_dir, options):
if not options["quiet"]:
print("Zipping export directory...")
start_time = time.time()
original_dir = os.getcwd()
    # Below, `tarfile.open()` forces us to set either `name` or `fileobj`,
# depending on whether the output is a real file or an open file object.
    is_fileobj = hasattr(options["out_file"], "write")
name = None if is_fileobj else options["out_file"]
fileobj = options["out_file"] if is_fileobj else None
try:
os.chdir(temp_dir)
with tarfile.open(name=name, fileobj=fileobj, mode="w:gz") as f:
for curr, subdirs, files in os.walk(options["temp_filename"]):
for data_file in files:
path = os.path.join(curr, data_file)
f.add(path)
os.unlink(path)
finally:
os.chdir(original_dir)
if not options["quiet"]:
print(" Done (%d seconds)" % (time.time() - start_time))
def run_rethinkdb_export(options):
# Create a temporary directory to store the intermediary results
temp_dir = tempfile.mkdtemp(dir=options["temp_dir"])
res = -1
if not options["quiet"]:
# Print a warning about the capabilities of dump, so no one is confused (hopefully)
print("NOTE: 'rethinkdb-dump' saves data and secondary indexes, but does *not* save")
print(" cluster metadata. You will need to recreate your cluster setup yourself after ")
print(" you run 'rethinkdb-restore'.")
try:
do_export(temp_dir, options)
do_zip(temp_dir, options)
except KeyboardInterrupt:
time.sleep(0.2)
raise RuntimeError("Interrupted")
finally:
shutil.rmtree(temp_dir)
def main():
try:
options = parse_options()
except RuntimeError as ex:
print("Usage: %s" % usage, file=sys.stderr)
print(ex, file=sys.stderr)
return 1
try:
start_time = time.time()
run_rethinkdb_export(options)
except RuntimeError as ex:
print(ex, file=sys.stderr)
return 1
return 0
if __name__ == "__main__":
exit(main())
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import utils
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.keystone import utils as kutils
from rally.task import validation
class KeystoneBasic(kutils.KeystoneScenario):
"""Basic benchmark scenarios for Keystone."""
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
@utils.log_deprecated_args(
"The 'name_length' argument to create_user is ignored",
"0.1.2", ["name_length"], once=True)
def create_user(self, name_length=10, **kwargs):
"""Create a keystone user with random name.
:param kwargs: Other optional parameters to create users like
"tenant_id", "enabled".
"""
self._user_create(**kwargs)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
@utils.log_deprecated_args(
"The 'name_length' argument to create_delete_user is ignored",
"0.1.2", ["name_length"], once=True)
def create_delete_user(self, name_length=10, **kwargs):
"""Create a keystone user with random name and then delete it.
:param kwargs: Other optional parameters to create users like
"tenant_id", "enabled".
"""
user = self._user_create(**kwargs)
self._resource_delete(user)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
def create_user_set_enabled_and_delete(self, enabled=True, **kwargs):
"""Create a keystone user, enable or disable it, and delete it.
:param enabled: Initial state of user 'enabled' flag. The user
will be created with 'enabled' set to this
value, and then it will be toggled.
:param kwargs: Other optional parameters to create user.
"""
user = self._user_create(enabled=enabled, **kwargs)
self._update_user_enabled(user, not enabled)
self._resource_delete(user)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
@utils.log_deprecated_args(
"The 'name_length' argument to create_tenant is ignored",
"0.1.2", ["name_length"], once=True)
def create_tenant(self, name_length=10, **kwargs):
"""Create a keystone tenant with random name.
:param kwargs: Other optional parameters
"""
self._tenant_create(**kwargs)
@validation.number("users_per_tenant", minval=1)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
@utils.log_deprecated_args(
"The 'name_length' argument to create_tenant_with_users is ignored",
"0.1.2", ["name_length"], once=True)
def create_tenant_with_users(self, users_per_tenant, name_length=10,
**kwargs):
"""Create a keystone tenant and several users belonging to it.
:param users_per_tenant: number of users to create for the tenant
:param kwargs: Other optional parameters for tenant creation
:returns: keystone tenant instance
"""
tenant = self._tenant_create(**kwargs)
self._users_create(tenant, users_per_tenant=users_per_tenant)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
@utils.log_deprecated_args(
"The 'name_length' argument to create_and_list_users is ignored",
"0.1.2", ["name_length"], once=True)
def create_and_list_users(self, name_length=10, **kwargs):
"""Create a keystone user with random name and list all users.
:param kwargs: Other optional parameters to create users like
"tenant_id", "enabled".
"""
self._user_create(**kwargs)
self._list_users()
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
@utils.log_deprecated_args(
"The 'name_length' argument to create_and_list_tenants is ignored",
"0.1.2", ["name_length"], once=True)
def create_and_list_tenants(self, name_length=10, **kwargs):
"""Create a keystone tenant with random name and list all tenants.
:param kwargs: Other optional parameters
"""
self._tenant_create(**kwargs)
self._list_tenants()
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
def add_and_remove_user_role(self):
"""Create a user role add to a user and disassociate."""
tenant_id = self.context["tenant"]["id"]
user_id = self.context["user"]["id"]
role = self._role_create()
self._role_add(user_id, role, tenant_id)
self._role_remove(user_id, role, tenant_id)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
def create_and_delete_role(self):
"""Create a user role and delete it."""
role = self._role_create()
self._resource_delete(role)
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
def create_add_and_list_user_roles(self):
"""Create user role, add it and list user roles for given user."""
tenant_id = self.context["tenant"]["id"]
user_id = self.context["user"]["id"]
role = self._role_create()
self._role_add(user_id, role, tenant_id)
self._list_roles_for_user(user_id, tenant_id)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
def get_entities(self, service_name="keystone"):
"""Get instance of a tenant, user, role and service by id's.
An ephemeral tenant, user, and role are each created. By
default, fetches the 'keystone' service. This can be
overridden (for instance, to get the 'Identity Service'
service on older OpenStack), or None can be passed explicitly
to service_name to create a new service and then query it by
ID.
:param service_name: The name of the service to get by ID; or
None, to create an ephemeral service and
get it by ID.
"""
tenant = self._tenant_create()
user = self._user_create()
role = self._role_create()
self._get_tenant(tenant.id)
self._get_user(user.id)
self._get_role(role.id)
if service_name is None:
service = self._service_create()
else:
service = self._get_service_by_name(service_name)
self._get_service(service.id)
@validation.required_openstack(admin=True)
@utils.log_deprecated_args(
"The 'name' argument to create_and_delete_service will be ignored",
"0.0.5", ["name"])
@scenario.configure(context={"admin_cleanup": ["keystone"]})
def create_and_delete_service(self, name=None, service_type=None,
description=None):
"""Create and delete service.
:param service_type: type of the service
:param description: description of the service
"""
service = self._service_create(service_type, description)
self._delete_service(service.id)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
@utils.log_deprecated_args(
"The 'name_length' argument to create_update_and_delete_tenant is "
"ignored", "0.1.2", ["name_length"], once=True)
def create_update_and_delete_tenant(self, name_length=None, **kwargs):
"""Create, update and delete tenant.
:param kwargs: Other optional parameters for tenant creation
"""
tenant = self._tenant_create(**kwargs)
self._update_tenant(tenant)
self._resource_delete(tenant)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
@utils.log_deprecated_args(
"The 'name_length' and 'password_length' arguments to "
"create_user_update_password are ignored",
"0.1.2", ["name_length", "password_length"], once=True)
def create_user_update_password(self, name_length=None,
password_length=None):
"""Create user and update password for that user."""
password = self.generate_random_name()
user = self._user_create()
self._update_user_password(user.id, password)
@validation.required_openstack(admin=True)
@utils.log_deprecated_args(
"The 'name' argument to create_and_list_services will be ignored",
"0.0.5", ["name"])
@scenario.configure(context={"admin_cleanup": ["keystone"]})
def create_and_list_services(self, name=None, service_type=None,
description=None):
"""Create and list services.
:param service_type: type of the service
:param description: description of the service
"""
self._service_create(service_type, description)
self._list_services()
@validation.required_openstack(users=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
def create_and_list_ec2credentials(self):
"""Create and List all keystone ec2-credentials."""
self._create_ec2credentials(self.context["user"]["id"],
self.context["tenant"]["id"])
self._list_ec2credentials(self.context["user"]["id"])
@validation.required_openstack(users=True)
@scenario.configure(context={"admin_cleanup": ["keystone"]})
def create_and_delete_ec2credential(self):
"""Create and delete keystone ec2-credential."""
creds = self._create_ec2credentials(self.context["user"]["id"],
self.context["tenant"]["id"])
self._delete_ec2credential(self.context["user"]["id"], creds.access)
|
|
import unittest
from django.core.exceptions import FieldError
from django.db import IntegrityError, connection, transaction
from django.db.models import CharField, Count, F, IntegerField, Max
from django.db.models.functions import Abs, Concat, Lower
from django.test import TestCase
from django.test.utils import register_lookup
from .models import (
A, B, Bar, D, DataPoint, Foo, RelatedPoint, UniqueNumber,
UniqueNumberChild,
)
class SimpleTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = A.objects.create()
cls.a2 = A.objects.create()
for x in range(20):
B.objects.create(a=cls.a1)
D.objects.create(a=cls.a1)
def test_nonempty_update(self):
"""
Update changes the right number of rows for a nonempty queryset
"""
num_updated = self.a1.b_set.update(y=100)
self.assertEqual(num_updated, 20)
cnt = B.objects.filter(y=100).count()
self.assertEqual(cnt, 20)
def test_empty_update(self):
"""
Update changes the right number of rows for an empty queryset
"""
num_updated = self.a2.b_set.update(y=100)
self.assertEqual(num_updated, 0)
cnt = B.objects.filter(y=100).count()
self.assertEqual(cnt, 0)
def test_nonempty_update_with_inheritance(self):
"""
        Update changes the right number of rows for a nonempty queryset
when the update affects only a base table
"""
num_updated = self.a1.d_set.update(y=100)
self.assertEqual(num_updated, 20)
cnt = D.objects.filter(y=100).count()
self.assertEqual(cnt, 20)
def test_empty_update_with_inheritance(self):
"""
Update changes the right number of rows for an empty queryset
when the update affects only a base table
"""
num_updated = self.a2.d_set.update(y=100)
self.assertEqual(num_updated, 0)
cnt = D.objects.filter(y=100).count()
self.assertEqual(cnt, 0)
def test_foreign_key_update_with_id(self):
"""
Update works using <field>_id for foreign keys
"""
num_updated = self.a1.d_set.update(a_id=self.a2)
self.assertEqual(num_updated, 20)
self.assertEqual(self.a2.d_set.count(), 20)
class AdvancedTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.d0 = DataPoint.objects.create(name="d0", value="apple")
cls.d2 = DataPoint.objects.create(name="d2", value="banana")
cls.d3 = DataPoint.objects.create(name="d3", value="banana")
cls.r1 = RelatedPoint.objects.create(name="r1", data=cls.d3)
def test_update(self):
"""
Objects are updated by first filtering the candidates into a queryset
and then calling the update() method. It executes immediately and
returns nothing.
"""
resp = DataPoint.objects.filter(value="apple").update(name="d1")
self.assertEqual(resp, 1)
resp = DataPoint.objects.filter(value="apple")
self.assertEqual(list(resp), [self.d0])
def test_update_multiple_objects(self):
"""
We can update multiple objects at once.
"""
resp = DataPoint.objects.filter(value='banana').update(value='pineapple')
self.assertEqual(resp, 2)
self.assertEqual(DataPoint.objects.get(name="d2").value, 'pineapple')
def test_update_fk(self):
"""
Foreign key fields can also be updated, although you can only update
the object referred to, not anything inside the related object.
"""
resp = RelatedPoint.objects.filter(name="r1").update(data=self.d0)
self.assertEqual(resp, 1)
resp = RelatedPoint.objects.filter(data__name="d0")
self.assertEqual(list(resp), [self.r1])
def test_update_multiple_fields(self):
"""
Multiple fields can be updated at once
"""
resp = DataPoint.objects.filter(value="apple").update(
value="fruit", another_value="peach")
self.assertEqual(resp, 1)
d = DataPoint.objects.get(name="d0")
self.assertEqual(d.value, 'fruit')
self.assertEqual(d.another_value, 'peach')
def test_update_all(self):
"""
In the rare case you want to update every instance of a model, update()
is also a manager method.
"""
self.assertEqual(DataPoint.objects.update(value='thing'), 3)
resp = DataPoint.objects.values('value').distinct()
self.assertEqual(list(resp), [{'value': 'thing'}])
def test_update_slice_fail(self):
"""
We do not support update on already sliced query sets.
"""
method = DataPoint.objects.all()[:2].update
msg = 'Cannot update a query once a slice has been taken.'
with self.assertRaisesMessage(TypeError, msg):
method(another_value='another thing')
def test_update_respects_to_field(self):
"""
Update of an FK field which specifies a to_field works.
"""
a_foo = Foo.objects.create(target='aaa')
b_foo = Foo.objects.create(target='bbb')
bar = Bar.objects.create(foo=a_foo)
self.assertEqual(bar.foo_id, a_foo.target)
bar_qs = Bar.objects.filter(pk=bar.pk)
self.assertEqual(bar_qs[0].foo_id, a_foo.target)
bar_qs.update(foo=b_foo)
self.assertEqual(bar_qs[0].foo_id, b_foo.target)
def test_update_m2m_field(self):
msg = (
'Cannot update model field '
'<django.db.models.fields.related.ManyToManyField: m2m_foo> '
'(only non-relations and foreign keys permitted).'
)
with self.assertRaisesMessage(FieldError, msg):
Bar.objects.update(m2m_foo='whatever')
def test_update_transformed_field(self):
A.objects.create(x=5)
A.objects.create(x=-6)
with register_lookup(IntegerField, Abs):
A.objects.update(x=F('x__abs'))
self.assertCountEqual(A.objects.values_list('x', flat=True), [5, 6])
def test_update_annotated_queryset(self):
"""
Update of a queryset that's been annotated.
"""
# Trivial annotated update
qs = DataPoint.objects.annotate(alias=F('value'))
self.assertEqual(qs.update(another_value='foo'), 3)
# Update where annotation is used for filtering
qs = DataPoint.objects.annotate(alias=F('value')).filter(alias='apple')
self.assertEqual(qs.update(another_value='foo'), 1)
# Update where annotation is used in update parameters
qs = DataPoint.objects.annotate(alias=F('value'))
self.assertEqual(qs.update(another_value=F('alias')), 3)
# Update where aggregation annotation is used in update parameters
qs = DataPoint.objects.annotate(max=Max('value'))
msg = (
'Aggregate functions are not allowed in this query '
'(another_value=Max(Col(update_datapoint, update.DataPoint.value))).'
)
with self.assertRaisesMessage(FieldError, msg):
qs.update(another_value=F('max'))
def test_update_annotated_multi_table_queryset(self):
"""
Update of a queryset that's been annotated and involves multiple tables.
"""
# Trivial annotated update
qs = DataPoint.objects.annotate(related_count=Count('relatedpoint'))
self.assertEqual(qs.update(value='Foo'), 3)
# Update where annotation is used for filtering
qs = DataPoint.objects.annotate(related_count=Count('relatedpoint'))
self.assertEqual(qs.filter(related_count=1).update(value='Foo'), 1)
# Update where aggregation annotation is used in update parameters
qs = RelatedPoint.objects.annotate(max=Max('data__value'))
msg = 'Joined field references are not permitted in this query'
with self.assertRaisesMessage(FieldError, msg):
qs.update(name=F('max'))
def test_update_with_joined_field_annotation(self):
msg = 'Joined field references are not permitted in this query'
with register_lookup(CharField, Lower):
for annotation in (
F('data__name'),
F('data__name__lower'),
Lower('data__name'),
Concat('data__name', 'data__value'),
):
with self.subTest(annotation=annotation):
with self.assertRaisesMessage(FieldError, msg):
RelatedPoint.objects.annotate(
new_name=annotation,
).update(name=F('new_name'))
@unittest.skipUnless(
connection.vendor == 'mysql',
'UPDATE...ORDER BY syntax is supported on MySQL/MariaDB',
)
class MySQLUpdateOrderByTest(TestCase):
"""Update field with a unique constraint using an ordered queryset."""
@classmethod
def setUpTestData(cls):
UniqueNumber.objects.create(number=1)
UniqueNumber.objects.create(number=2)
def test_order_by_update_on_unique_constraint(self):
tests = [
('-number', 'id'),
(F('number').desc(), 'id'),
(F('number') * -1, 'id'),
]
for ordering in tests:
with self.subTest(ordering=ordering), transaction.atomic():
updated = UniqueNumber.objects.order_by(*ordering).update(
number=F('number') + 1,
)
self.assertEqual(updated, 2)
def test_order_by_update_on_unique_constraint_annotation(self):
# Ordering by annotations is omitted because they cannot be resolved in
# .update().
with self.assertRaises(IntegrityError):
UniqueNumber.objects.annotate(
number_inverse=F('number').desc(),
).order_by('number_inverse').update(
number=F('number') + 1,
)
def test_order_by_update_on_parent_unique_constraint(self):
# Ordering by inherited fields is omitted because joined fields cannot
# be used in the ORDER BY clause.
UniqueNumberChild.objects.create(number=3)
UniqueNumberChild.objects.create(number=4)
with self.assertRaises(IntegrityError):
UniqueNumberChild.objects.order_by('number').update(
number=F('number') + 1,
)
def test_order_by_update_on_related_field(self):
# Ordering by related fields is omitted because joined fields cannot be
# used in the ORDER BY clause.
data = DataPoint.objects.create(name='d0', value='apple')
related = RelatedPoint.objects.create(name='r0', data=data)
with self.assertNumQueries(1) as ctx:
updated = RelatedPoint.objects.order_by('data__name').update(name='new')
sql = ctx.captured_queries[0]['sql']
self.assertNotIn('ORDER BY', sql)
self.assertEqual(updated, 1)
related.refresh_from_db()
self.assertEqual(related.name, 'new')
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import datetime
import functools
import inspect
import json
import lockfile
import netaddr
import os
import random
import re
import shlex
import socket
import struct
import sys
import time
import types
import uuid
import pyclbr
from xml.sax import saxutils
from eventlet import event
from eventlet import greenthread
from eventlet import semaphore
from eventlet.green import subprocess
from nova import exception
from nova import flags
from nova import log as logging
from nova import version
LOG = logging.getLogger("nova.utils")
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
FLAGS = flags.FLAGS
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError), exc:
LOG.debug(_('Inner Exception: %s'), exc)
raise exception.ClassNotFound(class_name=class_str)
def import_object(import_str):
"""Returns an object including a module or module and class."""
try:
__import__(import_str)
return sys.modules[import_str]
except ImportError:
cls = import_class(import_str)
return cls()
def vpn_ping(address, port, timeout=0.05, session_id=None):
"""Sends a vpn negotiation packet and returns the server session.
Returns False on a failure. Basic packet structure is below.
Client packet (14 bytes)::
         0 1      8 9  13
        +-+--------+-----+
        |x| cli_id |?????|
        +-+--------+-----+
x = packet identifier 0x38
cli_id = 64 bit identifier
? = unknown, probably flags/padding
Server packet (26 bytes)::
         0 1      8 9  13 14    21 22  25
        +-+--------+-----+--------+------+
        |x| srv_id |?????| cli_id | ???? |
        +-+--------+-----+--------+------+
x = packet identifier 0x40
cli_id = 64 bit identifier
? = unknown, probably flags/padding
bit 9 was 1 and the rest were 0 in testing
"""
if session_id is None:
session_id = random.randint(0, 0xffffffffffffffff)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
data = struct.pack('!BQxxxxxx', 0x38, session_id)
sock.sendto(data, (address, port))
sock.settimeout(timeout)
try:
received = sock.recv(2048)
except socket.timeout:
return False
finally:
sock.close()
fmt = '!BQxxxxxQxxxx'
if len(received) != struct.calcsize(fmt):
print struct.calcsize(fmt)
return False
(identifier, server_sess, client_sess) = struct.unpack(fmt, received)
if identifier == 0x40 and client_sess == session_id:
return server_sess
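# For example, vpn_ping('10.0.0.1', 1194) sends the 0x38 probe above and
# returns the server's 64-bit session id, or a false value on timeout or an
# unexpected reply (the address and port are illustrative).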
def fetchfile(url, target):
LOG.debug(_('Fetching %s') % url)
execute('curl', '--fail', url, '-o', target)
def execute(*cmd, **kwargs):
"""
Helper method to execute command with optional retry.
:cmd Passed to subprocess.Popen.
:process_input Send to opened process.
:check_exit_code Defaults to 0. Raise exception.ProcessExecutionError
unless program exits with this code.
:delay_on_retry True | False. Defaults to True. If set to True, wait a
short amount of time before retrying.
:attempts How many times to retry cmd.
:run_as_root True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper FLAG.
:raises exception.Error on receiving unknown arguments
:raises exception.ProcessExecutionError
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', 0)
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
if len(kwargs):
raise exception.Error(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root:
cmd = shlex.split(FLAGS.root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=True)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.debug(_('Result was %s') % _returncode)
if type(check_exit_code) == types.IntType \
and _returncode != check_exit_code:
(stdout, stderr) = result
raise exception.ProcessExecutionError(
exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except exception.ProcessExecutionError:
if not attempts:
raise
else:
LOG.debug(_('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
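# For example (the command shown is illustrative), the keyword arguments map
# onto the options documented in the docstring above:
#     out, err = execute('cat', '/etc/hostname', check_exit_code=0, attempts=2)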
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_('Running cmd (SSH): %s'), ' '.join(cmd))
if addl_env:
raise exception.Error(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise exception.Error(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
#stdin.write('process_input would go here')
#stdin.flush()
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
raise exception.ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return (stdout, stderr)
def abspath(s):
return os.path.join(os.path.dirname(__file__), s)
def novadir():
import nova
return os.path.abspath(nova.__file__).split('nova/__init__.py')[0]
def default_flagfile(filename='nova.conf', args=None):
if args is None:
args = sys.argv
for arg in args:
if arg.find('flagfile') != -1:
break
else:
if not os.path.isabs(filename):
# turn relative filename into an absolute path
script_dir = os.path.dirname(inspect.stack()[-1][1])
filename = os.path.abspath(os.path.join(script_dir, filename))
if not os.path.exists(filename):
filename = "./nova.conf"
if not os.path.exists(filename):
filename = '/etc/nova/nova.conf'
if os.path.exists(filename):
flagfile = '--flagfile=%s' % filename
args.insert(1, flagfile)
def debug(arg):
LOG.debug(_('debug in callback: %s'), arg)
return arg
def runthis(prompt, *cmd, **kwargs):
LOG.debug(_('Running %s'), (' '.join(cmd)))
rv, err = execute(*cmd, **kwargs)
def generate_uid(topic, size=8):
characters = '01234567890abcdefghijklmnopqrstuvwxyz'
choices = [random.choice(characters) for x in xrange(size)]
return '%s-%s' % (topic, ''.join(choices))
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789' # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ' # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
# ~5 bits per symbol
EASIER_PASSWORD_SYMBOLS = ('23456789' # Removed: 0, 1
'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O
def usage_from_instance(instance_ref, **kw):
usage_info = dict(
project_id=instance_ref['project_id'],
user_id=instance_ref['user_id'],
instance_id=instance_ref['id'],
instance_type=instance_ref['instance_type']['name'],
instance_type_id=instance_ref['instance_type_id'],
display_name=instance_ref['display_name'],
created_at=str(instance_ref['created_at']),
launched_at=str(instance_ref['launched_at']) \
if instance_ref['launched_at'] else '',
image_ref=instance_ref['image_ref'])
usage_info.update(kw)
return usage_info
def generate_password(length=20, symbols=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbols.
Believed to be reasonably secure (with a reasonable password length!)
"""
r = random.SystemRandom()
return ''.join([r.choice(symbols) for _i in xrange(length)])
def last_octet(address):
return int(address.split('.')[-1])
def get_my_linklocal(interface):
try:
if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface)
        condition = r'\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link'
links = [re.search(condition, x) for x in if_str[0].split('\n')]
address = [w.group(1) for w in links if w is not None]
if address[0] is not None:
return address[0]
else:
raise exception.Error(_('Link Local address is not found.:%s')
% if_str)
except Exception as ex:
raise exception.Error(_("Couldn't get Link Local IP of %(interface)s"
" :%(ex)s") % locals())
def utcnow():
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
return utcnow.override_time
return datetime.datetime.utcnow()
utcnow.override_time = None
def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
return utcnow() - before > datetime.timedelta(seconds=seconds)
def utcnow_ts():
"""Timestamp version of our utcnow function."""
return time.mktime(utcnow().timetuple())
def set_time_override(override_time=datetime.datetime.utcnow()):
"""Override utils.utcnow to return a constant time."""
utcnow.override_time = override_time
def advance_time_delta(timedelta):
"""Advance overriden time using a datetime.timedelta."""
assert(not utcnow.override_time is None)
utcnow.override_time += timedelta
def advance_time_seconds(seconds):
"""Advance overriden time by seconds."""
advance_time_delta(datetime.timedelta(0, seconds))
def clear_time_override():
"""Remove the overridden time."""
utcnow.override_time = None
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
"""Returns formatted utcnow."""
if not at:
at = utcnow()
return at.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
"""Turn a formatted time back into a datetime."""
return datetime.datetime.strptime(timestr, fmt)
def isotime(at=None):
"""Returns iso formatted utcnow."""
return strtime(at, ISO_TIME_FORMAT)
def parse_isotime(timestr):
"""Turn an iso formatted time back into a datetime."""
return parse_strtime(timestr, ISO_TIME_FORMAT)
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = self.__pivot.value
if backend_name not in self.__backends:
raise exception.Error(_('Invalid backend: %s') % backend_name)
backend = self.__backends[backend_name]
if type(backend) == type(tuple()):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
LOG.debug(_('backend %s'), self.__backend)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCall(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
def start(self, interval, now=True):
self._running = True
done = event.Event()
def _inner():
if not now:
greenthread.sleep(interval)
try:
while self._running:
self.f(*self.args, **self.kw)
if not self._running:
break
greenthread.sleep(interval)
except LoopingCallDone, e:
self.stop()
done.send(e.retvalue)
except Exception:
logging.exception('in looping call')
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
Code is directly from the utf8 function in
http://github.com/facebook/tornado/blob/master/tornado/escape.py
"""
return saxutils.escape(value, {'"': '&quot;'})
def utf8(value):
"""Try to turn a string into utf-8 if possible.
Code is directly from the utf8 function in
http://github.com/facebook/tornado/blob/master/tornado/escape.py
"""
if isinstance(value, unicode):
return value.encode('utf-8')
assert isinstance(value, str)
return value
def to_primitive(value, convert_instances=False, level=0):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
for test in nasty:
if test(value):
return unicode(value)
if level > 3:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
if type(value) is type([]) or type(value) is type((None,)):
o = []
for v in value:
o.append(to_primitive(v, convert_instances=convert_instances,
level=level))
return o
elif type(value) is type({}):
o = {}
for k, v in value.iteritems():
o[k] = to_primitive(v, convert_instances=convert_instances,
level=level)
return o
elif isinstance(value, datetime.datetime):
return str(value)
elif hasattr(value, 'iteritems'):
return to_primitive(dict(value.iteritems()),
convert_instances=convert_instances,
level=level)
elif hasattr(value, '__iter__'):
return to_primitive(list(value), convert_instances=convert_instances, level=level)
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return to_primitive(value.__dict__,
convert_instances=convert_instances,
level=level + 1)
else:
return value
except TypeError, e:
# Class objects are tricky since they may define something like
# __iter__, but it isn't callable as list().
return unicode(value)
def dumps(value):
try:
return json.dumps(value)
except TypeError:
pass
return json.dumps(to_primitive(value))
def loads(s):
return json.loads(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append(("nova.utils", "dumps", TypeError,
"loads", ValueError))
anyjson.force_implementation("nova.utils")
_semaphores = {}
class _NoopContextManager(object):
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def synchronized(name, external=False):
"""Synchronization decorator.
Decorating a method like so:
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock:
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
The external keyword argument denotes whether this lock should work across
multiple processes. This means that if two different workers both run a
method decorated with @synchronized('mylock', external=True), only one
of them will execute at a time.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn\
# amically-allocating-and-destroying-mutexes
if name not in _semaphores:
_semaphores[name] = semaphore.Semaphore()
sem = _semaphores[name]
LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method '
'"%(method)s"...' % {'lock': name,
'method': f.__name__}))
with sem:
if external:
LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
'method "%(method)s"...' %
{'lock': name, 'method': f.__name__}))
lock_file_path = os.path.join(FLAGS.lock_path,
'nova-%s.lock' % name)
lock = lockfile.FileLock(lock_file_path)
else:
lock = _NoopContextManager()
with lock:
retval = f(*args, **kwargs)
# If no-one else is waiting for it, delete it.
# See note about possible raciness above.
if not sem.balance < 1:
del _semaphores[name]
return retval
return inner
return wrap
def get_from_path(items, path):
"""Returns a list of items matching the specified path.
Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
intermediate results are lists it will treat each list item individually.
A 'None' in items or any child expressions will be ignored; this function
will not throw because of None (anywhere) in items. The returned list
will contain no None values.
"""
if path is None:
raise exception.Error('Invalid mini_xpath')
(first_token, sep, remainder) = path.partition('/')
if first_token == '':
raise exception.Error('Invalid mini_xpath')
results = []
if items is None:
return results
if not isinstance(items, types.ListType):
# Wrap single objects in a list
items = [items]
for item in items:
if item is None:
continue
get_method = getattr(item, 'get', None)
if get_method is None:
continue
child = get_method(first_token)
if child is None:
continue
if isinstance(child, types.ListType):
# Flatten intermediate lists
for x in child:
results.append(x)
else:
results.append(child)
if not sep:
# No more tokens
return results
else:
return get_from_path(results, remainder)
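# Example (illustrative): with items = {'a': [{'b': 1}, {'b': 2}]},
# get_from_path(items, 'a/b') flattens the intermediate list and returns
# [1, 2]; None values at any level are silently skipped.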
def flatten_dict(dict_, flattened=None):
"""Recursively flatten a nested dictionary."""
flattened = flattened or {}
for key, value in dict_.iteritems():
if hasattr(value, 'iteritems'):
flatten_dict(value, flattened)
else:
flattened[key] = value
return flattened
def partition_dict(dict_, keys):
"""Return two dicts, one with `keys` the other with everything else."""
intersection = {}
difference = {}
for key, value in dict_.iteritems():
if key in keys:
intersection[key] = value
else:
difference[key] = value
return intersection, difference
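# Example (illustrative): partition_dict({'a': 1, 'b': 2}, ['a'])
# returns ({'a': 1}, {'b': 2}).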
def map_dict_keys(dict_, key_map):
"""Return a dict in which the dictionaries keys are mapped to new keys."""
mapped = {}
for key, value in dict_.iteritems():
mapped_key = key_map[key] if key in key_map else key
mapped[mapped_key] = value
return mapped
def subset_dict(dict_, keys):
"""Return a dict that only contains a subset of keys."""
subset = partition_dict(dict_, keys)[0]
return subset
def check_isinstance(obj, cls):
"""Checks that obj is of type cls, and lets PyLint infer types."""
if isinstance(obj, cls):
return obj
raise Exception(_('Expected object of type: %s') % (str(cls)))
# TODO(justinsb): Can we make this better??
return cls() # Ugly PyLint hack
def parse_server_string(server_str):
"""
Parses the given server_string and returns a (host, port) tuple.
If the string is not a combination of host and port, the port element
is an empty string. If the input is an invalid expression,
('', '') is returned.
"""
try:
# First of all, exclude pure IPv6 address (w/o port).
if netaddr.valid_ipv6(server_str):
return (server_str, '')
# Next, check if this is IPv6 address with a port number combination.
if server_str.find("]:") != -1:
(address, port) = server_str.replace('[', '', 1).split(']:')
return (address, port)
# Third, check if this is a combination of an address and a port
if server_str.find(':') == -1:
return (server_str, '')
# This must be a combination of an address and a port
(address, port) = server_str.split(':')
return (address, port)
except Exception:
LOG.debug(_('Invalid server_string: %s' % server_str))
return ('', '')
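# Examples (illustrative):
#     parse_server_string('192.168.1.5:9999')    # -> ('192.168.1.5', '9999')
#     parse_server_string('[2001:db8::1]:9999')  # -> ('2001:db8::1', '9999')
#     parse_server_string('2001:db8::1')         # -> ('2001:db8::1', '')
#     parse_server_string('www.example.com')     # -> ('www.example.com', '')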
def gen_uuid():
return uuid.uuid4()
def is_uuid_like(val):
"""For our purposes, a UUID is a string in canoical form:
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
"""
if not isinstance(val, basestring):
return False
return (len(val) == 36) and (val.count('-') == 4)
def bool_from_str(val):
"""Convert a string representation of a bool into a bool value"""
if not val:
return False
try:
return True if int(val) else False
except ValueError:
return val.lower() == 'true'
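# Examples (illustrative): bool_from_str('1') and bool_from_str('true') return
# True; bool_from_str('0'), bool_from_str('') and bool_from_str('no') return False.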
def is_valid_ipv4(address):
"""valid the address strictly as per format xxx.xxx.xxx.xxx.
where xxx is a value between 0 and 255.
"""
parts = address.split(".")
if len(parts) != 4:
return False
for item in parts:
try:
if not 0 <= int(item) <= 255:
return False
except ValueError:
return False
return True
def monkey_patch():
""" If the Flags.monkey_patch set as True,
this functuion patches a decorator
for all functions in specified modules.
You can set decorators for each modules
using FLAGS.monkey_patch_modules.
The format is "Module path:Decorator function".
Example: 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator'
Parameters of the decorator are as follows.
(See nova.notifier.api.notify_decorator)
name - name of the function
function - object of the function
"""
# If FLAGS.monkey_patch is not True, this function does nothing.
if not FLAGS.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in FLAGS.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = import_class("%s.%s" % (module, key))
for method, func in inspect.getmembers(clz, inspect.ismethod):
setattr(clz, method,\
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,\
decorator("%s.%s" % (module, key), func))
def convert_to_list_dict(lst, label):
"""Convert a value or list into a list of dicts"""
if not lst:
return None
if not isinstance(lst, list):
lst = [lst]
return [{label: x} for x in lst]
def timefunc(func):
"""Decorator that logs how long a particular function took to execute"""
@functools.wraps(func)
def inner(*args, **kwargs):
start_time = time.time()
try:
return func(*args, **kwargs)
finally:
total_time = time.time() - start_time
LOG.debug(_("timefunc: '%(name)s' took %(total_time).2f secs") %
dict(name=func.__name__, total_time=total_time))
return inner
def generate_glance_url():
"""Generate the URL to glance."""
# TODO(jk0): This will eventually need to take SSL into consideration
# when supported in glance.
return "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port)
|
|
"""Fourier Series"""
from __future__ import print_function, division
from sympy import pi, oo, Wild
from sympy.core.expr import Expr
from sympy.core.add import Add
from sympy.core.compatibility import is_sequence
from sympy.core.containers import Tuple
from sympy.core.singleton import S
from sympy.core.symbol import Dummy, Symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import sin, cos, sinc
from sympy.series.series_class import SeriesBase
from sympy.series.sequences import SeqFormula
from sympy.sets.sets import Interval
from sympy.simplify.fu import TR2, TR1, TR10, sincos_to_sum
def fourier_cos_seq(func, limits, n):
"""Returns the cos sequence in a Fourier series"""
from sympy.integrals import integrate
x, L = limits[0], limits[2] - limits[1]
cos_term = cos(2*n*pi*x / L)
formula = 2 * cos_term * integrate(func * cos_term, limits) / L
a0 = formula.subs(n, S.Zero) / 2
return a0, SeqFormula(2 * cos_term * integrate(func * cos_term, limits)
/ L, (n, 1, oo))
def fourier_sin_seq(func, limits, n):
"""Returns the sin sequence in a Fourier series"""
from sympy.integrals import integrate
x, L = limits[0], limits[2] - limits[1]
sin_term = sin(2*n*pi*x / L)
return SeqFormula(2 * sin_term * integrate(func * sin_term, limits)
/ L, (n, 1, oo))
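# Coefficient sketch (for orientation, not part of the original module): with
# L = limits[2] - limits[1], the two helpers above build
#     a_n = (2/L) * integrate(f(x)*cos(2*pi*n*x/L), limits)
#     b_n = (2/L) * integrate(f(x)*sin(2*pi*n*x/L), limits)
# e.g. for f(x) = x on (x, -pi, pi) the sine coefficients are 2*(-1)**(n + 1)/n,
# which matches the truncate() doctest further below.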
def _process_limits(func, limits):
"""
Limits should be of the form (x, start, stop).
x should be a symbol. Both start and stop should be bounded.
* If x is not given, x is determined from func.
* If limits is None. Limit of the form (x, -pi, pi) is returned.
Examples
========
>>> from sympy import pi
>>> from sympy.series.fourier import _process_limits as pari
>>> from sympy.abc import x
>>> pari(x**2, (x, -2, 2))
(x, -2, 2)
>>> pari(x**2, (-2, 2))
(x, -2, 2)
>>> pari(x**2, None)
(x, -pi, pi)
"""
def _find_x(func):
free = func.free_symbols
if len(free) == 1:
return free.pop()
elif not free:
return Dummy('k')
else:
raise ValueError(
" specify dummy variables for %s. If the function contains"
" more than one free symbol, a dummy variable should be"
" supplied explicitly e.g. FourierSeries(m*n**2, (n, -pi, pi))"
% func)
x, start, stop = None, None, None
if limits is None:
x, start, stop = _find_x(func), -pi, pi
if is_sequence(limits, Tuple):
if len(limits) == 3:
x, start, stop = limits
elif len(limits) == 2:
x = _find_x(func)
start, stop = limits
if not isinstance(x, Symbol) or start is None or stop is None:
raise ValueError('Invalid limits given: %s' % str(limits))
unbounded = [S.NegativeInfinity, S.Infinity]
if start in unbounded or stop in unbounded:
raise ValueError("Both the start and end value should be bounded")
return sympify((x, start, stop))
def finite_check(f, x, L):
def check_fx(exprs, x):
return x not in exprs.free_symbols
def check_sincos(_expr, x, L):
if isinstance(_expr, (sin, cos)):
sincos_args = _expr.args[0]
if sincos_args.match(a*(pi/L)*x + b) is not None:
return True
else:
return False
_expr = sincos_to_sum(TR2(TR1(f)))
add_coeff = _expr.as_coeff_add()
a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k != S.Zero, ])
b = Wild('b', properties=[lambda k: x not in k.free_symbols, ])
for s in add_coeff[1]:
mul_coeffs = s.as_coeff_mul()[1]
for t in mul_coeffs:
if not (check_fx(t, x) or check_sincos(t, x, L)):
return False, f
return True, _expr
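# Behaviour sketch (illustrative): finite_check decides whether f is already a
# finite trigonometric sum with period 2*L, e.g. with x a Symbol
#     finite_check(sin(x) + cos(3*x), x, pi)  ->  (True, <rewritten expr>)
#     finite_check(x**2, x, pi)               ->  (False, x**2)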
class FourierSeries(SeriesBase):
r"""Represents Fourier sine/cosine series.
This class only represents a fourier series.
No computation is performed.
For how to compute Fourier series, see the :func:`fourier_series`
docstring.
See Also
========
sympy.series.fourier.fourier_series
"""
def __new__(cls, *args):
args = map(sympify, args)
return Expr.__new__(cls, *args)
@property
def function(self):
return self.args[0]
@property
def x(self):
return self.args[1][0]
@property
def period(self):
return (self.args[1][1], self.args[1][2])
@property
def a0(self):
return self.args[2][0]
@property
def an(self):
return self.args[2][1]
@property
def bn(self):
return self.args[2][2]
@property
def interval(self):
return Interval(0, oo)
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def length(self):
return oo
@property
def L(self):
return abs(self.period[1] - self.period[0]) / 2
def _eval_subs(self, old, new):
x = self.x
if old.has(x):
return self
def truncate(self, n=3):
"""
Return the first n nonzero terms of the series.
If n is None return an iterator.
Parameters
==========
n : int or None
Amount of non-zero terms in approximation or None.
Returns
=======
Expr or iterator
Approximation of function expanded into Fourier series.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x, (x, -pi, pi))
>>> s.truncate(4)
2*sin(x) - sin(2*x) + 2*sin(3*x)/3 - sin(4*x)/2
See Also
========
sympy.series.fourier.FourierSeries.sigma_approximation
"""
if n is None:
return iter(self)
terms = []
for t in self:
if len(terms) == n:
break
if t is not S.Zero:
terms.append(t)
return Add(*terms)
def sigma_approximation(self, n=3):
r"""
Return :math:`\sigma`-approximation of Fourier series with respect
to order n.
Sigma approximation adjusts a Fourier summation to eliminate the Gibbs
phenomenon which would otherwise occur at discontinuities.
A sigma-approximated summation for a Fourier series of a T-periodical
function can be written as
.. math::
s(\theta) = \frac{1}{2} a_0 + \sum _{k=1}^{m-1}
\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr) \cdot
\left[ a_k \cos \Bigl( \frac{2\pi k}{T} \theta \Bigr)
+ b_k \sin \Bigl( \frac{2\pi k}{T} \theta \Bigr) \right],
where :math:`a_0, a_k, b_k, k=1,\ldots,{m-1}` are standard Fourier
series coefficients and
:math:`\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr)` is a Lanczos
:math:`\sigma` factor (expressed in terms of normalized
:math:`\operatorname{sinc}` function).
Parameters
==========
n : int
Highest order of the terms taken into account in approximation.
Returns
=======
Expr
Sigma approximation of function expanded into Fourier series.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x, (x, -pi, pi))
>>> s.sigma_approximation(4)
2*sin(x)*sinc(pi/4) - 2*sin(2*x)/pi + 2*sin(3*x)*sinc(3*pi/4)/3
See Also
========
sympy.series.fourier.FourierSeries.truncate
Notes
=====
The behaviour of
:meth:`~sympy.series.fourier.FourierSeries.sigma_approximation`
is different from :meth:`~sympy.series.fourier.FourierSeries.truncate`
- it takes all nonzero terms of degree smaller than n, rather than
first n nonzero ones.
References
==========
.. [1] https://en.wikipedia.org/wiki/Gibbs_phenomenon
.. [2] https://en.wikipedia.org/wiki/Sigma_approximation
"""
terms = [sinc(pi * i / n) * t for i, t in enumerate(self[:n])
if t is not S.Zero]
return Add(*terms)
def shift(self, s):
"""Shift the function by a term independent of x.
f(x) -> f(x) + s
This is fast, if Fourier series of f(x) is already
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.shift(1).truncate()
-4*cos(x) + cos(2*x) + 1 + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
a0 = self.a0 + s
sfunc = self.function + s
return self.func(sfunc, self.args[1], (a0, self.an, self.bn))
def shiftx(self, s):
"""Shift x by a term independent of x.
f(x) -> f(x + s)
This is fast, if Fourier series of f(x) is already
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.shiftx(1).truncate()
-4*cos(x + 1) + cos(2*x + 2) + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.subs(x, x + s)
bn = self.bn.subs(x, x + s)
sfunc = self.function.subs(x, x + s)
return self.func(sfunc, self.args[1], (self.a0, an, bn))
def scale(self, s):
"""Scale the function by a term independent of x.
f(x) -> s * f(x)
This is fast, if Fourier series of f(x) is already
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.scale(2).truncate()
-8*cos(x) + 2*cos(2*x) + 2*pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.coeff_mul(s)
bn = self.bn.coeff_mul(s)
a0 = self.a0 * s
sfunc = self.args[0] * s
return self.func(sfunc, self.args[1], (a0, an, bn))
def scalex(self, s):
"""Scale x by a term independent of x.
f(x) -> f(s*x)
This is fast, if Fourier series of f(x) is already
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.scalex(2).truncate()
-4*cos(2*x) + cos(4*x) + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.subs(x, x * s)
bn = self.bn.subs(x, x * s)
sfunc = self.function.subs(x, x * s)
return self.func(sfunc, self.args[1], (self.a0, an, bn))
def _eval_as_leading_term(self, x):
for t in self:
if t is not S.Zero:
return t
def _eval_term(self, pt):
if pt == 0:
return self.a0
return self.an.coeff(pt) + self.bn.coeff(pt)
def __neg__(self):
return self.scale(-1)
def __add__(self, other):
if isinstance(other, FourierSeries):
if self.period != other.period:
raise ValueError("Both the series should have same periods")
x, y = self.x, other.x
function = self.function + other.function.subs(y, x)
if self.x not in function.free_symbols:
return function
an = self.an + other.an
bn = self.bn + other.bn
a0 = self.a0 + other.a0
return self.func(function, self.args[1], (a0, an, bn))
return Add(self, other)
def __sub__(self, other):
return self.__add__(-other)
class FiniteFourierSeries(FourierSeries):
r"""Represents Finite Fourier sine/cosine series.
For how to compute Fourier series, see the :func:`fourier_series`
docstring.
Parameters
==========
f : Expr
Expression for finding fourier_series
limits : ( x, start, stop)
x is the independent variable for the expression f
(start, stop) is the period of the fourier series
exprs: (a0, an, bn) or Expr
a0 is the constant term a0 of the fourier series
an is a dictionary of coefficients of cos terms
an[k] = coefficient of cos(pi*(k/L)*x)
bn is a dictionary of coefficients of sin terms
bn[k] = coefficient of sin(pi*(k/L)*x)
or exprs can be an expression to be converted to fourier form
Methods
=======
This class is an extension of FourierSeries class.
Please refer to sympy.series.fourier.FourierSeries for
further information.
See Also
========
sympy.series.fourier.FourierSeries
sympy.series.fourier.fourier_series
"""
def __new__(cls, f, limits, exprs):
if not (type(exprs) == tuple and len(exprs) == 3): # exprs is not of form (a0, an, bn)
# Converts the expression to fourier form
c, e = exprs.as_coeff_add()
rexpr = c + Add(*[TR10(i) for i in e])
a0, exp_ls = rexpr.expand(trig=False, power_base=False, power_exp=False, log=False).as_coeff_add()
x = limits[0]
L = abs(limits[2] - limits[1]) / 2
a = Wild('a', properties=[lambda k: k.is_Integer, lambda k: k is not S.Zero, ])
b = Wild('b', properties=[lambda k: x not in k.free_symbols, ])
an = dict()
bn = dict()
# separates the coefficients of sin and cos terms in dictionaries an, and bn
for p in exp_ls:
t = p.match(b * cos(a * (pi / L) * x))
q = p.match(b * sin(a * (pi / L) * x))
if t:
an[t[a]] = t[b] + an.get(t[a], S.Zero)
elif q:
bn[q[a]] = q[b] + bn.get(q[a], S.Zero)
else:
a0 += p
exprs = (a0, an, bn)
args = map(sympify, (f, limits, exprs))
return Expr.__new__(cls, *args)
@property
def interval(self):
_length = 1 if self.a0 else 0
_length += max(set(self.an.keys()).union(set(self.bn.keys()))) + 1
return Interval(0, _length)
@property
def length(self):
return self.stop - self.start
def shiftx(self, s):
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
_expr = self.truncate().subs(x, x + s)
sfunc = self.function.subs(x, x + s)
return self.func(sfunc, self.args[1], _expr)
def scale(self, s):
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
_expr = self.truncate() * s
sfunc = self.function * s
return self.func(sfunc, self.args[1], _expr)
def scalex(self, s):
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
_expr = self.truncate().subs(x, x * s)
sfunc = self.function.subs(x, x * s)
return self.func(sfunc, self.args[1], _expr)
def _eval_term(self, pt):
if pt == 0:
return self.a0
_term = self.an.get(pt, S.Zero) * cos(pt * (pi / self.L) * self.x) \
+ self.bn.get(pt, S.Zero) * sin(pt * (pi / self.L) * self.x)
return _term
def __add__(self, other):
if isinstance(other, FourierSeries):
return other.__add__(fourier_series(self.function, self.args[1],\
finite=False))
elif isinstance(other, FiniteFourierSeries):
if self.period != other.period:
raise ValueError("Both the series should have same periods")
x, y = self.x, other.x
function = self.function + other.function.subs(y, x)
if self.x not in function.free_symbols:
return function
return fourier_series(function, limits=self.args[1])
def fourier_series(f, limits=None, finite=True):
"""Computes Fourier sine/cosine series expansion.
Returns a :class:`FourierSeries` object.
Examples
========
>>> from sympy import fourier_series, pi, cos
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.truncate(n=3)
-4*cos(x) + cos(2*x) + pi**2/3
Shifting
>>> s.shift(1).truncate()
-4*cos(x) + cos(2*x) + 1 + pi**2/3
>>> s.shiftx(1).truncate()
-4*cos(x + 1) + cos(2*x + 2) + pi**2/3
Scaling
>>> s.scale(2).truncate()
-8*cos(x) + 2*cos(2*x) + 2*pi**2/3
>>> s.scalex(2).truncate()
-4*cos(2*x) + cos(4*x) + pi**2/3
Notes
=====
Computing Fourier series can be slow
due to the integration required in computing
an, bn.
It is faster to compute Fourier series of a function
by using shifting and scaling on an already
computed Fourier series rather than computing
again.
e.g. If the Fourier series of ``x**2`` is known
the Fourier series of ``x**2 - 1`` can be found by shifting by ``-1``.
See Also
========
sympy.series.fourier.FourierSeries
References
==========
.. [1] mathworld.wolfram.com/FourierSeries.html
"""
f = sympify(f)
limits = _process_limits(f, limits)
x = limits[0]
if x not in f.free_symbols:
return f
if finite:
L = abs(limits[2] - limits[1]) / 2
is_finite, res_f = finite_check(f, x, L)
if is_finite:
return FiniteFourierSeries(f, limits, res_f)
n = Dummy('n')
neg_f = f.subs(x, -x)
if f == neg_f:
a0, an = fourier_cos_seq(f, limits, n)
bn = SeqFormula(0, (1, oo))
elif f == -neg_f:
a0 = S.Zero
an = SeqFormula(0, (1, oo))
bn = fourier_sin_seq(f, limits, n)
else:
a0, an = fourier_cos_seq(f, limits, n)
bn = fourier_sin_seq(f, limits, n)
return FourierSeries(f, limits, (a0, an, bn))
|
|
import os
import sys
import random
import time
import cPickle
import subprocess
import math
import pdb
import inspect
import types
import imp
import re
### define globals
rproc_nqstat_time = None
rproc_nqstat_output = None
MATLAB_RETURN_VALUE = None
THIS_IS_A_RPROC_PROCESS = None
rproc_wait_jobinfo = None
#### define jobinfo class
class Jobinfo():
def __init__(self):
self.ProcName = []
self.P1 = []
self.Mem = []
self.options = dict()
self.time = None
self.prefix = []
self.mat_fname = ''
self.data_fname = ''
self.result_fname = ''
self.m_fname = ''
self.log_fname = ''
self.qsublog_fname = ''
self.jobid = -1
self.submission_time = None
self.retries = 0
self.created = 0
self.time_of_loss = None
self.crashed_time = None
self.maxvmem = None
self.resubmit = False
self.time_req_resubmit = []
self.mem_req_resubmit = []
self.data_size = []
self.start_time = []
self.hard_time_limit = 1000000
self.callfile = None
### define Error class
class RprocRerun(Exception):
def __init__(self, string):
self.string = string
def __str__(self):
return repr(self.string)
def rproc(ProcName, P1, Mem=None, options=None, runtime=None, callfile=None, resubmission=False):
# [jobinfo]=rproc(ProcName, P1, Mem, options, time)
#
# time in minutes
# mem in mb
environment = '' # TODO
if callfile is None:
### check if ProcName is defined in calling function
callframe = sys._getframe(1)
if not ProcName in callframe.f_locals:
if not ProcName in callframe.f_globals:
print >> sys.stderr, 'ERROR: Could find no definition for %s in local or global context of calling function. Use kword callfile to specify file where %s is defined. Use the relative path to the location of the calling function!' % (ProcName, ProcName)
return
else:
callfile = (callframe.f_globals[ProcName].__module__, inspect.getfile(callframe.f_globals[ProcName]))
else:
callfile = (callframe.f_locals[ProcName].__module__, inspect.getfile(callframe.f_locals[ProcName]))
### detect path of this script
this_frame = sys._getframe(0)
rproc_path = os.path.abspath(inspect.getfile(this_frame))
if runtime is None:
runtime = 10000
if Mem is None:
Mem = 300
if Mem < 100:
print >> sys.stderr, 'WARNING: You specified to allocate less than 100Mb memory for your job. This might not be enough to start. Re-setting to 100Mb'
Mem = 100
if options is None:
options = dict()
### get module list of caller to re-create environment
if not 'imports' in options:
options['imports'] = dict()
if not resubmission:
callframe = sys._getframe(1)
#options['package'] = os.path.dirname(os.path.abspath(callframe.f_globals['__file__']))
for l in callframe.f_globals:
if (len(l) < 2 or l[:2] != '__'):
if isinstance(callframe.f_globals[l], types.ModuleType):
if not l in options['imports']:
if imp.is_builtin(callframe.f_globals[l].__name__) != 0:
options['imports'][l] = (callframe.f_globals[l].__name__, 'builtin')
else:
options['imports'][l] = (callframe.f_globals[l].__name__, callframe.f_globals[l].__file__)
if not callfile[0] in options['imports']:
options['imports'][callfile[0]] = callfile
home_str = os.environ['HOME']
use_reservation = False
### TODO this is only relevant for SGE
if 'ncpus' in options and options['ncpus'] > 1:
Mem /= options['ncpus']
use_reservation = True
if not 'verbosity' in options:
options['verbosity'] = True
if not 'maxjobs' in options:
options['maxjobs'] = 3000
if not 'waitonfull' in options:
options['waitonfull'] = True
if not 'immediately' in options:
options['immediately'] = False
if not 'immediately_bg' in options:
options['immediately_bg'] = False
if not 'submit_now' in options:
options['submit_now'] = True
if not 'nicetohave' in options:
options['nicetohave'] = False
if not 'ncpus' in options:
options['ncpus'] = 1
if not 'start_dir' in options:
dirctry = os.getcwd()
else:
dirctry = options['start_dir']
if not 'resubmit' in options:
options['resubmit'] = False
options['time_req_resubmit'] = []
options['mem_req_resubmit'] = []
if not 'data_size' in options:
options['data_size'] = []
if not 'hard_time_limit' in options:
options['hard_time_limit'] = 1000000
jobinfo = rproc_empty()
jobinfo.ProcName = ProcName
jobinfo.P1 = P1
jobinfo.Mem = Mem
jobinfo.options = options
jobinfo.time = runtime
jobinfo.created = True
jobinfo.resubmit = options['resubmit']
jobinfo.mem_req_resubmit = options['mem_req_resubmit']
jobinfo.time_req_resubmit = options['time_req_resubmit']
jobinfo.data_size = options['data_size']
jobinfo.hard_time_limit = options['hard_time_limit']
if not os.path.exists(os.path.join(os.environ['HOME'], 'tmp', '.sge')):
username = os.environ['USER']
base_dir = os.path.join(home_str, '.sge', 'tmp', username)
if not os.path.exists(base_dir):
os.makedirs(base_dir)
tmp_dir = os.path.join(home_str, '.sge', 'tmp', username, 'tmp')
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
sge_dir = os.path.join(home_str, '.sge', 'tmp', username, 'sge')
if not os.path.exists(sge_dir):
os.makedirs(sge_dir)
if not os.path.exists(os.path.join(os.environ['HOME'], 'tmp')):
os.makedirs(os.path.join(os.environ['HOME'], 'tmp'))
if not os.path.exists(os.path.join(os.environ['HOME'], 'tmp', '.sge')):
os.symlink(sge_dir, os.path.join(os.environ['HOME'], 'tmp', '.sge'))
assert(os.path.exists(os.path.join(os.environ['HOME'],'tmp', '.sge')))
if not os.path.exists(os.path.join(dirctry, '.sge')):
username = os.environ['USER']
sge_base_dir = dirctry.replace(os.path.join('cbio', 'grlab', 'home', username), os.path.join(home_str, '.sge', 'tmp', username))
if not os.path.exists(sge_base_dir):
os.makedirs(sge_base_dir)
sge_dir = os.path.join(sge_base_dir, 'sge')
os.makedirs(sge_dir)
os.symlink(sge_dir, os.path.join(dirctry, '.sge'))
### assembly option string
if use_reservation:
option_str = ' -R y'
else:
option_str = ''
#option_str += ' -l h_vmem=%iM -l s_vmem=%iM -soft -l h_cpu=%1.0f -hard ' % (Mem, Mem, max(60, runtime*60))
option_str += ' -l nodes=1:ppn=%i -l mem=%imb,vmem=%imb,pmem=%imb -l walltime=%1.0f' % (options['ncpus'], Mem, Mem, Mem, max(60, runtime*60))
if environment == 'galaxy':
option_str += ' -l parent=0.0 '
if 'hold' in options:
if options['hold']:
option_str += ' -h u'
if 'queue' in options:
option_str += ' -q "%s" ' % options['queue']
if 'nicetohave' in options and options['nicetohave']:
option_str += ' -l nicetohave=1'
if 'priority' in options:
option_str += ' -p %i' % options['priority']
if 'express' in options and options['express']:
option_str += ' -l express'
if 'hostname' in options:
option_str += ' -l hostname=%s' % options['hostname']
### TODO make this configurable
bin_str = 'python'
### request cplex license
if 'cplex' in options and options['cplex']:
option_str += ' -l cplex=1'
### request several cpus
#if 'ncpus' in options and options['ncpus'] > 1:
# option_str += ' -pe "*" %i ' % options['ncpus']
if 'identifier' in options:
identifier = options['identifier']
else:
identifier = 'RP'
cc = random.randint(0, 100000)
prefix = '%s%i-%1.10f' % (identifier, cc, time.time())
rproc_dir = '%s/tmp/.sge' % os.environ['HOME']
mat_fname = os.path.join(rproc_dir, '%s.pickle' % prefix)
data_fname = os.path.join(rproc_dir, '%s_data.pickle' % prefix)
result_fname = os.path.join(rproc_dir, '%s_result.pickle' % prefix)
m_fname = os.path.join(rproc_dir, '%s.sh' % prefix)
while os.path.exists(mat_fname) or os.path.exists(result_fname) or os.path.exists(m_fname):
cc = random.randint(0, 100000)
prefix = '%s%i-%1.10f' % (identifier, cc, time.time())
mat_fname = os.path.join(rproc_dir, '%s.pickle' % prefix)
data_fname = os.path.join(rproc_dir, '%s_data.pickle' % prefix)
result_fname = os.path.join(rproc_dir, '%s_result.pickle' % prefix)
m_fname = os.path.join(rproc_dir, '%s.sh' % prefix)
if 'log_fname' in options:
log_fname = options['log_fname']
else:
log_fname = os.path.join(dirctry, '.sge', '%s_%s.rproc' % (prefix, time.strftime('%d-%b-%Y_%H_%M')))
qsublog_fname = '%s.qsubout' % log_fname
jobinfo.prefix = prefix
jobinfo.mat_fname = mat_fname
jobinfo.data_fname = data_fname
jobinfo.result_fname = result_fname
jobinfo.m_fname = m_fname
jobinfo.log_fname = log_fname
jobinfo.qsublog_fname = qsublog_fname
jobinfo.callfile = callfile
### save the call information
cPickle.dump((ProcName, dirctry, options, callfile), open(mat_fname, 'wb'), -1)
cPickle.dump(P1, open(data_fname, 'wb'), -1)
evalstring = '%s %s %s %s' % (bin_str, rproc_path, mat_fname, data_fname)
evalstring = 'cd %s; %s; exit' % (dirctry, evalstring)
fd = open(m_fname, 'w')
print >> fd, '%s' % evalstring
fd.close()
if 'envstr' in options:
envstr = options['envstr']
if len(envstr) > 0:
envstr += ';'
else:
envstr = ''
if options['immediately']:
str = '%s bash %s >> %s' % (envstr, m_fname, log_fname)
elif options['immediately_bg']:
str = '%s bash %s >> %s &' % (envstr, m_fname, log_fname)
else:
#str = 'echo \'%s hostname; bash %s >> %s\' | qsub -o %s -j y -r y %s -N %s >> %s 2>&1' % (envstr, m_fname, log_fname, qsublog_fname, option_str, prefix, log_fname)
str = 'echo \'%s hostname; bash %s >> %s\' | qsub -o %s -j oe -r y %s -N %s >> %s 2>&1' % (envstr, m_fname, log_fname, qsublog_fname, option_str, prefix, log_fname)
#print >> sys.stderr, str
#import pdb
#pdb.set_trace()
### too verbose
#if options['submit_now'] and options['verbosity']:
# print str
# wait until we are allowed to submit again, i.e. #jobs < maxjobs
if not options['immediately'] and not options['immediately_bg'] and options['waitonfull']:
while True:
try:
num_queued = int(subprocess.check_output('qstat -u ' + os.environ['USER'] + ' 2> /dev/null | grep ' + os.environ['USER'] + ' | wc -l | tr -d " "', shell=True).strip())
except:
print >> sys.stderr, 'WARNING: could not determine how many jobs are scheduled'
break
# keep 50 spare jobs if multiple rprocs are scheduling...
if (num_queued < options['maxjobs']):
break
else:
if options['verbosity']:
print >> sys.stdout, 'queue full, sleeping 60 seconds (%i/%i)' %(num_queued, options['maxjobs'])
time.sleep(60)
if options['submit_now']:
if options['immediately'] and options['verbosity']:
print >> sys.stdout, 'immediately starting job on local machine'
if options['immediately_bg'] and options['verbosity']:
print >> sys.stdout, 'immediately starting job on local machine in background'
if options['immediately_bg']:
while True:
str_ = subprocess.check_output('uptime').strip()
hit = re.search('average:', str_)
while hit is None:
time.sleep(1)
str_ = subprocess.check_output('uptime').strip()
hit = re.search('average:', str_)
cpu_load = float(str_[hit.start() + 8:].split(',')[0])
if cpu_load > 13:
if options['verbosity']:
print 'load too high: %1.2f' % cpu_load
time.sleep(10)
else:
break
time.sleep(2)
p1 = subprocess.Popen(['echo', str], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['bash'], stdin=p1.stdout, stdout=subprocess.PIPE)
p2.communicate()
ret = p2.returncode
if ret != 0:
print >> sys.stderr, 'submission failed:\n\tsubmission string: %s\n\treturn code: %i' % (str, ret)
jobinfo.submission_time = time.time()
if not options['immediately'] and not options['immediately_bg']:
fd = open(log_fname, 'r')
jobinfo.jobid = -1
if fd:
s = fd.read().strip()
items = s.split('.')
if not ((items[1] == 'mskcc-fe1') and (items[2] == 'local')):
print >> sys.stderr, str
print >> sys.stderr, 'ERROR: submission failed: %s' % s
sys.exit(1)
jobinfo.jobid = int(items[0])
fd.close()
rproc_register('submit', jobinfo)
else:
jobinfo.jobid = 0
else:
jobinfo.jobid = 0
return jobinfo
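# Usage sketch (illustrative, assuming a function my_func importable from the
# calling module and a working qsub/qstat setup); my_func receives the params
# object as its single argument:
#     jobs = [rproc('my_func', params, Mem=2000, options=dict(), runtime=60)
#             for params in work_items]
#     rproc_wait(jobs, pausetime=60, frac_finished=1.0)
#     results = [rproc_result(job) for job in jobs]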
def finish():
print 'rproc finishing'
global MATLAB_RETURN_VALUE
if MATLAB_RETURN_VALUE is not None:
print 'exit code %i' % MATLAB_RETURN_VALUE
try:
rf = os.environ['MATLAB_RETURN_FILE']
fd = open(rf, 'w+')
if fd:
print >> fd, '%i' % MATLAB_RETURN_VALUE
fd.close()
except KeyError:
print >> sys.stderr, 'WARNING: environment MATLAB_RETURN_FILE not defined'
def rproc_clean_register():
fname = os.path.join(os.environ['HOME'], 'tmp', 'rproc.log')
jobids = []
parent_jobids = []
fd = open(fname, 'r')
for line in fd:
if len(line.strip()) == 0:
continue
items = line.split()
if len(items) < 4:
continue
if len(items[0]) == 0:
continue
if len(items[3]) == 0:
continue
if items[0][0] >= '0' and items[0][0] <= '9' and ((items[3][0] >= '0' and items[3][0] <= '9') or items[3][0] == '-'):
jobids.append(int(items[0]))
parent_jobids.append(int(items[3]))
running_jobids = []
text = subprocess.check_output('qstat').strip()
for line in text.split('\n'):
items = line.split(' ')
if items[0][0] >= '0' and items[0][0] <= '9':
running_jobids.append(int(items[0]))
idx = [i for i in range(len(jobids)) if jobids[i] in running_jobids]
for i in range(len(idx)):
if not parent_jobids[idx[i]] in running_jobids and parent_jobids[idx[i]] != -1:
print >> sys.stderr, 'job %i is still running, but the parent job %i not' % (jobids[idx[i]], parent_jobids[idx[i]])
def rproc_cleanup(jobinfo):
for ix in range(len(jobinfo)):
command = 'rm -f %s %s %s %s %s %s' % (jobinfo[ix].mat_fname, jobinfo[ix].result_fname,
jobinfo[ix].m_fname, jobinfo[ix].log_fname,
jobinfo[ix].qsublog_fname, jobinfo[ix].data_fname)
subprocess.call(command.split(' '))
rproc_register('cleanup', jobinfo[ix])
def rproc_cmd(unix_cmd, jobinfo):
for i in range(len(jobinfo)):
if jobinfo[i].jobid > 0:
subprocess.call([unix_cmd, str(jobinfo[i].jobid)])
def rproc_create(ProcName, P1, Mem=100, options=[], runtime=144000):
# [jobinfo]=rproc(ProcName, P1, Mem, options, time)
#
# time in minutes
# mem in mb
jobinfo = rproc_empty()
jobinfo.ProcName = ProcName
jobinfo.P1 = P1
jobinfo.Mem = Mem
jobinfo.options = options
jobinfo.time = runtime
jobinfo.created = True
jobinfo.retries = -1
return jobinfo
def rproc_empty(N = None):
"""Create jobinfo list"""
if N is None:
N = 1
if N > 1:
jobinfo = []
for i in range(N):
jobinfo.append(Jobinfo())
else:
jobinfo = Jobinfo()
return jobinfo
def rproc_finished(jobinfo):
# isfinished = rproc_finished(jobinfo) ;
if jobinfo.jobid == -1:
return False
if os.path.exists(jobinfo.result_fname):
rproc_register('finished', jobinfo)
return True
return False
def rproc_kill(jobinfo):
if jobinfo == 'wait':
global rproc_wait_jobinfo
jobinfo = rproc_wait_jobinfo
for i in range(len(jobinfo)):
if jobinfo[i].jobid > 0:
subprocess.call(['qdel', str(jobinfo[i].jobid)])
rproc_register('kill', jobinfo[i])
def rproc_reached_timelimit(jobinfo):
# [result, jobwalltime] = rproc_reached_timelimit(jobinfo)
#str_ = 'qacct -j %i | grep ru_wallclock|sed \'s/ru_wallclock//g\'' % jobinfo.jobid
str_ = 'qstat -f %i | grep resources_used.walltime | sed -e "s/.*= //g"' % jobinfo.jobid
w = subprocess.check_output(str_, shell=True)
## TODO use safe Popen for pipeline
if 'error' in w:
return (False, -1)
try:
jobwalltime = split_walltime(w.strip()) # get time in seconds
except Exception:
return (False, -1)
if not (jobwalltime > 0 and jobwalltime < 36000000): # sanity checks
print >> sys.stderr, 'WARNING: invalid output from qacct'
return (False, -1)
if jobwalltime > (jobinfo.time * 60):
return (True, jobwalltime)
else:
return (False, jobwalltime)
def rproc_register(action, jobinfo):
try:
this_jobid = int(os.environ['JOB_ID'])
except:
this_jobid = -1
rproc_log_fname = os.path.join(os.environ['HOME'], 'tmp', 'pyproc.log')
if not os.path.exists(rproc_log_fname):
fd = open(rproc_log_fname, 'a+')
print >> fd, '# prefix\taction\tparent jobid\tjobid\tfunction\ttime'
fd.close()
fd = open(rproc_log_fname, 'a+')
print >> fd, '%i\t%s\t%s\t%i\t%s\t%s' % (jobinfo.jobid, jobinfo.prefix, action, this_jobid, jobinfo.ProcName, time.asctime()) #time.strftime('%a_%b_%d_%Y_%H:%M:%S'))
fd.close()
def rproc_rerun(mess=''):
global MATLAB_RETURN_VALUE
MATLAB_RETURN_VALUE=99
global THIS_IS_A_RPROC_PROCESS
if THIS_IS_A_RPROC_PROCESS is not None and THIS_IS_A_RPROC_PROCESS == 1:
sys.exit()
else:
raise RprocRerun(mess)
def rproc_resubmit(jobinfo, force=True):
# jobinfo2 = rproc_resubmit(jobinfo);
if jobinfo is None:
return jobinfo
elif isinstance(jobinfo, list):
jobinfo2 = jobinfo
for i in range(len(jobinfo)):
jobinfo2[i] = rproc_resubmit(jobinfo[i])
return jobinfo2
if (jobinfo.retries >= 0) and (rproc_time_since_submission(jobinfo) < 1):
#warning('job was submitted less than a minute ago. not resubmitted.') ;
return jobinfo
if jobinfo.retries >= 3:
if jobinfo.options['verbosity'] >= 0:
print >> sys.stderr, 'Warning: job has already been submitted %i times' % jobinfo.retries
if jobinfo.options['verbosity'] > 0:
print 'check file %s' % jobinfo.log_fname
return jobinfo
if (jobinfo.retries >= 0):
(still_running, qstat_line, start_time, status) = rproc_still_running(jobinfo)
if still_running:
if jobinfo.options['verbosity'] > 0:
print >> sys.stdout, '.',
jobinfo2 = jobinfo
jobinfo2.time_of_loss = None
return jobinfo2
if jobinfo.time_of_loss is None:
jobinfo.time_of_loss = time.time()
# more than a minute lost?
if not force and ((time.time() - jobinfo.time_of_loss) / 60 < 1):
#warning('do not resubmit yet ... ') ;
jobinfo2 = jobinfo
return jobinfo2
#rproc_cleanup(jobinfo)
#fprintf('\nresubmitting job\n') ;
jobinfo2 = rproc(jobinfo.ProcName, jobinfo.P1, jobinfo.Mem, jobinfo.options, jobinfo.time, jobinfo.callfile, resubmission=True)
if jobinfo.jobid != -1:
# increase only, if it has not been resubmitted before
jobinfo2.retries = jobinfo.retries + 1
return jobinfo2
def rproc_result(jobinfo, read_attempts=None):
# [retval1, retval2] = rproc_result(jobinfo, [read_attempts])
if not os.path.exists(jobinfo.result_fname):
att = 1
while not os.path.exists(jobinfo.result_fname):
print >> sys.stdout, 'Job not finished yet. Waiting for result file to appear.'
if read_attempts is not None and att > read_attempts:
raise Exception('Unable to load result from %s' % jobinfo.result_fname)
time.sleep(10)
att += 1
(retval1, retval2) = cPickle.load(open(jobinfo.result_fname, 'r'))
return (retval1, retval2)
def rproc_still_running(jobinfo):
# [still_running, line, start_time, status] = rproc_still_running(jobinfo);
status = 0
still_running = 0
global rproc_nqstat_output
global rproc_nqstat_time
start_time = []
if jobinfo.jobid == 0:
# runs locally in background
still_running = not rproc_finished(jobinfo)
line = 'local job %s is still running: %s\n' % (jobinfo.prefix, jobinfo.log_fname)
return (still_running, line, start_time, status)
curtime = time.time()
if rproc_nqstat_time is None or (curtime - rproc_nqstat_time > 0.5e-4):
try:
text = subprocess.check_output(['qstat', '-u', os.environ['USER']])
rproc_nqstat_output = text
rproc_nqstat_time = curtime
except subprocess.CalledProcessError as e:
if e.returncode == 130:
print >> sys.stderr, 'rproc_still_running interrupted by user'
status = -1
line = ''
start_time = ''
print >> sys.stderr, 'WARNING: qstat failed'
text = ''
else:
text = rproc_nqstat_output
for line in text.strip().split('\n'):
if len(line) > 0:
items = line.split(' ')
if not os.environ['USER'] in items:
continue
for j in range(len(items)): #assume that first non-empty item is the jobid
if len(items[j]) > 0:
p = int(items[j].split('.')[0].strip('[]'))
if p == jobinfo.jobid:
still_running = 1
status = get_status(items)
still_running = check_status(status)
if not jobinfo.start_time and status == 'r':
start_time = time.time()
else:
start_time = jobinfo.start_time
return (still_running, line, start_time, status)
break
line = []
return (still_running, line, start_time, status)
def get_status(items):
# status = get_status(items)
status = ''
num = 0
for j in range(len(items)):
if len(items[j]) > 0:
num += 1
if num == 10:
status = items[j]
break
return status
def check_status(status):
# ret = check_status(status)
#if status in ['t', 'Eqw', 'dt', 'dr']:
if status in ['E', 'C', 'S']:
return 0
else:
return 1
def rproc_submit_and_wait(jobinfo, finish_frac, jobtimeout):
# [jobinfo,frac_finished]=rproc_submit_and_wait(jobinfo, finish_frac, jobtimeout)
num_jobs = 0
for i in range(len(jobinfo)):
if jobinfo[i].created == 1:
num_jobs += 1
num_finished = 0
while (num_finished / float(num_jobs) < finish_frac):
num_finished = 0
for id in range(len(jobinfo)):
if rproc_finished(jobinfo[id]):
num_finished += 1
else:
if jobinfo[id].created == 1:
if rproc_time_since_submission(jobinfo[id]) > jobtimeout:
print >> sys.stderr, 'WARNING: job took longer than timeout. Killing and restarting it'
rproc_kill(jobinfo[id])
jobinfo[id] = rproc_resubmit(jobinfo[id], 1)
print 'waiting for jobs to finish: %i/%i \r' % (num_finished, num_jobs)
if (num_finished / float(num_jobs) < finish_frac):
time.sleep(10)
print ''
def rproc_submit_batch(jobinfo, blocksize):
# [jobinfo, meta_jobinfo] = rproc_submit_many(jobinfo, blocksize)
meta_jobinfo = []
time_per_submission = 1.0/60 # 1 seconds
time_per_metajob = [0 for i in range(int(math.ceil(len(jobinfo) / float(blocksize))))]
metablockassignment = [0 for i in range(len(jobinfo))]
s_idx = sorted(range(len(jobinfo)), key=(lambda x: -jobinfo[x].time))
for i in s_idx:
step = (time_per_submission * len(time_per_metajob)) / (len(time_per_metajob) - 1)
span = [-time_per_submission * len(time_per_metajob) + (ii * step) for ii in range(len(time_per_metajob))]
span = [span[x] + time_per_metajob[x] for x in range(len(span))]
idx = span.index(min(span))
metablockassignment[i] = idx
time_per_metajob[idx] += jobinfo[i].time
for i in range(int(math.ceil(len(jobinfo) / float(blocksize)))):
idx = [ii for ii in range(len(metablockassignment)) if metablockassignment[ii] == i]
if len(idx) == 0:
continue
for j in range(len(idx)):
options = jobinfo[idx[j]].options
options['submit_now'] = 0
jobinfo[idx[j]] = rproc(jobinfo[idx[j]].ProcName, jobinfo[idx[j]].P1, jobinfo[idx[j]].Mem, options, jobinfo[idx[j]].time, jobinfo[idx[j]].callfile, resubmission=True)
jobinfo_ = [jobinfo[ii] for ii in idx]
options = jobinfo[idx[0]].options
options['submit_now'] = 1
options['verbosity'] = 1
memory_MB = max([x.Mem for x in jobinfo_])
minutes = sum([int(x.time) for x in jobinfo_])
print 'submitting job %i/%i (%i subjobs) \r' % (i, int(math.ceil(len(jobinfo) / float(blocksize))), len(idx))
meta_jobinfo.append(rproc('rproc_submit_batch_helper', jobinfo_, memory_MB, options, minutes))
for j in range(len(idx)):
jobinfo[idx[j]].log_fname = meta_jobinfo[-1].log_fname
jobinfo[idx[j]].jobid = meta_jobinfo[-1].jobid
jobinfo[idx[j]].submission_time = meta_jobinfo[-1].submission_time
print ''
return (jobinfo, meta_jobinfo)
def rproc_submit_batch_helper(parameters):
# x = rproc_submit_batch_helper(parameters)
print 'Executing a batch of %i jobs in a super-job' % len(parameters)
pp = os.getcwd()
for i in range(len(parameters)):
os.chdir(pp)
print 'starting job %i in file %s' %(i, parameters[i].mat_fname)
print '========================================='
try:
start_proc(parameters[i].mat_fname, parameters[i].data_fname, 0)
except:
print >> sys.stderr, 'execution of start_proc failed'
# remove files
for i in range(len(parameters)):
fname = parameters[i].mat_fname
os.remove(fname) # mat file
os.remove('%spy' % fname.strip('pickle')) # m file
fname = parameters[i].data_fname
os.remove(fname) # data file
return 0
def rproc_time_since_submission(jobinfo):
# time = rproc_time_since_submission(jobinfo)
# returns time in minutes since submission
return (time.time() - jobinfo.submission_time)/60
def rproc_wait(jobinfo, pausetime=120, frac_finished=1.0, resub_on=1, verbosity=2):
# [jobinfo, num_crashed] = rproc_wait(jobinfo, pausetime, frac_finished, resub_on, verbosity)
global rproc_wait_jobinfo
rproc_wait_jobinfo = jobinfo
if resub_on == 1:
print '\n\ncrashed jobs will be resubmitted by rproc_wait'
elif resub_on == -1:
print '\n\ncrashed jobs may be resubmitted by rproc_wait'
else:
print '\n\ncrashed jobs will not be resubmitted by rproc_wait'
if not isinstance(jobinfo, list):
jobinfo = [jobinfo]
num_jobs = 0
num_crashed = 0
for i in range(len(jobinfo)):
if jobinfo[i].created == 1:
if jobinfo[i].time is None:
print >> sys.stderr, 'WARNING: job created but not submitted yet. ignoring'
jobinfo[i].created = 0
else:
num_jobs += 1
num_finished = 0
first_iter = True
while (num_finished < num_jobs * frac_finished) or (num_crashed > 0):
if not first_iter:
time.sleep(pausetime)
first_iter = False
num_finished = 0
num_crashed = 0
crashed_files = 'log files of crashed jobs:'
for id in range(len(jobinfo)):
cur_finished = rproc_finished(jobinfo[id])
(still_running, qstat_line, start_time, status) = rproc_still_running(jobinfo[id])
if status == -1:
return (jobinfo, num_crashed)
jobinfo[id].start_time = start_time
if cur_finished:
num_finished += 1
elif not still_running:
num_finished += 1
num_crashed += 1
crashed_files = '%s\n%s' % (crashed_files, jobinfo[id].log_fname)
if jobinfo[id].crashed_time is None:
jobinfo[id].crashed_time = time.time()
elif (time.time() - jobinfo[id].crashed_time) / 60 > max(3 * (pausetime / 60.0), 0.1) and (resub_on == 1 or (resub_on == -1 and jobinfo[id].resubmit >= jobinfo[id].retries + 1)):
if resub_on == 1:
(reachedlimit, jobwalltime) = rproc_reached_timelimit(jobinfo[id])
if reachedlimit: # check whether the job has been killed because it reached the time limit
if verbosity >= 1:
print 'job has been canceled because it used %1.0fs, but time limit was %1.0fs walltime.\nhence, we increase the time limit to %1.0fs.\n' % (jobwalltime, jobinfo[id].time * 60, max(jobinfo[id].time, jobwalltime) * 2)
jobinfo[id].time = max(jobinfo[id].time, jobwalltime / 60) * 2
elif resub_on == -1:
jobinfo[id].time = jobinfo[id].time_req_resubmit[min(jobinfo[id].retries + 1, len(jobinfo[id].time_req_resubmit) - 1)]
jobinfo[id].Mem = jobinfo[id].mem_req_resubmit[min(jobinfo[id].retries + 1, len(jobinfo[id].mem_req_resubmit) - 1)]
jobinfo[id].start_time = []
if verbosity >= 1:
print 'resubmitting job (%i) with new time and memory limitations: %iMb and %i minutes (retry #%i)\n' % (jobinfo[id].jobid, jobinfo[id].Mem, jobinfo[id].time, jobinfo[id].retries + 1)
if verbosity >= 2:
print 'log file of previous attempt %s\n' % jobinfo[id].log_fname
jobinfo[id] = rproc_resubmit(jobinfo[id])
jobinfo[id].crashed_time = None
num_finished -= 1
else:
if verbosity >= 2:
print '%s' % qstat_line
### hard_time_limit in minutes
if jobinfo[id].start_time and (time.time() - jobinfo[id].start_time) / 60 > jobinfo[id].hard_time_limit:
print 'delete job (%i) because hard time limit (%imin) was reached\n' % (jobinfo[id].jobid, jobinfo[id].hard_time_limit)
subprocess.call(['qdel', str(jobinfo[id].jobid)])
if verbosity >= 1:
print '\n%i of %i jobs finished (%i of them crashed) \n' % (num_finished, num_jobs, num_crashed)
if verbosity >= 2:
if len(crashed_files.strip().split('\n')) > 0:
print '%s\n' % crashed_files
if resub_on == 0 and num_finished == num_jobs * frac_finished:
break
if resub_on == -1 and num_finished == num_jobs * frac_finished:
all_tried = True
for i in range(len(jobinfo)):
fin = rproc_finished(jobinfo[i])
if (jobinfo[i].resubmit >= jobinfo[i].retries + 1) and not fin:
all_tried = False
if all_tried:
break
time.sleep(1)
def start_proc(fname, data_fname, rm_flag=True):
# start_proc(fname, data_fname, rm_flag)
global THIS_IS_A_RPROC_PROCESS
THIS_IS_A_RPROC_PROCESS = True
### load and create environment
(ProcName, dirctry, options, callfile) = cPickle.load(open(fname, 'r'))
os.chdir(dirctry)
print '%s on %s started (in %s; from %s %s)' % (ProcName, os.environ['HOSTNAME'], dirctry, fname, data_fname)
print '### job started %s' % time.strftime('%Y-%m-%d %H:%M:%S')
if 'rmpaths' in options:
for i in range(len(options['rmpaths'])):
print 'removing path %s' % options['rmpaths'][i]
while options['rmpaths'][i] in sys.path:
r_idx = sys.path.index(options['rmpaths'][i])
del sys.path[r_idx]
if 'addpaths' in options:
for i in range(len(options['addpaths'])):
if not options['addpaths'][i] in sys.path:
print 'adding path %s' % options['addpaths'][i]
sys.path.append(options['addpaths'][i])
if 'rm_flag' in options:
rm_flag = options['rm_flag']
### create environment
import_list = []
for mod in options['imports']:
module = options['imports'][mod]
if module[1] == 'builtin':
if imp.is_builtin(module[0]) == 1:
exec('import %s' % module[0])
else:
mod_sl = module[0].split('.')
subpaths = get_subpaths(os.path.dirname(module[1]).split('/'))
imported = True
for m in range(len(mod_sl)):
exec('exists = \'%s\' in globals().keys()' % '.'.join(mod_sl[:m+1]))
if not exists and not '.'.join(mod_sl[:m+1]) in import_list and not 'rproc' in mod_sl[:m+1]:
try:
(f, fn, des) = imp.find_module(mod_sl[m], subpaths)
try:
### TODO: This is a bit hacky, but the only way that linalg can be loaded right now
if fn.endswith('scipy'):
import scipy
import_list.append('scipy')
continue
#print '%s = imp.load_module(\'%s\', f, fn, des)' % ('.'.join(mod_sl[:m+1]), '.'.join(mod_sl[:m+1]))
exec('%s = imp.load_module(\'%s\', f, fn, des)' % ('.'.join(mod_sl[:m+1]), '.'.join(mod_sl[:m+1])))
import_list.append('.'.join(mod_sl[:m+1]))
except:
imported = False
finally:
if f is not None:
f.close()
except ImportError:
print >> sys.stderr, 'Module %s could not be found' % '.'.join(mod_sl[:m+1])
imported = False
else:
imported = False
if mod != module[0] and imported:
exec('%s = %s' % (mod, module[0]))
### load data into environment
P1 = cPickle.load(open(data_fname, 'r'))
retval1 = []
retval2 = []
try:
exec('from %s import %s' % (callfile[0], ProcName))
if len(P1) > 0:
retval = eval('%s(P1)' % ProcName)
else:
retval = eval('%s()' % ProcName)
if retval is None:
pass
elif isinstance(retval, tuple):
retval1 = retval[0]
retval2 = retval[1]
else:
retval1 = retval
if not ('no_result_file' in options and options['no_result_file']):
print 'saving results to %s_result.pickle' % os.path.splitext(fname)[0]
cPickle.dump((retval1, retval2), open('%s_result.pickle' % os.path.splitext(fname)[0], 'wb'), -1)
except (NameError, TypeError) as e:
print >> sys.stderr, 'execution of %s failed' % ProcName
print >> sys.stderr, '%s' % str(e)
global MATLAB_RETURN_VALUE
MATLAB_RETURN_VALUE = -1
rm_flag = False
except RprocRerun as e:
# if we rerun, then we should not cleanup
print >> sys.stderr, 'job is marked for rerunning. exiting without finished computations'
else:
if rm_flag:
os.remove(fname) # data file
os.remove('%ssh' % fname.strip('pickle')) # script file
print '### job finished %s' % time.strftime('%Y-%m-%d %H:%M:%S')
def split_walltime(time_str):
factors = [1, 60, 3600, 86400]
seconds = 0
sl = time_str.split(':')
for i, j in enumerate(range(len(sl) - 1, -1, -1)):
if i < len(factors):
seconds += (int(sl[i]) * factors[j])
else:
print >> sys.stderr, 'WARNING: walltime computation exceeds max value'
return seconds
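# Examples (illustrative): split_walltime('1:02:03') returns 3723 seconds;
# split_walltime('2:00:00:00') returns 172800 seconds (2 days).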
def get_subpaths(sl):
return ['/'.join(sl[:len(sl)-i]) for i in range(len(sl) - 1)]
if __name__ == "__main__":
start_proc(sys.argv[1], sys.argv[2])
|
|
"""
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
_______________________________________________________________________
Data generation for the YOLO (You Only Look Once) detection model.
Utility data generation functions are here. You can also run this
file directly as a script to convert large aerial pics with ROIs
into 256x256 tiles with the ROIs adjusted accordingly. The resulting
tiles will be stored in TFRecord format"""
import os
import sys
import json
import math
import argparse
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.python.lib.io import file_io as gcsfile
from tensorflow.python.platform import tf_logging as logging
from trainer_yolo import settings
from trainer_yolo import utils_box as box
from collections import namedtuple
YOLOConfig = namedtuple('yolocfg', 'grid_nn cell_n cell_swarm cell_grow')
tf.logging.set_verbosity(tf.logging.INFO)
def random_hue(images):
"""
A stronger random hue augmentation than tf.image.random_hue alone: by randomly
attenuating color channels first, it can also change the color of white surfaces.
:param images: batch of images, shape [batch, height, width, 3] (uint8 in this pipeline)
:return: hue-shifted images of the same shape, dtype uint8
"""
mask = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 1, 0],
[1, 0, 1],
[0, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]], tf.float32)
batch_size = tf.shape(images)[0]
rnd_mask = tf.random_uniform([batch_size], 0, 18, dtype=tf.int32)
mask = tf.gather(mask, rnd_mask)
strength = tf.random_uniform([batch_size, 3], 0.5, 1.0, dtype=tf.float32)
inverse_mask = (1 - mask) * strength
# put masks in image format [batch, 1, 1, 3] the 1-dimensions will broadcast
mask = tf.expand_dims(mask, axis=1)
mask = tf.expand_dims(mask, axis=2)
inverse_mask = tf.expand_dims(inverse_mask, axis=1)
inverse_mask = tf.expand_dims(inverse_mask, axis=2)
# partially drop color channels
images = tf.to_float(images)
images = images * mask + images * inverse_mask
image = tf.cast(images, tf.uint8)
return tf.image.random_hue(image, 0.5) # rotate the color channels too
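# Note: 12 of the 18 mask rows above are all-ones, so roughly two thirds of the images in a
# batch pass through unchanged before the hue rotation; for the other third the masked-out
# channels are attenuated by a random factor in [0.5, 1.0), which is what allows white or
# grey surfaces to pick up a color cast.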
def almost_sqrt_factors(x):
"""Returns two integers that are close to each other and
multiply to a product that is close to x.
Args:
x: the integer to factor into a and b
Returns:
a, b: integers whose product is close to x
"""
y = math.sqrt(x)
a = math.floor(y)
b = math.ceil(y)
return int(a), int(b)
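# For reference: almost_sqrt_factors(100) == (10, 10) and almost_sqrt_factors(20) == (4, 5).
# It is used below to split tiles_per_gt_roi into repeat_slice * repeat_tiles, so tile
# generation happens in roughly sqrt(N)-sized batches instead of all at once.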
def log_tensor(message, tensor):
"""Log the value of a tensor at graph execution time.
Warning: this will only work if the tensor is evaluated in your graph.
Args:
message: Prefix message string
tensor: The tensor to evaluate
"""
tf.Print(tensor, [tensor], message, summarize=10)
def extract_filename_without_extension(filename):
basename = os.path.basename(filename)
barename, extension = os.path.splitext(basename)
return barename, filename
def load_file_list(directory):
# load images, load jsons, associate them by name, XYZ.jpg with XYZ.json
img_files1 = gcsfile.get_matching_files(directory + "/*.jpg")
img_files2 = gcsfile.get_matching_files(directory + "/*.jpeg")
img_files = img_files1 + img_files2
roi_files = gcsfile.get_matching_files(directory + "/*.json")
img_kv = list(map(extract_filename_without_extension, img_files))
roi_kv = list(map(extract_filename_without_extension, roi_files))
all_kv = img_kv + roi_kv
img_dict = dict(img_kv)
roi_dict = dict(roi_kv)
all_dict = dict(all_kv)
outer_join = [(img_dict[k] if k in img_dict else None,
roi_dict[k] if k in roi_dict else None) for k in all_dict]
# keep only those where the jpg and the json are both available
inner_join = list(filter(lambda e: e[0] is not None and e[1] is not None, outer_join))
if len(inner_join) == 0:
return [], []
else:
img_list, roi_list = zip(*inner_join) # unzip, results are a tuple of img names and a tuple of roi names
return list(img_list), list(roi_list)
def gcsload(filename):
# this function is here just to log loaded files
logging.info("loaded: {}".format(filename))
return gcsfile.read_file_to_string(filename, binary_mode=True)
def batch_random_orientation(images, rois, tile_size):
return tf.map_fn(lambda a: box.random_orientation(*a, tile_size=tile_size), (images, rois))
def batch_yolo_roi_attribution(tiles, target_rois, yolo_cfg):
# target_rois format: [tiles_n, max_per_tile, 4] 4 for x1, y1, x2, y2 scale 0..1 where 1.0 is the tile size
tile = tf.constant([0, 0, 1.0, 1.0], tf.float32)
return tf.map_fn(lambda rois: yolo_roi_attribution(tile, rois, yolo_cfg), target_rois)
def yolo_roi_attribution(tile, rois, yolo_cfg):
# Tile divided in grid_nn x grid_nn grid
# Recognizing cell_n boxes per grid cell
# For each tile, for each grid cell, determine the cell_n largest ROIs centered in that cell
# Output shape [tiles_n, grid_nn, grid_nn, cell_n, 3] 3 for x, y, w
# dynamic number of rois
rois = tf.reshape(rois, [-1, 4]) # I know the shape but Tensorflow does not
rois_n = tf.shape(rois)[0] # known shape [n, 4]
if yolo_cfg.cell_n == 2 and yolo_cfg.cell_swarm:
yolo_target_rois = box.n_experimental_roi_selection_strategy(tile, rois, rois_n,
yolo_cfg.grid_nn,
yolo_cfg.cell_n,
yolo_cfg.cell_grow)
elif not yolo_cfg.cell_swarm:
yolo_target_rois = box.n_largest_rois_in_cell_relative(tile, rois, rois_n,
yolo_cfg.grid_nn,
yolo_cfg.cell_n)
else:
raise ValueError('Ground truth ROI selection strategy cell_swarm is only implemented for cell_n=2')
# maybe not needed
yolo_target_rois = tf.reshape(yolo_target_rois, [yolo_cfg.grid_nn,
yolo_cfg.grid_nn,
yolo_cfg.cell_n, 3]) # 3 for x, y, w
return yolo_target_rois
def generate_slice(pixels, rois, fname, yolo_cfg, rnd_hue, rnd_orientation, repeat_tiles, rnd_distmax, idx):
# dynamic image shapes
img_shape = tf.cast(tf.shape(pixels), tf.float32) # known shape [height, width, 3]
img_shape = tf.reshape(img_shape, [3]) # tensorflow needs help here
img_h, img_w, _ = tf.unstack(img_shape)
# dynamic number of rois
rois = tf.reshape(rois, [-1, 4]) # I know the shape but Tensorflow does not
rois_n = tf.shape(rois)[0] # known shape [n, 4]
TILE_INTERSECT_FRACTION = 0.75
# random displacements around each ROI (typically 1.4 to 3.0. Fixed at 2.0 for all evals)
# adjusted so that tiles with planes and no planes happen with roughly equal frequency
RANDOM_MAX_DISTANCE = rnd_distmax*settings.TILE_SIZE
N_RANDOM_POSITIONS = repeat_tiles # repeat_tiles * max nb of planes in one input image = nb of tiles generated in RAM (watch out!)
# you can increase stddev to reach more zones without airplanes
rnd_x = tf.round(tf.truncated_normal([N_RANDOM_POSITIONS], mean=0.0, stddev=RANDOM_MAX_DISTANCE/2.0))
rnd_y = tf.round(tf.truncated_normal([N_RANDOM_POSITIONS], mean=0.0, stddev=RANDOM_MAX_DISTANCE/2.0))
def many_tiles_around_this_one(roi):
roi_x1, roi_y1, roi_x2, roi_y2 = tf.unstack(roi, axis=0)
# center coordinates of the roi
roi_x = (roi_x1 + roi_x2) / 2.0
roi_y = (roi_y1 + roi_y2) / 2.0
# create N_RANDOM_POSITIONS rois centered on the original
# but with a random translation and of size [TILE_SIZE, TILE_SIZE]
roi_x = tf.add(roi_x, rnd_x) # broadcasting !
roi_y = tf.add(roi_y, rnd_y) # broadcasting !
roi_x1 = tf.add(roi_x, -settings.TILE_SIZE/2.0)
roi_y1 = tf.add(roi_y, -settings.TILE_SIZE/2.0)
roi_x2 = tf.add(roi_x, settings.TILE_SIZE/2.0)
roi_y2 = tf.add(roi_y, settings.TILE_SIZE/2.0)
roisx = tf.stack([roi_x1, roi_y1, roi_x2, roi_y2], axis=1)
return roisx
# for each roi, generate N_RANDOM_POSITIONS translated ROIs
tiles = tf.map_fn(many_tiles_around_this_one, rois, dtype=tf.float32, name="jitter")
tiles = tf.reshape(tiles, [-1, 4]) # flatten all generated random ROIs
# dynamic number of tiles
tiles_n = tf.shape(tiles)[0] # known shape [n, 4]
def count_planes(roi):
inter = box.boxintersect(roi, rois, TILE_INTERSECT_FRACTION)
return tf.reduce_sum(tf.cast(inter, dtype=tf.int32))
# plane counting
plane_counts = tf.map_fn(count_planes, tiles, dtype=tf.int32)
# count up to 1 max (planes/no planes)
# or count up to 3 max (0, 1, 2, lots of planes)
plane_counts = tf.minimum(plane_counts, 1)
# debug
#plane_counts3 = tf.count_nonzero(tf.floor_div(plane_counts, 3))
#plane_counts2 = tf.count_nonzero(tf.floor_div(plane_counts, 2)) - plane_counts3
plane_counts1 = tf.count_nonzero(tf.floor_div(plane_counts, 1)) #- plane_counts3 - plane_counts2
plane_counts0 = tf.count_nonzero(tf.add(plane_counts, 1)) - plane_counts1 #- plane_counts2 - plane_counts3
tf.Print(tiles_n, [tiles_n, plane_counts0, plane_counts1],
"Generating tiles [total tiles][tiles with no planes][tiles with 1+ planes]: ")
# Vocabulary:
# "tile": a 256x256 region under consideration
# "cell": tiles are evenly divided into 4 x 4 = 16 cells
# "roi": a plane bounding box (ground truth)
# Compute ground truth ROIs
target_rois = box.rois_in_tiles_relative_and_pad(tiles, rois, settings.MAX_TARGET_ROIS_PER_TILE) # shape [n_tiles, MAX_TARGET_ROIS_PER_TILE, 4]
# resize rois to units used by crop_and_resize
# TODO: refactor unit conversion into utils_box
tile_x1, tile_y1, tile_x2, tile_y2 = tf.unstack(tiles, axis=1)
tile_y1 = tile_y1 / img_h
tile_x1 = tile_x1 / img_w
tile_y2 = tile_y2 / img_h
tile_x2 = tile_x2 / img_w
# crop_and_resize expects coordinates in format [y1, x1, y2, x2]
tiles = tf.stack([tile_y1, tile_x1, tile_y2, tile_x2], axis=1)
indices = tf.zeros([tiles_n], dtype=tf.int32) # all the rois refer to image #0 (there is only one)
# expand_dims needed because crop_and_resize expects a batch of images
image_tiles = tf.image.crop_and_resize(tf.expand_dims(pixels, 0), tiles, indices, [settings.TILE_SIZE, settings.TILE_SIZE])
# crop_and_resize does not output a defined pixel depth but the convolutional layers need it
image_tiles = tf.reshape(image_tiles, [-1, settings.TILE_SIZE, settings.TILE_SIZE, 3]) # 3 for r, g, b
image_tiles = tf.cast(image_tiles, tf.uint8)
if rnd_orientation:
image_tiles, target_rois = batch_random_orientation(image_tiles, target_rois, 1.0)
# Compute ground truth ROIs assigned to YOLO grid cells
yolo_target_rois = batch_yolo_roi_attribution(tiles, target_rois, yolo_cfg)
if rnd_hue: # random hue shift for all training images
image_tiles = random_hue(image_tiles)
# filename containing the airport name for logging and debugging
filenames = tf.tile([fname], [tiles_n])
features, labels = features_and_labels(image_tiles, yolo_target_rois, target_rois, plane_counts, filenames)
return tf.data.Dataset.from_tensor_slices((features, labels))
def decode_json_py(json_string):
obj = json.loads(json_string.decode('utf-8'))
rois = np.array([(roi['x'], roi['y'], roi['x']+roi['w'], roi['y']+roi['w']) for roi in obj["markers"]], dtype=np.float32)
return rois
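# JSON layout assumed above, inferred from the field accesses (not a documented schema):
#   {"markers": [{"x": 2310, "y": 1466, "w": 28}, ...]}
# i.e. each marker is a square of side w anchored at (x, y), converted here into an
# [x1, y1, x2, y2] box.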
def decode_image(img_bytes):
pixels = tf.image.decode_image(img_bytes, channels=3)
return tf.cast(pixels, tf.uint8)
def decode_image_and_json_bytes(img_bytes, json_bytes):
# decode jpeg
pixels = decode_image(img_bytes)
# parse json
rois = tf.py_func(decode_json_py, [json_bytes], [tf.float32])
rois = tf.reshape(rois[0], [-1, 4])
return pixels, rois
def load_img_and_json_files(img_filename, roi_filename):
log_tensor("Loading ", img_filename)
img_bytes = tf.read_file(img_filename)
json_bytes = tf.read_file(roi_filename)
pixels, rois = decode_image_and_json_bytes(img_bytes, json_bytes)
return pixels, rois, img_filename
def generate(pixels, rois, fname, repeat_slice, repeat_tiles, yolo_cfg, rnd_hue, rnd_orientation, rnd_distmax):
# generate_slice generates random image tiles in memory from a large aerial shot
# we call it multiple times to get more random tiles from the same image, without exceeding available memory.
return tf.data.Dataset.range(repeat_slice).flat_map(lambda i: generate_slice(pixels, rois, fname, yolo_cfg,
rnd_hue, rnd_orientation,
repeat_tiles, rnd_distmax, i))
#TODO: rename shuffle_buf to shuffle_buf_size for clarity
def init_dataset_from_tfrecords(tfrec_filelist, batch_size, shuffle_buf, yolo_cfg, rnd_hue, rnd_orientation):
fileset = np.array(tfrec_filelist)
np.random.shuffle(fileset) # shuffle filenames
dataset = tf.data.TFRecordDataset(fileset, buffer_size=10*1024*1024, num_parallel_reads=16)
if shuffle_buf > 0:
dataset = dataset.shuffle(shuffle_buf)
dataset = dataset.apply(tf.contrib.data.map_and_batch(lambda tfrec: read_tfrecord_features(tfrec, yolo_cfg, rnd_hue, rnd_orientation),
batch_size,
num_parallel_batches=16))
return dataset
def init_train_dataset_from_images(img_filelist, roi_filelist, batch_size, shuffle_buf, yolo_cfg, rnd_hue, rnd_orientation, tiles_per_gt_roi, rnd_distmax):
# Each call to generate_slice produces all the tiles in memory. Calling it once per airport would OOM.
# To generate 100 tiles around each ROI (for example) we call generate_slice 10 times, generating 10
# tiles around each ROI every time.
repeat_slice, repeat_tiles = almost_sqrt_factors(tiles_per_gt_roi)
fileset = tf.data.Dataset.from_tensor_slices((tf.constant(img_filelist), tf.constant(roi_filelist)))
fileset = fileset.shuffle(1000) # shuffle filenames
dataset = fileset.map(load_img_and_json_files)
dataset = dataset.flat_map(lambda pix, rois, fname: generate(pix, rois, fname,
repeat_slice=repeat_slice,
repeat_tiles=repeat_tiles,
yolo_cfg=yolo_cfg,
rnd_hue=rnd_hue,
rnd_orientation=rnd_orientation,
rnd_distmax=rnd_distmax))
if shuffle_buf > 0:
dataset = dataset.shuffle(shuffle_buf)
dataset = dataset.batch(batch_size)
return dataset
def init_eval_dataset_from_images(img_filelist, roi_filelist, eval_batch_size, yolo_cfg):
fileset = tf.data.Dataset.from_tensor_slices((tf.constant(img_filelist), tf.constant(roi_filelist)))
dataset = fileset.map(load_img_and_json_files)
dataset = dataset.flat_map(lambda pix, rois, fname: generate(pix, rois, fname,
repeat_slice=1,
repeat_tiles=20, # 1*20 tiles per ground truth ROI
yolo_cfg=yolo_cfg,
rnd_hue=False,
rnd_orientation=False,
rnd_distmax=2.0))
dataset = dataset.batch(eval_batch_size)
return dataset
def train_dataset_finalize(dataset, cache_after_n_epochs):
if cache_after_n_epochs > 0:
dataset = dataset.repeat(cache_after_n_epochs)
dataset = dataset.cache(tempfile.mkdtemp(prefix="datacache") + "/datacache")
dataset = dataset.repeat() # indefinitely
dataset = dataset.prefetch(1)
return dataset
def eval_dataset_finalize(dataset):
# caching does not work for the eval dataset
#dataset = dataset.cache(tempfile.mkdtemp(prefix="evaldatacache") + "/evaldatacache")
dataset = dataset.repeat(1)
dataset = dataset.prefetch(1)
return dataset
def features_and_labels(image, yolo_target_rois, target_rois, count, fnames):
features = {'image': image}
labels = {'yolo_target_rois': yolo_target_rois, 'target_rois': target_rois, 'count': count, 'fnames': fnames}
return features, labels
def train_dataset_from_images(img_filelist, roi_filelist, batch_size, shuffle_buf, yolo_cfg, rnd_hue, rnd_orientation, tiles_per_gt_roi, rnd_distmax, cache_after_n_epochs=0):
dataset = init_train_dataset_from_images(img_filelist, roi_filelist, batch_size, shuffle_buf, yolo_cfg, rnd_hue, rnd_orientation, tiles_per_gt_roi, rnd_distmax)
return train_dataset_finalize(dataset, cache_after_n_epochs)
def train_dataset_from_tfrecords(tfrec_filelist, batch_size, shuffle_buf, yolo_cfg, rnd_hue, rnd_orientation, cache_after_n_epochs=0):
dataset = init_dataset_from_tfrecords(tfrec_filelist, batch_size, shuffle_buf, yolo_cfg, rnd_hue, rnd_orientation)
return train_dataset_finalize(dataset, cache_after_n_epochs)
def eval_dataset_from_images(img_filelist, roi_filelist, eval_batch_size, yolo_cfg):
dataset = init_eval_dataset_from_images(img_filelist, roi_filelist, eval_batch_size, yolo_cfg)
return eval_dataset_finalize(dataset)
def eval_dataset_from_tfrecords(tfrec_filelist, eval_batch_size, yolo_cfg):
dataset = init_dataset_from_tfrecords(tfrec_filelist, eval_batch_size, 0, yolo_cfg, False, False)
# 0 = no shuffling, False = no random hue shift, False = no random orientation
return eval_dataset_finalize(dataset)
def write_tfrecord_features(tfrec_filewriter, img_bytes, roi_floats, name_bytes):
# helper function for TFRecords generation
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) # [value] for inputs of type 'bytes'
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value)) # value for inputs if type 'list'
tfrec_filewriter.write(tf.train.Example(features=tf.train.Features(feature={
"img": _bytes_feature(img_bytes),
"rois": _float_feature(roi_floats),
"name": _bytes_feature(name_bytes)})).SerializeToString())
def read_tfrecord_features(example, yolo_cfg, rnd_hue, rnd_orientation):
features = {
"img": tf.FixedLenFeature((), tf.string),
"rois": tf.VarLenFeature(tf.float32),
"name": tf.FixedLenFeature((), tf.string)
}
parsed_example = tf.parse_single_example(example, features)
pixels = decode_image(parsed_example["img"])
rois = tf.sparse_tensor_to_dense(parsed_example["rois"])
rois = tf.reshape(rois * settings.TILE_SIZE, [-1, 4])
airport_name = parsed_example["name"]
# rois format: x1, y1, x2, y2 in [0..TILE_SIZE]
if rnd_orientation:
pixels, rois = box.random_orientation(pixels, rois, settings.TILE_SIZE)
if rnd_hue:
pixels = random_hue(tf.expand_dims(pixels, axis=0))
# TODO: refactor coordinate formats so that target_rois and yolo_target_rois use the same format
pixels = tf.reshape(pixels, [settings.TILE_SIZE, settings.TILE_SIZE, 3]) # 3 for r, g, b
# the tile is already cut
tile = tf.constant([0, 0, settings.TILE_SIZE, settings.TILE_SIZE], tf.float32)
one_tile = tf.expand_dims(tile, axis=0)
# Compute ground truth ROIs
target_rois = box.rois_in_tiles_relative_and_pad(one_tile, rois, settings.MAX_TARGET_ROIS_PER_TILE) # shape [n_tiles, MAX_TARGET_ROIS_PER_TILE, 4]
target_rois = tf.reshape(target_rois, [settings.MAX_TARGET_ROIS_PER_TILE, 4]) # 4 for x1, y1, x2, y2
# Compute ground truth ROIs assigned to YOLO grid cells
yolo_target_rois = yolo_roi_attribution(tile, rois, yolo_cfg)
yolo_target_rois = tf.reshape(yolo_target_rois, [yolo_cfg.grid_nn, yolo_cfg.grid_nn, yolo_cfg.cell_n, 3]) # 3 for x, y, w
# TODO: remove plane_counts entirely from the model (dummy 0 for the time being)
return features_and_labels(pixels, yolo_target_rois, target_rois, tf.constant(0), airport_name)
def run_data_generation(data, output_dir, record_batch_size, shuffle_buf, tiles_per_gt_roi, rnd_distmax, rnd_orientation, is_eval):
img_filelist, roi_filelist = load_file_list(data)
# sanity checks and log messages
if len(img_filelist) > 0:
logging.log(logging.INFO, "Generating {} data.".format("eval" if is_eval else "training"))
else:
logging.log(logging.INFO, "No image/json pairs found in folder {}. Skipping.".format(data))
return
# dummy args only used in YOLO box assignments, which will be discarded anyway
# TODO: refactor these outside of the generate_slice function
yolo_cfg = YOLOConfig(grid_nn = 16, cell_n = 2, cell_swarm = True, cell_grow = 1.0)
if is_eval:
dataset = init_eval_dataset_from_images(img_filelist, roi_filelist, record_batch_size, yolo_cfg)
else:
dataset = init_train_dataset_from_images(img_filelist, roi_filelist, record_batch_size, shuffle_buf, yolo_cfg,
False, rnd_orientation, tiles_per_gt_roi, rnd_distmax) # False = no rnd hue
dataset = dataset.repeat(1)
###
# TF graph for JPEG image encoding
features, labels = dataset.make_one_shot_iterator().get_next()
image_tiles = features['image']
fname = labels['fnames']
target_rois = labels['target_rois'] # shape [n_tiles, MAX_TARGET_ROIS_PER_TILE, 4]
encoded_jpegs = tf.map_fn(lambda image_bytes:
tf.image.encode_jpeg(image_bytes, optimize_size=True, chroma_downsampling=False),
image_tiles, dtype=tf.string)
# end of TF graph for image encoding
###
i = 0
with tf.Session() as sess:
while True:
try:
image_jpegs_r, target_rois_r, fname_r = sess.run([encoded_jpegs, target_rois, fname])
except tf.errors.OutOfRangeError:
break
except tf.errors.NotFoundError:
break
i += 1
# write ROIs
basename = os.path.basename(fname_r[0].decode("utf-8"))
basename, _ = os.path.splitext(basename)
filename = os.path.join(output_dir, "{}tiles{:06}_{}.tfrecord".format(record_batch_size, i, basename))
with tf.python_io.TFRecordWriter(filename) as file:
for one_image_jpeg, per_image_target_rois in zip(image_jpegs_r, target_rois_r):
nonempty_target_rois = filter(lambda roi: abs(roi[2]-roi[0]) > 0 and # roi format is x1y1x2y2
abs(roi[3]-roi[1]) > 0, per_image_target_rois)
nonempty_target_rois = np.array(list(nonempty_target_rois), np.float32)
nonempty_target_rois = np.reshape(nonempty_target_rois, [-1]).tolist()
write_tfrecord_features(file, one_image_jpeg, nonempty_target_rois, fname_r[0]) # write TFRecord
def datagen_main(argv):
parser = argparse.ArgumentParser()
def str2bool(v): return v=='True'
parser.add_argument('--job-dir', default="checkpoints", help='Not used in datagen mode but required by ML engine')
parser.add_argument('--data', default="sample_data/USGS_public_domain_airports", help='Path to data file (can be on Google cloud storage gs://...)')
parser.add_argument('--output-dir', default="tilecache", help='Folder where generated training and eval tiles will be stored (can be on Google cloud storage gs://...)')
parser.add_argument('--record-batch-size', default=100, type=int, help='How many tiles per TFRecord file in the output')
parser.add_argument('--shuffle-buf', default=10000, type=int, help='Size of the shuffle buffer for shuffling tiles. 0 to disable shuffling.')
parser.add_argument('--hp-data-tiles-per-gt-roi', default=100, type=int, help='Data generation hyperparameter: number of training tiles generated around each ground truth ROI')
parser.add_argument('--hp-data-rnd-distmax', default=2.0, type=float, help='Data generation hyperparameter: training tiles selection max random distance from ground truth ROI (always 2.0 for eval tiles)')
parser.add_argument('--hp-data-rnd-orientation', default=True, type=str2bool, help='Data generation hyperparameter: data augmentation by rotating and flipping tiles.')
args = parser.parse_args()
data_eval = args.data + "_eval"
output_dir_eval = args.output_dir + "_eval"
if not gcsfile.file_exists(args.output_dir) or not gcsfile.file_exists(output_dir_eval):
logging.log(logging.ERROR, "Error: both the output path \"{}\" and the eval "
"output path \"{}\" must exist. Please create them "
"before starting data generation.".format(args.output_dir, output_dir_eval))
exit(-1)
logging.log(logging.INFO, "Training data path: " + args.data)
logging.log(logging.INFO, "Eval data path: " + data_eval)
logging.log(logging.INFO, "Command-line parameters only affect training data generation. "
"Eval data is generated with hard-coded parameters so as to offer "
"a consistent evaluation benchmark.")
rnd_distmax = args.hp_data_rnd_distmax
tiles_per_gt_roi = args.hp_data_tiles_per_gt_roi
rnd_orientation = args.hp_data_rnd_orientation
# training and eval data generation
run_data_generation(args.data, args.output_dir, args.record_batch_size, args.shuffle_buf, tiles_per_gt_roi, rnd_distmax, rnd_orientation, is_eval=False)
run_data_generation(data_eval, output_dir_eval, args.record_batch_size, args.shuffle_buf, tiles_per_gt_roi, rnd_distmax, rnd_orientation, is_eval=True)
if __name__ == '__main__':
datagen_main(sys.argv)
|
|
""" Steward extension for monitoring servers """
from __future__ import unicode_literals
import imp
import os
import sys
from datetime import timedelta
import inspect
import logging
import yaml
from pyramid.path import DottedNameResolver
from pyramid.settings import aslist
from .check import Check
from .handlers import BaseHandler
LOG = logging.getLogger(__name__)
CHECK_MODULE = 'steward_palantir.plugin_checks'
HANDLER_MODULE = 'steward_palantir.plugin_handlers'
sys.modules[CHECK_MODULE] = imp.new_module(CHECK_MODULE)
sys.modules[HANDLER_MODULE] = imp.new_module(HANDLER_MODULE)
def iterate_files(filedir, loaders):
""" Generator for file data """
LOG.debug("Loading palantir files from '%s'", filedir)
for filename in os.listdir(filedir):
_, ext = os.path.splitext(filename)
if ext not in loaders:
continue
absfile = os.path.abspath(os.path.join(filedir, filename))
for result in loaders[ext](absfile):
yield result
def load_yaml_checks(filepath):
""" Load checks from yaml files """
try:
with open(filepath, 'r') as infile:
file_data = yaml.safe_load(infile)
for name, data in file_data.iteritems():
yield Check(name, **data)
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
LOG.exception("Could not load '%s'", filepath)
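# A minimal sketch of the YAML layout this loader expects, assuming (from how checks are
# consumed elsewhere in this module) that each top-level key is a check name and its mapping
# is passed to Check(name, **data); Check may accept or require further fields not shown here:
#
#   disk_usage:
#     schedule:
#       minutes: 5
#     meta:
#       owner: ops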
def load_python_checks(filepath):
""" Load checks from a file path """
module_name, _ = os.path.splitext(os.path.basename(filepath))
module_path = os.path.dirname(filepath)
module_desc = imp.find_module(module_name, [module_path])
fullname = CHECK_MODULE + '.' + module_name
module = imp.load_module(fullname, *module_desc)
for _, member in inspect.getmembers(module, inspect.isclass):
if issubclass(member, Check) and member != Check:
yield member()
def load_handlers_from_path(filepath):
""" Load check handlers from a file path """
module_name, _ = os.path.splitext(os.path.basename(filepath))
module_path = os.path.dirname(filepath)
module_desc = imp.find_module(module_name, [module_path])
fullname = HANDLER_MODULE + '.' + module_name
module = imp.load_module(fullname, *module_desc)
for _, member in inspect.getmembers(module, inspect.isclass):
if issubclass(member, BaseHandler) and member != BaseHandler:
yield member
DEFAULT_LOADERS = {
'.yaml': load_yaml_checks,
'.py': load_python_checks,
}
def include_client(client):
""" Add methods to the client """
client.set_cmd('palantir.alerts', 'steward_palantir.client.do_alerts')
client.set_cmd('palantir.checks', 'steward_palantir.client.do_checks')
client.set_cmd('palantir.status', 'steward_palantir.client.do_status')
client.set_cmd('palantir.minions', 'steward_palantir.client.do_minions')
client.set_cmd('palantir.run_check',
'steward_palantir.client.do_run_check')
client.set_cmd('palantir.resolve', 'steward_palantir.client.do_resolve')
client.set_cmd('palantir.enable_minion',
'steward_palantir.client.do_minion_enable')
client.set_cmd('palantir.disable_minion',
'steward_palantir.client.do_minion_disable')
client.set_cmd('palantir.enable_check',
'steward_palantir.client.do_check_enable')
client.set_cmd('palantir.disable_check',
'steward_palantir.client.do_check_disable')
client.set_cmd('palantir.enable_minion_check',
'steward_palantir.client.do_minion_check_enable')
client.set_cmd('palantir.disable_minion_check',
'steward_palantir.client.do_minion_check_disable')
try:
checks = client.cmd('palantir/check/list').json().keys()
client.set_autocomplete('palantir.run_check', checks)
client.set_autocomplete('palantir.checks', checks)
client.set_autocomplete('palantir.enable_check', checks)
client.set_autocomplete('palantir.disable_check', checks)
minions = client.cmd('palantir/minion/list').json().keys()
client.set_autocomplete('palantir.enable_minion', minions)
client.set_autocomplete('palantir.disable_minion', minions)
client.set_autocomplete('palantir.enable_minion_check', minions +
checks)
client.set_autocomplete('palantir.disable_minion_check', minions +
checks)
client.set_autocomplete('palantir.status', minions + checks)
client.set_autocomplete('palantir.resolve', minions + checks)
except Exception:
# autocomplete isn't mandatory
LOG.warn("Failed to load palantir autocomplete")
def prune(tasklist):
""" Prune the minions and checks regularly """
response = tasklist.post('palantir/prune')
if response.status_code != 200:
LOG.warning("Failed to prune palantir minions and checks\n%s",
response.text)
def load_checks(settings):
""" Load all palantir checks """
checks = {}
checks_dir = settings.get('palantir.checks_dir', '/etc/steward/checks')
required_meta = set(aslist(settings.get('palantir.required_meta', [])))
for check in iterate_files(checks_dir, DEFAULT_LOADERS):
if check.name in checks:
LOG.error("Duplicate Palantir check '%s'", check.name)
continue
missing_meta = required_meta - set(check.meta.keys())
if missing_meta:
LOG.error("Check '%s' is missing meta field(s) '%s'", check.name,
', '.join(missing_meta))
continue
checks[check.name] = check
return checks
def load_handlers(settings):
""" Load all additional handlers """
handlers = {}
name_resolver = DottedNameResolver(__package__)
handler_files = aslist(settings.get('palantir.handlers',
['/etc/steward/handlers']))
handler_files.append('steward_palantir.handlers')
for mod_name in handler_files:
# If a file or directory, import and load the handlers
if os.path.exists(mod_name):
loaders = {
'.py': load_handlers_from_path,
}
if os.path.isdir(mod_name):
for handler in iterate_files(mod_name, loaders):
handlers[handler.name] = handler
else:
for handler in load_handlers_from_path(mod_name):
handlers[handler.name] = handler
continue
module = name_resolver.resolve(mod_name.strip())
# If a reference to a handler directly, add it
if inspect.isclass(module) and issubclass(module, BaseHandler):
handlers[module.name] = module
continue
# Otherwise, import the module and search for handlers
for _, member in inspect.getmembers(module, inspect.isclass):
if issubclass(member, BaseHandler) and member != BaseHandler:
handlers[member.name] = member
return handlers
def include_tasks(config):
""" Add tasks """
checks_dir = config.settings.get('palantir.checks_dir',
'/etc/steward/checks')
for check in iterate_files(checks_dir, DEFAULT_LOADERS):
config.add_scheduled_task(check.name, {
'schedule': timedelta(**check.schedule),
'task': 'steward_palantir.tasks.run_check',
'args': [check.name],
})
config.add_scheduled_task('palantir_prune', {
'schedule': timedelta(minutes=10),
'task': 'steward_palantir.tasks.prune',
})
config.registry.palantir_checks = load_checks(config.settings)
def post_setup_load_handlers():
""" Load handlers as a callback """
config.registry.palantir_handlers = load_handlers(config.settings)
config.after_setup.append(post_setup_load_handlers)
def includeme(config):
""" Configure the app """
settings = config.get_settings()
# Add the handlers
config.registry.palantir_handlers = load_handlers(settings)
# Load the checks
config.registry.palantir_checks = load_checks(settings)
# Set up the route urls
config.add_route('palantir_list_checks', '/palantir/check/list')
config.add_route('palantir_get_check', '/palantir/check/get')
config.add_route('palantir_run_check', '/palantir/check/run')
config.add_route('palantir_toggle_check', '/palantir/check/toggle')
config.add_route('palantir_list_alerts', '/palantir/alert/list')
config.add_route('palantir_get_alert', '/palantir/alert/get')
config.add_route('palantir_resolve_alert', '/palantir/alert/resolve')
config.add_route('palantir_list_minions', '/palantir/minion/list')
config.add_route('palantir_get_minion', '/palantir/minion/get')
config.add_route('palantir_toggle_minion', '/palantir/minion/toggle')
config.add_route('palantir_delete_minion', '/palantir/minion/delete')
config.add_route('palantir_toggle_minion_check',
'/palantir/minion/check/toggle')
config.add_route('palantir_list_minion_checks',
'/palantir/minion/check/list')
config.add_route('palantir_get_minion_check', '/palantir/minion/check/get')
config.add_route('palantir_list_handlers', '/palantir/handler/list')
config.add_route('palantir_prune', '/palantir/prune')
config.scan(__package__ + '.views')
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Stephane Caron <stephane.caron@normalesup.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import numpy
import os
import pickle
import pylab
import sys
sys.path.append('.')
sys.path.append('..')
from numpy import array, cos, pi, sqrt
from rrtcmp import Tunings, TorqueSampleState
class Dump(object):
def __init__(self, dump_dict):
self.tunings = Tunings(dump_dict['tunings'])
State = TorqueSampleState.Factory(self.tunings)
goal_state = State(q=[-pi, 0], qd=[0, 0])
class Node(object):
def __init__(self, node_dict):
self.__dict__.update(node_dict)
if 'qd' in node_dict: # RRT
self.state = State(q=self.q, qd=self.qd)
self.goal_dist = goal_state.dist(self.state)
else: # VIP-RRT
dq = sum(sqrt(1. - cos(self.q - goal_state.q))) / 4.
dv = self.v_min
self.goal_dist = (dq + dv) / 2.
class Run(object):
def __init__(self, run_dict):
self.__dict__.update(run_dict)
self.nodes = [Node(node) for node in run_dict['nodes']]
if len(self.nodes) > 0:
self.score = min([node.goal_dist for node in self.nodes])
else:
self.score = 2.0
class VIPRun(Run):
def __init__(self, run_dict):
super(VIPRun, self).__init__(run_dict)
forgotten_root = {
'q': array([0., 0.]),
'parent': None,
'v_min': 0.,
'v_max': 0.,
'iterstep': 0,
'timestamp': self.start_time + 1e-3}
if len(self.nodes) > 0:
max_iterstep = max([node.iterstep for node in self.nodes])
max_timestamp = max([node.timestamp for node in self.nodes])
else:
max_iterstep = 0
max_timestamp = 0
forgotten_goal = {
'q': array([-pi, 0]),
'parent': None,
'v_min': 0.,
'v_max': 0.,
'iterstep': max_iterstep + 1,
'timestamp': max_timestamp + 1e-3}
self.nodes = [Node(forgotten_root)] + self.nodes
self.nodes.append(Node(forgotten_goal))
self.rrt_runs = [Run(p['van_rrt']) for p in dump_dict['planners'] if 'van_rrt' in p.keys()]
self.vip_runs = [VIPRun(p['vip_rrt']) for p in dump_dict['planners'] if 'vip_rrt' in p.keys()]
# TODO: LQR-RRT here
self.goal_state = goal_state
def plot_run(run, color):
# Hack to exclude the case when VIP has not been run
if len(run.nodes) < 3:
return
xvals, yvals, cur_dist = [], [], 1e15
for node in run.nodes:
cur_dist = node.goal_dist if node.goal_dist < cur_dist else cur_dist
xvals.append(node.timestamp - run.start_time)
yvals.append(cur_dist)
pylab.plot(xvals, yvals, color)
def plot_dump(dump, subplot=111):
pylab.subplot(subplot)
map(lambda run: plot_run(run, 'g-'), dump.rrt_runs)
map(lambda run: plot_run(run, 'b-'), dump.vip_runs)
xvals, yvals, _ = compute_means(dump.rrt_runs)
pylab.plot(xvals, yvals, 'g-', linewidth=5)
xvals, yvals, _ = compute_means(dump.vip_runs)
pylab.plot(xvals, yvals, 'b-', linewidth=5)
pylab.xscale('log')
pylab.yscale('linear')
pylab.grid(True)
pylab.xlim(1., 2e4)
pylab.ylim(0, 2e-1)
title = str(dump.tunings)
title = title.replace('[', '\n[')
if 'run_vip = False' in title:
title = title.replace('\n[run_vip = False]', '')
title = title.replace('Tunings with', 'RRT')
else:
title = title.replace('Default tunings', 'VIP-RRT')
pylab.title(title)
def first_entry(run, threshold, default):
for i in range(len(run.nodes)):
if run.nodes[i].goal_dist < threshold:
return run.nodes[i].timestamp - run.start_time
return default
def cumul(runs):
nbruns = len(runs)+0.
maxtime = max([run.nodes[-1].timestamp-run.start_time for run in runs])+100
endtime = 2e4
x = [first_entry(run, 1e-2, endtime) for run in runs]
n = x.count(endtime)
for i in range(n):
x.remove(endtime)
x.sort()
y = [(len(filter(lambda t: t <= xi, x))) / nbruns for xi in x]
x.insert(0, 0)
y.insert(0, 0)
x = x*2
y = y*2
x.sort()
y.sort()
x.pop(0)
y.pop()
x.append(maxtime)
y.append(y[-1])
return numpy.array(x), numpy.array(y)
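# The duplicate-and-sort manipulation above turns the cumulative fractions into a step
# function: each first-entry time ends up paired once with the fraction before its jump and
# once with the fraction after it, so a line plot shows horizontal plateaus between
# successive successes.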
def compute_means(runs):
"""
Compute the time-series (t, mean(dist_to_goal), std(dist_to_goal))
for a set of runs.
"""
# exclude the case when VIP has not been run
if len(runs[0].nodes) < 3:
return [0], [0], [0]
nruns = len(runs)
times = map(lambda run: [node.timestamp - run.start_time for node in
run.nodes], runs)
dists = map(lambda run: [node.goal_dist for node in run.nodes], runs)
def mindist(distvect):
res = numpy.zeros(len(distvect)+1)
m = 1e10
for i in xrange(len(distvect)):
m = min(distvect[i], m)
res[i] = m
if res[-2] > 1e-2:
res[-1] = res[-2]
return res
mindists = [mindist(dv) for dv in dists]
maxtime = max([t[-1] for t in times])
time_vect = numpy.arange(0, maxtime + 10, 1)
def interpolate_dist(timesv, distsv, t):
idx = bisect.bisect_left(timesv, t)
return distsv[min(idx, len(distsv) - 1)]
dist_matrix = numpy.zeros((nruns, len(time_vect)))
for i in range(len(time_vect)):
for j in range(len(times)):
dist_matrix[j, i] = interpolate_dist(times[j], mindists[j],
time_vect[i])
return time_vect, numpy.mean(dist_matrix, 0), numpy.std(dist_matrix, 0)
if __name__ == '__main__':
assert len(sys.argv) > 1, "Usage: %s <trace_path>" % sys.argv[0]
trace_dir = sys.argv[1]
print "\n--\nLoading dumps from %s..." % trace_dir
files = os.listdir(trace_dir)
with open('%s/poses.pkl' % trace_dir, 'r') as f:
poses = pickle.load(f)
with open('%s/velocities.pkl' % trace_dir, 'r') as f:
velocities = pickle.load(f)
dump_files = filter(lambda s: s.find("dump") >= 0, files)
dump_dicts = []
for dump_file in dump_files:
with open('%s/%s' % (trace_dir, dump_file), 'r') as f:
dump_dicts.append(pickle.load(f))
dumps = [Dump(dd) for dd in dump_dicts]
for i, dump in enumerate(dumps):
print "Dump %d: %s" % (i, str(dump.tunings))
maxtime = lambda run: (run.nodes[-1].timestamp-run.start_time) < 2e4
dump.rrt_runs = filter(maxtime, dump.rrt_runs)
dump.vip_runs = filter(maxtime, dump.vip_runs)
print "All dumps loaded in `dumps` list."
if "with-plots" in sys.argv:
pylab.ion()
pylab.clf()
for (i, dump) in enumerate(dumps):
plot_dump(dump, subplot=241 + i)
|
|
import random
import string
from datetime import timedelta
from django import forms
from django.conf import settings
from django.contrib import messages, auth
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.auth.views import LoginView
from django.core.mail import send_mail
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.template import loader
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
from smartmin.email import build_email_context
from smartmin.views import SmartCRUDL, SmartView, SmartFormView, SmartListView, SmartCreateView, SmartUpdateView
from .models import RecoveryToken, PasswordHistory, FailedLogin, is_password_complex
class UserForm(forms.ModelForm):
new_password = forms.CharField(label=_("New Password"), widget=forms.PasswordInput, strip=False)
groups = forms.ModelMultipleChoiceField(widget=forms.CheckboxSelectMultiple,
queryset=Group.objects.all(), required=False)
def clean_new_password(self):
password = self.cleaned_data['new_password']
# if they specified a new password
if password and not is_password_complex(password):
raise forms.ValidationError(_("Passwords must have at least 8 characters, including one uppercase, "
"one lowercase and one number"))
return password
def save(self, commit=True):
"""
Overloaded so we can save any new password that is included.
"""
is_new_user = self.instance.pk is None
user = super(UserForm, self).save(commit)
# new users should be made active by default
if is_new_user:
user.is_active = True
# if we had a new password set, use it
new_pass = self.cleaned_data['new_password']
if new_pass:
user.set_password(new_pass)
if commit:
user.save()
return user
class Meta:
model = get_user_model()
fields = ('username', 'new_password', 'first_name', 'last_name', 'email', 'groups', 'is_active')
class UserUpdateForm(UserForm):
new_password = forms.CharField(label=_("New Password"), widget=forms.PasswordInput, required=False, strip=False)
def clean_new_password(self):
password = self.cleaned_data['new_password']
if password and not is_password_complex(password):
raise forms.ValidationError(_("Passwords must have at least 8 characters, including one uppercase, "
"one lowercase and one number"))
if password and PasswordHistory.is_password_repeat(self.instance, password):
raise forms.ValidationError(_("You have used this password before in the past year, "
"please use a new password."))
return password
class UserProfileForm(UserForm):
old_password = forms.CharField(label=_("Password"), widget=forms.PasswordInput, required=False, strip=False)
new_password = forms.CharField(label=_("New Password"), widget=forms.PasswordInput, required=False, strip=False)
confirm_new_password = forms.CharField(
label=_("Confirm Password"), widget=forms.PasswordInput, required=False, strip=False
)
def clean_old_password(self):
user = self.instance
if not user.check_password(self.cleaned_data['old_password']):
raise forms.ValidationError(_("Please enter your password to save changes."))
return self.cleaned_data['old_password']
def clean_confirm_new_password(self):
if 'new_password' not in self.cleaned_data:
return None
if not self.cleaned_data['confirm_new_password'] and self.cleaned_data['new_password']:
raise forms.ValidationError(_("Confirm the new password by filling in this field"))
if self.cleaned_data['new_password'] != self.cleaned_data['confirm_new_password']:
raise forms.ValidationError(_("New password doesn't match its confirmation"))
password = self.cleaned_data['new_password']
if password and not is_password_complex(password):
raise forms.ValidationError(_("Passwords must have at least 8 characters, including one uppercase, "
"one lowercase and one number"))
if password and PasswordHistory.is_password_repeat(self.instance, password):
raise forms.ValidationError(_("You have used this password before in the past year, "
"please use a new password."))
return self.cleaned_data['new_password']
class UserForgetForm(forms.Form):
email = forms.EmailField(label=_("Your Email"),)
def clean_email(self):
email = self.cleaned_data['email'].strip()
allow_email_recovery = getattr(settings, 'USER_ALLOW_EMAIL_RECOVERY', True)
if not allow_email_recovery:
raise forms.ValidationError(_("E-mail recovery is not supported, "
"please contact the website administrator to reset your password manually."))
return email
class SetPasswordForm(UserForm):
old_password = forms.CharField(label=_("Current Password"), widget=forms.PasswordInput, required=True, strip=False,
help_text=_("Your current password"))
new_password = forms.CharField(label=_("New Password"), widget=forms.PasswordInput, required=True,
help_text=_("Your new password."), strip=False)
confirm_new_password = forms.CharField(label=_("Confirm new Password"), widget=forms.PasswordInput, required=True,
help_text=_("Confirm your new password."), strip=False)
def clean_old_password(self):
user = self.instance
if not user.check_password(self.cleaned_data['old_password']):
raise forms.ValidationError(_("Please enter your password to save changes"))
return self.cleaned_data['old_password']
def clean_confirm_new_password(self):
if 'new_password' not in self.cleaned_data:
return None
if not self.cleaned_data['confirm_new_password'] and self.cleaned_data['new_password']:
raise forms.ValidationError(_("Confirm your new password by entering it here"))
if self.cleaned_data['new_password'] != self.cleaned_data['confirm_new_password']:
raise forms.ValidationError(_("Mismatch between your new password and confirmation, try again"))
password = self.cleaned_data['new_password']
if password and not is_password_complex(password):
raise forms.ValidationError(_("Passwords must have at least 8 characters, including one uppercase, "
"one lowercase and one number"))
if password and PasswordHistory.is_password_repeat(self.instance, password):
raise forms.ValidationError(_("You have used this password before in the past year, "
"please use a new password."))
return self.cleaned_data['new_password']
class UserCRUDL(SmartCRUDL):
model = get_user_model()
permissions = True
actions = ('create', 'list', 'update', 'profile', 'forget', 'recover', 'expired', 'failed', 'newpassword', 'mimic')
class List(SmartListView):
search_fields = ('username__icontains', 'first_name__icontains', 'last_name__icontains')
fields = ('username', 'name', 'group', 'last_login')
link_fields = ('username', 'name')
default_order = 'username'
add_button = True
template_name = "smartmin/users/user_list.html"
def get_context_data(self, **kwargs):
context = super(UserCRUDL.List, self).get_context_data(**kwargs)
context['groups'] = Group.objects.all()
group_id = self.request.POST.get('group_id', self.request.GET.get('group_id', 0))
context['group_id'] = int(group_id)
return context
def get_group(self, obj):
return ", ".join([group.name for group in obj.groups.all()])
def get_queryset(self, **kwargs):
queryset = super(UserCRUDL.List, self).get_queryset(**kwargs)
group_id = self.request.POST.get('group_id', self.request.GET.get('group_id', 0))
group_id = int(group_id)
# filter by the group
if group_id:
queryset = queryset.filter(groups=group_id)
# ignore superusers and staff users
return queryset.exclude(is_staff=True).exclude(is_superuser=True).exclude(password=None)
def get_name(self, obj):
return obj.get_full_name()
class Create(SmartCreateView):
form_class = UserForm
fields = ('username', 'new_password', 'first_name', 'last_name', 'email', 'groups')
success_message = _("New user created successfully.")
field_config = {
'groups': dict(label=_("Groups"),
help=_("Users will only get those permissions that are allowed for their group.")),
'new_password': dict(label=_("Password"), help=_("Set the user's initial password here.")),
}
def post_save(self, obj):
"""
Make sure our groups are up to date
"""
if 'groups' in self.form.cleaned_data:
for group in self.form.cleaned_data['groups']:
obj.groups.add(group)
return obj
class Update(SmartUpdateView):
form_class = UserUpdateForm
template_name = "smartmin/users/user_update.html"
success_message = "User saved successfully."
fields = ('username', 'new_password', 'first_name', 'last_name', 'email', 'groups', 'is_active', 'last_login')
field_config = {
'last_login': dict(readonly=True, label=_("Last Login")),
'is_active': dict(label=_("Is Active"), help=_("Whether this user is allowed to log into the site")),
'groups': dict(label=_("Groups"),
help=_("Users will only get those permissions that are allowed for their group")),
'new_password': dict(label=_("New Password"),
help=_("You can reset the user's password by entering a new password here")),
}
def post_save(self, obj):
"""
Make sure our groups are up to date
"""
if 'groups' in self.form.cleaned_data:
obj.groups.clear()
for group in self.form.cleaned_data['groups']:
obj.groups.add(group)
# if a new password was set, reset our failed logins
if 'new_password' in self.form.cleaned_data and self.form.cleaned_data['new_password']:
FailedLogin.objects.filter(username__iexact=self.object.username).delete()
PasswordHistory.objects.create(user=obj, password=obj.password)
return obj
class Profile(SmartUpdateView):
form_class = UserProfileForm
success_message = "User profile saved successfully."
fields = ('username', 'old_password', 'new_password', 'confirm_new_password',
'first_name', 'last_name', 'email')
field_config = {
'username': dict(readonly=True, label=_("Username")),
'old_password': dict(label=_("Password"), help=_("Your password")),
'new_password': dict(label=_("New Password"), help=_("If you want to set a new password, enter it here")),
'confirm_new_password': dict(label=_("Confirm New Password"), help=_("Confirm your new password")),
}
def post_save(self, obj):
obj = super(UserCRUDL.Profile, self).post_save(obj)
if 'new_password' in self.form.cleaned_data and self.form.cleaned_data['new_password']:
FailedLogin.objects.filter(username__iexact=self.object.username).delete()
PasswordHistory.objects.create(user=obj, password=obj.password)
return obj
def get_object(self, queryset=None):
return self.request.user
def derive_title(self):
return _("Edit your profile")
class Forget(SmartFormView):
title = _("Password Recovery")
template_name = 'smartmin/users/user_forget.html'
form_class = UserForgetForm
permission = None
success_message = _("An Email has been sent to your account with further instructions.")
success_url = "@users.user_login"
fields = ('email', )
def form_valid(self, form):
email = form.cleaned_data['email']
hostname = getattr(settings, 'HOSTNAME', self.request.get_host())
col_index = hostname.find(':')
domain = hostname[:col_index] if col_index > 0 else hostname
from_email = getattr(settings, 'DEFAULT_FROM_EMAIL', 'website@%s' % domain)
user_email_template = getattr(settings, "USER_FORGET_EMAIL_TEMPLATE", "smartmin/users/user_email.txt")
user = get_user_model().objects.filter(email__iexact=email).first()
context = build_email_context(self.request, user)
if user:
token = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))
RecoveryToken.objects.create(token=token, user=user)
email_template = loader.get_template(user_email_template)
FailedLogin.objects.filter(username__iexact=user.username).delete()
context['user'] = user
context['path'] = "%s" % reverse('users.user_recover', args=[token])
send_mail(_('Password Recovery Request'), email_template.render(context), from_email,
[email], fail_silently=False)
response = super(UserCRUDL.Forget, self).form_valid(form)
return response
class Newpassword(SmartUpdateView):
form_class = SetPasswordForm
fields = ('old_password', 'new_password', 'confirm_new_password')
title = _("Pick a new password")
template_name = 'smartmin/users/user_newpassword.html'
success_message = _("Your password has successfully been updated, thank you.")
def get_context_data(self, *args, **kwargs):
context_data = super(UserCRUDL.Newpassword, self).get_context_data(*args, **kwargs)
context_data['expire_days'] = getattr(settings, 'USER_PASSWORD_EXPIRATION', -1)
context_data['window_days'] = getattr(settings, 'USER_PASSWORD_REPEAT_WINDOW', -1)
return context_data
def has_permission(self, request, *args, **kwargs):
return request.user.is_authenticated
def get_object(self, queryset=None):
return self.request.user
def post_save(self, obj):
obj = super(UserCRUDL.Newpassword, self).post_save(obj)
PasswordHistory.objects.create(user=obj, password=obj.password)
return obj
def get_success_url(self):
return settings.LOGIN_REDIRECT_URL
class Mimic(SmartUpdateView):
fields = ('id',)
def derive_success_message(self):
return _("You are now logged in as %s") % self.object.username
def pre_process(self, request, *args, **kwargs):
user = self.get_object()
Login.as_view()(request)
# After logging in it is important to change the user stored in the session
# otherwise the user will remain the same
request.session[auth.SESSION_KEY] = user.id
request.session[auth.HASH_SESSION_KEY] = user.get_session_auth_hash()
return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
class Recover(SmartUpdateView):
form_class = SetPasswordForm
permission = None
success_message = _("Password Updated Successfully. Now you can log in using your new password.")
success_url = '@users.user_login'
fields = ('new_password', 'confirm_new_password')
title = _("Reset your Password")
template_name = 'smartmin/users/user_recover.html'
@classmethod
def derive_url_pattern(cls, path, action):
return r'^%s/%s/(?P<token>\w+)/$' % (path, action)
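# Illustrative: with path='users/user' and action='recover' (hypothetical values), this
# yields r'^users/user/recover/(?P<token>\w+)/$'; the captured token is read back in
# pre_process() and get_object() via self.kwargs.get('token').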
def pre_process(self, request, *args, **kwargs):
token = self.kwargs.get('token')
validity_time = timezone.now() - timedelta(hours=48)
recovery_token = RecoveryToken.objects.filter(created_on__gt=validity_time, token=token)
if not recovery_token:
messages.info(request, _("Your link has expired for security reasons. "
"Please reinitiate the process by entering your email here."))
return HttpResponseRedirect(reverse("users.user_forget"))
return super(UserCRUDL.Recover, self).pre_process(request, args, kwargs)
def get_object(self, queryset=None):
token = self.kwargs.get('token')
recovery_token = RecoveryToken.objects.get(token=token)
return recovery_token.user
def post_save(self, obj):
obj = super(UserCRUDL.Recover, self).post_save(obj)
validity_time = timezone.now() - timedelta(hours=48)
RecoveryToken.objects.filter(user=obj).delete()
RecoveryToken.objects.filter(created_on__lt=validity_time).delete()
PasswordHistory.objects.create(user=obj, password=obj.password)
return obj
class Expired(SmartView, TemplateView):
permission = None
template_name = 'smartmin/users/user_expired.html'
class Failed(SmartView, TemplateView):
permission = None
template_name = 'smartmin/users/user_failed.html'
def get_context_data(self, *args, **kwargs):
context = super(UserCRUDL.Failed, self).get_context_data(*args, **kwargs)
lockout_timeout = getattr(settings, 'USER_LOCKOUT_TIMEOUT', 10)
failed_login_limit = getattr(settings, 'USER_FAILED_LOGIN_LIMIT', 5)
allow_email_recovery = getattr(settings, 'USER_ALLOW_EMAIL_RECOVERY', True)
context['lockout_timeout'] = lockout_timeout
context['failed_login_limit'] = failed_login_limit
context['allow_email_recovery'] = allow_email_recovery
return context
class Login(LoginView):
template_name = 'smartmin/users/login.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['allow_email_recovery'] = getattr(settings, 'USER_ALLOW_EMAIL_RECOVERY', True)
return context
def post(self, request, *args, **kwargs):
form = self.get_form()
# clean form data
form_is_valid = form.is_valid()
lockout_timeout = getattr(settings, 'USER_LOCKOUT_TIMEOUT', 10)
failed_login_limit = getattr(settings, 'USER_FAILED_LOGIN_LIMIT', 5)
username = self.get_username(form)
if not username:
return self.form_invalid(form)
user = get_user_model().objects.filter(username__iexact=username).first()
valid_password = False
# this could be a valid login by a user
if user:
# incorrect password? create a failed login token
valid_password = user.check_password(form.cleaned_data.get('password'))
if not user or not valid_password:
FailedLogin.objects.create(username=username)
bad_interval = timezone.now() - timedelta(minutes=lockout_timeout)
failures = FailedLogin.objects.filter(username__iexact=username)
# if the failures reset after a period of time, then limit our query to that interval
if lockout_timeout > 0:
failures = failures.filter(failed_on__gt=bad_interval)
# if there are too many failed logins, take them to the failed page
if len(failures) >= failed_login_limit:
return HttpResponseRedirect(reverse('users.user_failed'))
# pass through the normal login process
if form_is_valid:
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
# clean up any failed logins for this user
FailedLogin.objects.filter(username__iexact=self.get_username(form)).delete()
return super().form_valid(form)
def get_username(self, form):
return form.cleaned_data.get('username')
|
|
from functools import update_wrapper
from weakref import WeakSet
from django.apps import apps
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, reverse
from django.utils.functional import LazyObject
from django.utils.module_loading import import_string
from django.utils.text import capfirst
from django.utils.translation import gettext as _, gettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.i18n import JavaScriptCatalog
all_sites = WeakSet()
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class AdminSite:
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = gettext_lazy('Django site admin')
# Text to put in each page's <h1>.
site_header = gettext_lazy('Django administration')
# Text to put at the top of the admin index page.
index_title = gettext_lazy('Site administration')
# URL for the "View site" link at the top of each admin page.
site_url = '/'
_empty_value_display = '-'
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name='admin'):
self._registry = {} # model_class class -> admin_class instance
self.name = name
self._actions = {'delete_selected': actions.delete_selected}
self._global_actions = self._actions.copy()
all_sites.add(self)
def check(self, app_configs):
"""
Run the system checks on all ModelAdmins, except if they aren't
customized at all.
"""
if app_configs is None:
app_configs = apps.get_app_configs()
app_configs = set(app_configs) # Speed up lookups below
errors = []
modeladmins = (o for o in self._registry.values() if o.__class__ is not ModelAdmin)
for modeladmin in modeladmins:
if modeladmin.model._meta.app_config in app_configs:
errors.extend(modeladmin.check())
return errors
def register(self, model_or_iterable, admin_class=None, **options):
"""
Register the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, use ModelAdmin (the default admin
options). If keyword arguments are given -- e.g., list_display --
apply them as options to the admin class.
If a model is already registered, raise AlreadyRegistered.
If a model is abstract, raise ImproperlyConfigured.
"""
admin_class = admin_class or ModelAdmin
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured(
'The model %s is abstract, so it cannot be registered with admin.' % model.__name__
)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# Ignore the registration if the model has been
# swapped out.
if not model._meta.swapped:
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
# Instantiate the admin class to save in the registry
self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregister the given model(s).
If a model isn't already registered, raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def is_registered(self, model):
"""
Check if a model class is registered with this `AdminSite`.
"""
return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raise KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitly get a registered global action whether it's enabled or
not. Raise KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return self._actions.items()
@property
def empty_value_display(self):
return self._empty_value_display
@empty_value_display.setter
def empty_value_display(self, empty_value_display):
self._empty_value_display = empty_value_display
def has_permission(self, request):
"""
Return True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.urls import path
urls = super().get_urls()
urls += [
path('my_view/', self.admin_view(some_view))
]
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request):
if request.path == reverse('admin:logout', current_app=self.name):
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
# Inner import to prevent django.contrib.admin (app) from
# importing django.contrib.auth.models.User (unrelated model).
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
request.get_full_path(),
reverse('admin:login', current_app=self.name)
)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.urls import include, path, re_path
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.contenttypes.views imports ContentType.
from django.contrib.contenttypes import views as contenttype_views
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
wrapper.admin_site = self
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = [
path('', wrap(self.index), name='index'),
path('login/', self.login, name='login'),
path('logout/', wrap(self.logout), name='logout'),
path('password_change/', wrap(self.password_change, cacheable=True), name='password_change'),
path(
'password_change/done/',
wrap(self.password_change_done, cacheable=True),
name='password_change_done',
),
path('jsi18n/', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
path(
'r/<int:content_type_id>/<path:object_id>/',
wrap(contenttype_views.shortcut),
name='view_on_site',
),
]
# Add in each model's views, and create a list of valid URLS for the
# app_index
valid_app_labels = []
for model, model_admin in self._registry.items():
urlpatterns += [
path('%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
]
if model._meta.app_label not in valid_app_labels:
valid_app_labels.append(model._meta.app_label)
# If there were ModelAdmins registered, we should have a list of app
        # labels for which we need to allow access to the app_index view.
if valid_app_labels:
regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
urlpatterns += [
re_path(regex, wrap(self.app_index), name='app_list'),
]
return urlpatterns
@property
def urls(self):
return self.get_urls(), 'admin', self.name
def each_context(self, request):
"""
Return a dictionary of variables to put in the template context for
*every* page in the admin site.
For sites running on a subpath, use the SCRIPT_NAME value if site_url
hasn't been customized.
"""
script_name = request.META['SCRIPT_NAME']
site_url = script_name if self.site_url == '/' and script_name else self.site_url
return {
'site_title': self.site_title,
'site_header': self.site_header,
'site_url': site_url,
'has_permission': self.has_permission(request),
'available_apps': self.get_app_list(request),
'is_popup': False,
}
def password_change(self, request, extra_context=None):
"""
Handle the "change password" task -- both form display and validation.
"""
from django.contrib.admin.forms import AdminPasswordChangeForm
from django.contrib.auth.views import PasswordChangeView
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'form_class': AdminPasswordChangeForm,
'success_url': url,
'extra_context': {**self.each_context(request), **(extra_context or {})},
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
request.current_app = self.name
return PasswordChangeView.as_view(**defaults)(request)
def password_change_done(self, request, extra_context=None):
"""
Display the "success" page after a password change.
"""
from django.contrib.auth.views import PasswordChangeDoneView
defaults = {
'extra_context': {**self.each_context(request), **(extra_context or {})},
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
request.current_app = self.name
return PasswordChangeDoneView.as_view(**defaults)(request)
def i18n_javascript(self, request, extra_context=None):
"""
Display the i18n JavaScript that the Django admin requires.
`extra_context` is unused but present for consistency with the other
admin views.
"""
return JavaScriptCatalog.as_view(packages=['django.contrib.admin'])(request)
@never_cache
def logout(self, request, extra_context=None):
"""
Log out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import LogoutView
defaults = {
'extra_context': {
**self.each_context(request),
# Since the user isn't logged out at this point, the value of
# has_permission must be overridden.
'has_permission': False,
**(extra_context or {})
},
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
request.current_app = self.name
return LogoutView.as_view(**defaults)(request)
@never_cache
def login(self, request, extra_context=None):
"""
Display the login form for the given HttpRequest.
"""
if request.method == 'GET' and self.has_permission(request):
# Already logged-in, redirect to admin index
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
from django.contrib.auth.views import LoginView
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.admin.forms eventually imports User.
from django.contrib.admin.forms import AdminAuthenticationForm
context = {
**self.each_context(request),
'title': _('Log in'),
'app_path': request.get_full_path(),
'username': request.user.get_username(),
}
if (REDIRECT_FIELD_NAME not in request.GET and
REDIRECT_FIELD_NAME not in request.POST):
context[REDIRECT_FIELD_NAME] = reverse('admin:index', current_app=self.name)
context.update(extra_context or {})
defaults = {
'extra_context': context,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
request.current_app = self.name
return LoginView.as_view(**defaults)(request)
def _build_app_dict(self, request, label=None):
"""
Build the app dictionary. The optional `label` parameter filters models
of a specific app.
"""
app_dict = {}
if label:
models = {
m: m_a for m, m_a in self._registry.items()
if m._meta.app_label == label
}
else:
models = self._registry
for model, model_admin in models.items():
app_label = model._meta.app_label
has_module_perms = model_admin.has_module_permission(request)
if not has_module_perms:
continue
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True not in perms.values():
continue
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
'admin_url': None,
'add_url': None,
}
if perms.get('change') or perms.get('view'):
model_dict['view_only'] = not perms.get('change')
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add'):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': apps.get_app_config(app_label).verbose_name,
'app_label': app_label,
'app_url': reverse(
'admin:app_list',
kwargs={'app_label': app_label},
current_app=self.name,
),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if label:
return app_dict.get(label)
return app_dict
def get_app_list(self, request):
"""
Return a sorted list of all the installed apps that have been
registered in this site.
"""
app_dict = self._build_app_dict(request)
# Sort the apps alphabetically.
app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
return app_list
@never_cache
def index(self, request, extra_context=None):
"""
Display the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_list = self.get_app_list(request)
context = {
**self.each_context(request),
'title': self.index_title,
'app_list': app_list,
**(extra_context or {}),
}
request.current_app = self.name
return TemplateResponse(request, self.index_template or 'admin/index.html', context)
def app_index(self, request, app_label, extra_context=None):
app_dict = self._build_app_dict(request, app_label)
if not app_dict:
raise Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
app_name = apps.get_app_config(app_label).verbose_name
context = {
**self.each_context(request),
'title': _('%(app)s administration') % {'app': app_name},
'app_list': [app_dict],
'app_label': app_label,
**(extra_context or {}),
}
request.current_app = self.name
return TemplateResponse(request, self.app_index_template or [
'admin/%s/app_index.html' % app_label,
'admin/app_index.html'
], context)
class DefaultAdminSite(LazyObject):
def _setup(self):
AdminSiteClass = import_string(apps.get_app_config('admin').default_site)
self._wrapped = AdminSiteClass()
# This global object represents the default admin site, for the common case.
# You can provide your own AdminSite using the (Simple)AdminConfig.default_site
# attribute. You can also instantiate AdminSite in your own code to create a
# custom admin site.
site = DefaultAdminSite()
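# Usage sketch: in a project's admin.py, registration with the default site
# (or a custom one) typically looks like this -- "myapp" and "Article" are
# hypothetical names used only for illustration:
#
#     from django.contrib import admin
#     from myapp.models import Article
#
#     class ArticleAdmin(admin.ModelAdmin):
#         list_display = ("title", "published")
#
#     admin.site.register(Article, ArticleAdmin)
#
#     # or, with a custom site registered under its own namespace:
#     my_site = admin.AdminSite(name="myadmin")
#     my_site.register(Article, ArticleAdmin)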
|
|
import collections
class Terminate(Exception):
pass
class SegmentationFault(Exception):
pass
class Operand(collections.namedtuple("Operand", "value ref")):
MASK_REF = 2**12
MASK_VAL = 2**12 - 1
def __new__(cls, data):
obj = super(Operand, cls).__new__(
cls,
value=(int(data) & Operand.MASK_VAL),
ref=(int(data) & Operand.MASK_REF != 0),
)
return obj
def __int__(self):
value = self.value
if self.ref:
value += Operand.MASK_REF
return value
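# Quick illustration of the Operand encoding: the low 12 bits carry the value
# and bit 12 marks an indirect reference, so round-tripping works like this:
#
#     op = Operand(5 | Operand.MASK_REF)
#     op.value   -> 5
#     op.ref     -> True
#     int(op)    -> 5 + Operand.MASK_REF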
class Opcode(collections.namedtuple("Opcode", "mneumonic opcode value lo hi")):
"""
Each instruction is designed to be 32 bits long with the following
structure,
    OOOOLLLLLLLLLLLLLLHHHHHHHHHHHHHH
    O: instruction (4 bits)
    L: 'lo' operand (14 bits)
    H: 'hi' operand (14 bits)
"""
def __new__(cls, lo, hi):
value = (cls.OPCODE << 28) + (int(lo) << 14) + int(hi)
return super(Opcode, cls).__new__(
cls,
cls.MNEUMONIC,
cls.OPCODE,
value,
int(lo),
int(hi),
)
class Null(Opcode):
MNEUMONIC = "NUL"
OPCODE = 0
def execute(self, memory):
pass
class Store(Opcode):
MNEUMONIC = "STA"
OPCODE = 1
def execute(self, memory):
val = memory.read_eax()
memory.write(self.lo, val)
class Load(Opcode):
MNEUMONIC = "LDA"
OPCODE = 2
def execute(self, memory):
memory.write_eax(self.lo)
class Increment(Opcode):
MNEUMONIC = "INC"
OPCODE = 3
def execute(self, memory):
val = memory.read(self.lo)
memory.write(self.lo, val + 1)
class Decrement(Opcode):
MNEUMONIC = "DEC"
OPCODE = 4
def execute(self, memory):
val = memory.read(self.lo)
memory.write(self.lo, val - 1)
class Addition(Opcode):
MNEUMONIC = "ADD"
OPCODE = 5
def execute(self, memory):
val = (memory.read(self.lo) + memory.read(self.hi)) % 2**16
memory.write(self.lo, val)
class Subtraction(Opcode):
MNEUMONIC = "SUB"
OPCODE = 6
def execute(self, memory):
        # read both operands from memory, mirroring ADD
        val = (memory.read(self.lo) - memory.read(self.hi)) % 2**16
memory.write(self.lo, val)
class Jump(Opcode):
MNEUMONIC = "JMP"
OPCODE = 7
def execute(self, memory):
memory.ptr = self.lo - 1
class Move(Opcode):
MNEUMONIC = "MOV"
OPCODE = 8
def execute(self, memory):
val = memory.read(self.hi)
memory.write(self.lo, val)
class Compare(Opcode):
MNEUMONIC = "CMP"
OPCODE = 9
def execute(self, memory):
vlo = memory.read(self.lo)
vhi = memory.read(self.hi)
memory.flag_cmp = 1 if vlo < vhi else 0
class BranchAbove(Opcode):
MNEUMONIC = "BRA"
OPCODE = 10
def execute(self, memory):
if memory.flag_cmp == 0:
memory.ptr = self.lo - 1
class BranchBelow(Opcode):
MNEUMONIC = "BRB"
OPCODE = 11
def execute(self, memory):
if memory.flag_cmp == 1:
memory.ptr = self.lo - 1
class Halt(Opcode):
MNEUMONIC = "HLT"
OPCODE = 12
def execute(self, memory):
raise Terminate()
class Memory(object):
ADDR_IO = 0 # start of memory reserved for device I/O
ADDR_REG = 17 # start of memory reserved for registers
ADDR_FLG = 25 # start of memory reserved for flags
ADDR_PRG = 33 # start of memory reserved for programs
def __init__(self):
self._ram = dict()
self.ptr = Memory.ADDR_PRG
def __len__(self):
return max(self._ram) + 1 if self._ram else 0
def __iter__(self):
for index in range(len(self)):
yield self.read(index)
def read(self, index):
assert 0 <= index
return self._ram[index] if index in self._ram else 0
def write(self, index, value):
assert 0 <= index
assert 0 <= value < 2**32
self._ram[index] = value
def read_eax(self):
return self.read(Memory.ADDR_REG)
def write_eax(self, value):
self.write(Memory.ADDR_REG, value)
def load_program(self, prog):
        for index, instruction in enumerate(prog):
self.write(Memory.ADDR_PRG + index, instruction.value)
@property
def flag_cmp(self):
return self.read(Memory.ADDR_FLG)
@flag_cmp.setter
def flag_cmp(self, val):
self.write(Memory.ADDR_FLG, val)
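if __name__ == "__main__":
    # Minimal usage sketch: build a tiny program from the opcode classes and
    # execute the instruction objects directly. No decoder is defined in this
    # file, so jumps/branches that change memory.ptr are ignored here.
    program = [
        Load(7, 0),        # eax <- 7
        Store(40, 0),      # mem[40] <- eax
        Increment(40, 0),  # mem[40] <- mem[40] + 1
        Halt(0, 0),
    ]
    memory = Memory()
    memory.load_program(program)  # encoded instruction words land at ADDR_PRG
    try:
        for instruction in program:
            instruction.execute(memory)
    except Terminate:
        pass
    print(memory.read(40))  # expected output: 8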
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterpolar"
_path_str = "scatterpolar.textfont"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Textfont object
Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolar.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterpolar.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("familysrc", None)
_v = familysrc if familysrc is not None else _v
if _v is not None:
self["familysrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
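if __name__ == "__main__":
    # Usage sketch (assumes a standard plotly installation; the trace values
    # below are illustrative only).
    import plotly.graph_objects as go

    trace = go.Scatterpolar(
        r=[1, 2, 3],
        theta=[0, 45, 90],
        mode="markers+text",
        text=["a", "b", "c"],
        textfont=dict(family="Arial", size=14, color="#ff0000"),
    )
    print(trace.textfont)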
|
|
"""n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
"""Subclass of Turtle representing a gravitationally-acting body"""
name = 'Body'
vx = vy = 0.0 # velocities in m/s
px = py = 0.0 # positions in m
def attraction(self, other):
"""(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
# Distance of the other body
sx, sy = self.px, self.py
ox, oy = other.px, other.py
dx = (ox-sx)
dy = (oy-sy)
d = math.sqrt(dx**2 + dy**2)
# Force f and direction to the body
f = G * self.mass * other.mass / (d**2)
theta = math.atan2(dy, dx)
# direction of the force
fx = math.cos(theta) * f
fy = math.sin(theta) * f
return fx, fy
def loop(bodies, orbit_duration):
"""([Body]) Loops and updates the positions of all the provided bodies"""
# Calculate the duration of our simulation: One full orbit of the outer moon
seconds_per_day = 24*60*60
timesteps_per_day = 1000
timestep = seconds_per_day / timesteps_per_day
total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
#print total_steps, orbit_duration / 24 / 60 / 60
for body in bodies:
body.penup()
body.hideturtle()
for step in range(total_steps):
for body in bodies:
if body.name == 'planet':
# Add current position and velocity to our list
tdv_list.append(body.vx)
ttv_list.append(body.px)
force = {}
for body in bodies:
# Add up all of the forces exerted on 'body'
total_fx = total_fy = 0.0
for other in bodies:
# Don't calculate the body's attraction to itself
if body is other:
continue
fx, fy = body.attraction(other)
total_fx += fx
total_fy += fy
# Record the total force exerted
force[body] = (total_fx, total_fy)
# Update velocities based upon on the force
for body in bodies:
fx, fy = force[body]
body.vx += fx / body.mass * timestep
body.vy += fy / body.mass * timestep
# Update positions
body.px += body.vx * timestep
body.py += body.vy * timestep
#body.goto(body.px*SCALE, body.py*SCALE)
#body.dot(3)
def run_sim(R_star, transit_duration, bodies):
"""Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
# Run 3-body sim for one full orbit of the outermost moon
loop(bodies, orbit_duration)
    # Move resulting data from lists to numpy arrays
    ttv_array = numpy.array(ttv_list)
    tdv_array = numpy.array(tdv_list)
# Zeropoint correction
middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
ttv_array = numpy.divide(ttv_array, 1000) # km/s
# Compensate for barycenter offset of planet at start of simulation:
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
ttv_array = numpy.divide(ttv_array, stretch_factor)
# Convert to time units, TTV
ttv_array = numpy.divide(ttv_array, R_star)
ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24) # minutes
# Convert to time units, TDV
oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60 # m/sec
newspeed = oldspeed - numpy.amax(tdv_array)
difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
conversion_factor = difference / numpy.amax(tdv_array)
tdv_array = numpy.multiply(tdv_array, conversion_factor)
return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print(transit_duration)
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = a_io
secondmoon = Body()
secondmoon.mass = M_gan
secondmoon.px = a_gan
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *secondmoon.px ** 3) / (G * (secondmoon.mass + planet.mass)))
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon])
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparison with eccentric moon
plt.xlim(-0.4, +0.4)
plt.ylim(-1, +1)
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
tdv_list = []
ttv_list = []
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = a_io
secondmoon = Body()
secondmoon.mass = 2*M_gan
secondmoon.px = a_gan
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *secondmoon.px ** 3) / (G * (secondmoon.mass + planet.mass)))
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon])
plt.plot(ttv_array, tdv_array, color = 'b', linestyle=':')
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
tdv_list = []
ttv_list = []
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = a_io
secondmoon = Body()
secondmoon.mass = 3*M_gan
secondmoon.px = a_gan
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *secondmoon.px ** 3) / (G * (secondmoon.mass + planet.mass)))
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon])
plt.plot(ttv_array, tdv_array, 'r--')
plt.savefig("fig_combi_systems_vii,xi,xii.eps", bbox_inches = 'tight')
|
|
#!/usr/bin/env python
import hashlib
import uuid
import hmac
import time
import memcache
from Cookie import SimpleCookie
from saml2 import mcache
from saml2.mcache import ToOld
from oic.utils import time_util
import logging
logger = logging.getLogger(__name__)
def _expiration(timeout, strformat=None):
if timeout == "now":
return time_util.instant(strformat)
else:
# validity time should match lifetime of assertions
return time_util.in_a_while(minutes=timeout, time_format=strformat)
class State(object):
def __init__(self, memcached_servers, name, cookie_str="", secret=""):
self.name = name
self.session_cache = mcache.Cache(memcached_servers)
self.ref_cache = memcache.Client(memcached_servers)
self._secret = secret
self.sid = ""
self.get_id(cookie_str)
#print "State.sid: %s" % self.sid
def _sid(self):
sid = str(uuid.uuid4())
while True:
if not self.ref_cache.get(sid):
break
sid = str(uuid.uuid4())
return sid
def __str__(self):
result = {}
for session_id in self.get_sessions():
session = Session(self.session_cache, self.sid, session_id)
result[session_id] = session.active()
return "%s" % (result,)
def known_session(self, session_id):
if session_id in self.get_sessions():
return True
else:
return False
def old_session(self, session_id):
if self.known_session(session_id):
return self.get_session(session_id)
return None
def session_by_alternate_id(self, aid):
for session_id in self.get_sessions():
session = Session(self.session_cache, self.sid, session_id)
if session.sid_digest == aid:
return session
return None
def get_sessions(self):
try:
result = self.ref_cache.get(self.sid)
if result is None:
return []
else:
return result
except KeyError:
return []
def add_session(self, session_id):
""" This is in reality maintained by mcache
:param session_id: Is in fact the entity ID of the SP
:return: A Session instance
"""
sessions = self.get_sessions()
if session_id not in sessions:
sessions.append(session_id)
self.ref_cache.set(self.sid, sessions)
def get_session(self, session_id):
""" Returns an active Session description or a newly constructed.
:param session_id: Is in fact the entity ID of the SP
:return: A Session instance
"""
return Session(self.session_cache, self.sid, session_id, self._secret)
def sessions(self):
""" Iterator of the known sessions """
for session_id in self.get_sessions():
session = Session(self.session_cache, self.sid, session_id)
yield session
def any_active(self):
""" For management purposes. Returns all the active sessions that
exists for this 'user'.
:returr: True if there is at least one active session, otherwise False
"""
for session_id in self.get_sessions():
session = Session(self.session_cache, self.sid, session_id)
try:
info = session.get()
if "authentication" in info and info["authentication"] == "OK":
return True
except ToOld:
pass
return False
def get_id(self, cookie_str=""):
if cookie_str:
#print "-connection by cookie-"
cookie_obj = SimpleCookie(cookie_str)
morsel = cookie_obj.get(self.name)
if morsel is not None:
self.sid = self.parse_cookie(morsel.value)
if not self.sid:
#print "-New connection-"
self.sid = self._sid()
def cookie(self, expire=0, domain="", path=""):
"""
:param expire: Number of minutes before this cookie goes stale
:param domain: The domain of the cookie
:param path: The path specification for the cookie
:return: A tuple to be added to headers
"""
cookie = SimpleCookie()
timestamp = str(int(time.time()))
signature = self.cookie_signature(self.sid, timestamp)
cookie[self.name] = "|".join([self.sid, timestamp, signature])
if path:
cookie[self.name]["path"] = path
if domain:
cookie[self.name]["domain"] = domain
if expire:
cookie[self.name]["expires"] = \
_expiration(expire, "%a, %d-%b-%Y %H:%M:%S CET")
return tuple(cookie.output().split(": ", 1))
def parse_cookie(self, value):
"""Parses and verifies a cookie value """
if not value:
return None
parts = value.split("|")
if len(parts) != 3:
return None
# verify the cookie signature
if self.cookie_signature(parts[0], parts[1]) != parts[2]:
raise Exception("Invalid cookie signature %r", value)
try:
return parts[0].strip()
except KeyError:
return None
def cookie_signature(self, *parts):
"""Generates a cookie signature.
"""
sha1 = hmac.new(self._secret, digestmod=hashlib.sha1)
for part in parts:
sha1.update(part)
return sha1.hexdigest()
def digest(item):
return hmac.new("1234", item, digestmod=hashlib.sha1).hexdigest()
class Session(object):
""" Knowledge connected to a specific authentication session """
def __init__(self, cache, group, session_id="", secret=""):
self.group = group
self._cache = cache
self._secret = secret
self.session_id = session_id
self.sid_digest = digest(session_id)
def cache_identity(self, session_id, identity, until):
self._cache.update(self.group, session_id, {"ava": identity})
# not_on_or_after =
# self.server.conf.idp_policy().policy.not_on_or_after()
self._cache.valid_to(self.group, session_id, until)
def remember(self, info, session_id=""):
if not session_id:
session_id = self.session_id
        until = _expiration(30)  # half an hour to log in?!
self._cache.set(self.group, self.session_id, {"req": info}, until)
return self.session_id
def __setitem__(self, key, value):
self._cache.update(self.group, self.session_id, {key: value})
def __contains__(self, key):
if key in self._cache.get(self.group, self.session_id):
return True
else:
return False
def get(self, session_id=None):
""" Will raise an exception if the information is to old. """
if not session_id:
session_id = self.session_id
return self._cache.get(self.group, session_id)
def __getitem__(self, key):
try:
return self._cache.get(self.group, self.session_id)[key]
except (ValueError, KeyError):
return None
def __str__(self):
return "%s" % self.get()
def duplicate(self):
session = Session(self._cache, self.group)
session.session_id = self.session_id
return session
def reset(self):
self._cache.reset(self.group, self.session_id)
def valid_to(self, tid):
"""
:param tid: Number of seconds this information should be valid
"""
self._cache.valid_to(self.group, self.session_id, tid)
def keys(self):
try:
return self._cache.get(self.group, self.session_id).keys()
except (ValueError, KeyError):
return []
def active(self):
return self._cache.active(self.group, self.session_id)
def __eq__(self, other):
if self.group == other.group:
if self.session_id == other.session_id:
return True
return False
def info(self):
try:
return self._cache.get(self.group, self.session_id)["req"]
except (ValueError, KeyError):
return None
def authn_service(self):
return self._cache.get(self.group, self.session_id)["service"]
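if __name__ == "__main__":
    # Usage sketch (assumes a memcached instance on 127.0.0.1:11211 and the
    # Python 2-era dependencies this module imports; names are illustrative).
    state = State(["127.0.0.1:11211"], "idpauthn", cookie_str="",
                  secret="my-shared-secret")
    session = state.get_session("urn:mace:example.com:saml:sp")
    session.remember({"came_from": "/protected"})
    state.add_session("urn:mace:example.com:saml:sp")
    print(state.cookie(expire=480, path="/"))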
|
|
"""This file implements all-or-nothing package transformations.
An all-or-nothing package transformation is one in which some text is
transformed into message blocks, such that all blocks must be obtained before
the reverse transformation can be applied. Thus, if any blocks are corrupted
or lost, the original message cannot be reproduced.
An all-or-nothing package transformation is not encryption, although a block
cipher algorithm is used. The encryption key is randomly generated and is
extractable from the message blocks.
This class implements the All-Or-Nothing package transformation algorithm
described in:
Ronald L. Rivest. "All-Or-Nothing Encryption and The Package Transform"
http://theory.lcs.mit.edu/~rivest/fusion.pdf
"""
__revision__ = "$Id: AllOrNothing.py,v 1.8 2003/02/28 15:23:20 akuchling Exp $"
import operator
import string
from Crypto.Util.number import bytes_to_long, long_to_bytes
from functools import reduce
class AllOrNothing:
"""Class implementing the All-or-Nothing package transform.
Methods for subclassing:
_inventkey(key_size):
Returns a randomly generated key. Subclasses can use this to
implement better random key generating algorithms. The default
algorithm is probably not very cryptographically secure.
"""
def __init__(self, ciphermodule, mode=None, IV=None):
"""AllOrNothing(ciphermodule, mode=None, IV=None)
ciphermodule is a module implementing the cipher algorithm to
use. It must provide the PEP272 interface.
Note that the encryption key is randomly generated
automatically when needed. Optional arguments mode and IV are
passed directly through to the ciphermodule.new() method; they
are the feedback mode and initialization vector to use. All
three arguments must be the same for the object used to create
the digest, and to undigest'ify the message blocks.
"""
self.__ciphermodule = ciphermodule
self.__mode = mode
self.__IV = IV
self.__key_size = ciphermodule.key_size
if self.__key_size == 0:
self.__key_size = 16
__K0digit = chr(0x69)
def digest(self, text):
"""digest(text:string) : [string]
Perform the All-or-Nothing package transform on the given
string. Output is a list of message blocks describing the
transformed text, where each block is a string of bit length equal
to the ciphermodule's block_size.
"""
# generate a random session key and K0, the key used to encrypt the
        # hash blocks. Rivest calls this a fixed, publicly-known encryption
# key, but says nothing about the security implications of this key or
# how to choose it.
key = self._inventkey(self.__key_size)
K0 = self.__K0digit * self.__key_size
# we need two cipher objects here, one that is used to encrypt the
# message blocks and one that is used to encrypt the hashes. The
# former uses the randomly generated key, while the latter uses the
# well-known key.
mcipher = self.__newcipher(key)
hcipher = self.__newcipher(K0)
# Pad the text so that its length is a multiple of the cipher's
# block_size. Pad with trailing spaces, which will be eliminated in
# the undigest() step.
block_size = self.__ciphermodule.block_size
padbytes = block_size - (len(text) % block_size)
text = text + ' ' * padbytes
# Run through the algorithm:
# s: number of message blocks (size of text / block_size)
# input sequence: m1, m2, ... ms
# random key K' (`key' in the code)
# Compute output sequence: m'1, m'2, ... m's' for s' = s + 1
# Let m'i = mi ^ E(K', i) for i = 1, 2, 3, ..., s
# Let m's' = K' ^ h1 ^ h2 ^ ... hs
# where hi = E(K0, m'i ^ i) for i = 1, 2, ... s
#
# The one complication I add is that the last message block is hard
# coded to the number of padbytes added, so that these can be stripped
# during the undigest() step
        s = len(text) // block_size   # text was padded to a multiple of block_size
blocks = []
hashes = []
for i in range(1, s+1):
start = (i-1) * block_size
end = start + block_size
mi = text[start:end]
assert len(mi) == block_size
cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
mticki = bytes_to_long(mi) ^ bytes_to_long(cipherblock)
blocks.append(mticki)
# calculate the hash block for this block
hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
hashes.append(bytes_to_long(hi))
# Add the padbytes length as a message block
i = i + 1
cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
mticki = padbytes ^ bytes_to_long(cipherblock)
blocks.append(mticki)
# calculate this block's hash
hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
hashes.append(bytes_to_long(hi))
# Now calculate the last message block of the sequence 1..s'. This
# will contain the random session key XOR'd with all the hash blocks,
# so that for undigest(), once all the hash blocks are calculated, the
# session key can be trivially extracted. Calculating all the hash
# blocks requires that all the message blocks be received, thus the
# All-or-Nothing algorithm succeeds.
mtick_stick = bytes_to_long(key) ^ reduce(operator.xor, hashes)
blocks.append(mtick_stick)
# we convert the blocks to strings since in Python, byte sequences are
# always represented as strings. This is more consistent with the
# model that encryption and hash algorithms always operate on strings.
return list(map(long_to_bytes, blocks))
def undigest(self, blocks):
"""undigest(blocks : [string]) : string
Perform the reverse package transformation on a list of message
blocks. Note that the ciphermodule used for both transformations
must be the same. blocks is a list of strings of bit length
equal to the ciphermodule's block_size.
"""
# better have at least 2 blocks, for the padbytes package and the hash
# block accumulator
if len(blocks) < 2:
raise ValueError("List must be at least length 2.")
# blocks is a list of strings. We need to deal with them as long
# integers
blocks = list(map(bytes_to_long, blocks))
# Calculate the well-known key, to which the hash blocks are
# encrypted, and create the hash cipher.
K0 = self.__K0digit * self.__key_size
hcipher = self.__newcipher(K0)
# Since we have all the blocks (or this method would have been called
        # prematurely), we can calculate all the hash blocks.
hashes = []
for i in range(1, len(blocks)):
mticki = blocks[i-1] ^ i
hi = hcipher.encrypt(long_to_bytes(mticki))
hashes.append(bytes_to_long(hi))
# now we can calculate K' (key). remember the last block contains
# m's' which we don't include here
key = blocks[-1] ^ reduce(operator.xor, hashes)
# and now we can create the cipher object
mcipher = self.__newcipher(long_to_bytes(key))
block_size = self.__ciphermodule.block_size
# And we can now decode the original message blocks
parts = []
for i in range(1, len(blocks)):
cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
mi = blocks[i-1] ^ bytes_to_long(cipherblock)
parts.append(mi)
# The last message block contains the number of pad bytes appended to
# the original text string, such that its length was an even multiple
# of the cipher's block_size. This number should be small enough that
# the conversion from long integer to integer should never overflow
padbytes = int(parts[-1])
text = string.join(list(map(long_to_bytes, parts[:-1])), '')
return text[:-padbytes]
def _inventkey(self, key_size):
# TBD: Not a very secure algorithm. Eventually, I'd like to use JHy's
# kernelrand module
import time
from Crypto.Util import randpool
# TBD: key_size * 2 to work around possible bug in RandomPool?
pool = randpool.RandomPool(key_size * 2)
while key_size > pool.entropy:
pool.add_event()
# we now have enough entropy in the pool to get a key_size'd key
return pool.get_bytes(key_size)
def __newcipher(self, key):
if self.__mode is None and self.__IV is None:
return self.__ciphermodule.new(key)
elif self.__IV is None:
return self.__ciphermodule.new(key, self.__mode)
else:
return self.__ciphermodule.new(key, self.__mode, self.__IV)
if __name__ == '__main__':
import sys
import getopt
import base64
usagemsg = '''\
Test module usage: %(program)s [-c cipher] [-l] [-h]
Where:
--cipher module
-c module
Cipher module to use. Default: %(ciphermodule)s
--aslong
-l
Print the encoded message blocks as long integers instead of base64
encoded strings
--help
-h
Print this help message
'''
ciphermodule = 'AES'
aslong = 0
def usage(code, msg=None):
if msg:
print(msg)
print(usagemsg % {'program': sys.argv[0],
'ciphermodule': ciphermodule})
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:],
'c:l', ['cipher=', 'aslong'])
except getopt.error as msg:
usage(1, msg)
if args:
usage(1, 'Too many arguments')
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-c', '--cipher'):
ciphermodule = arg
elif opt in ('-l', '--aslong'):
aslong = 1
# ugly hack to force __import__ to give us the end-path module
module = __import__('Crypto.Cipher.'+ciphermodule, None, None, ['new'])
a = AllOrNothing(module)
print('Original text:\n==========')
print(__doc__)
print('==========')
msgblocks = a.digest(__doc__)
print('message blocks:')
    for i, blk in enumerate(msgblocks):
# base64 adds a trailing newline
print(' %3d' % i, end=' ')
if aslong:
print(bytes_to_long(blk))
else:
print(base64.encodestring(blk)[:-1])
#
# get a new undigest-only object so there's no leakage
b = AllOrNothing(module)
text = b.undigest(msgblocks)
if text == __doc__:
print('They match!')
else:
print('They differ!')
|
|
import operator
from merc import channel
from merc import errors
from merc import feature
from merc import message
from merc import mode
MAX_MODES = 4
class ModeFeature(feature.Feature):
NAME = __name__
install = ModeFeature.install
def show_modes(target, modes):
flags = []
args = []
for k, mode_factory in sorted(modes.items(), key=operator.itemgetter(0)):
m = mode_factory(target)
value = m.get()
if value:
flags.append(m.CHAR)
if value is not True:
args.append(str(value))
return "+" + "".join(flags), args
class UmodeIs(message.Reply):
NAME = "221"
MIN_ARITY = 1
def __init__(self, flags, *args):
self.flags = flags
self.args = list(args)
def as_reply_params(self):
return [self.flags] + self.args
class ChannelModeIs(message.Reply):
NAME = "324"
MIN_ARITY = 2
def __init__(self, channel_name, flags, *args):
self.channel_name = channel_name
self.flags = flags
self.args = list(args)
def as_reply_params(self):
return [self.channel_name, self.flags] + self.args
class CreationTime(message.Reply):
NAME = "329"
MIN_ARITY = 2
def __init__(self, channel_name, time, *args):
self.channel_name = channel_name
self.time = time
def as_reply_params(self):
return [self.channel_name, self.time]
class _Mode(message.Command):
def __init__(self, target, flags=None, *args):
self.target = target
self.flags = flags
self.args = list(args)
def as_command_params(self):
return [self.target, self.flags] + list(self.args)
@staticmethod
def _expand_modes(flags, args, modes):
expanded = []
args_iter = iter(args)
op = "+"
for c in flags:
if len(expanded) > MAX_MODES:
break
if c in "+-":
op = c
continue
arg = None
try:
m = modes[c]
except KeyError:
raise errors.UnknownMode(c)
if m.TAKES_PARAM:
try:
arg = next(args_iter)
except StopIteration:
pass
expanded.append((m, op, arg))
return expanded
@staticmethod
def _coalesce_modes(applied):
flags = ""
args = []
last_op = None
for m, op, arg in applied:
if op != last_op:
flags += op
last_op = op
flags += m.CHAR
if arg is not None:
args.append(str(arg))
return flags, args
@message.Command.requires_registration
def handle_for(self, app, user, prefix):
applied = []
if channel.Channel.is_channel_name(self.target):
try:
chan = app.channels.get(self.target)
except errors.NoSuchNick:
raise errors.NoSuchChannel(self.target)
expanded = self._expand_modes(self.flags, self.args,
app.channels.modes)
self.check_can_set_channel_modes(app, user, chan, expanded)
for mode_factory, op, arg in expanded:
m = mode_factory(chan)
if (op == "+" and m.set(app, user, arg)) or \
(op == "-" and m.unset(app, user, arg)):
applied.append((mode_factory, op, arg))
if applied:
flags, args = self._coalesce_modes(applied)
chan.broadcast(None, self.get_prefix(app, user),
Mode(chan.name, flags, *args))
else:
target = app.users.get(self.target)
try:
expanded = self._expand_modes(self.flags, self.args,
app.users.modes)
except errors.UnknownMode as e:
raise errors.UmodeUnknownFlag(e.param)
self.check_can_set_user_modes(app, user, target, expanded)
for mode_factory, op, arg in expanded:
m = mode_factory(target)
if (op == "+" and m.set(app, target, arg)) or \
(op == "-" and m.unset(app, target, arg)):
applied.append((mode_factory, op, arg))
if applied:
flags, args = self._coalesce_modes(applied)
target.send(self.get_prefix(app, user),
Mode(target.nickname, flags, *args))
@ModeFeature.register_user_command
class Mode(_Mode):
NAME = "MODE"
MIN_ARITY = 1
@message.Command.requires_registration
def handle_for(self, app, user, prefix):
if self.flags is None:
if channel.Channel.is_channel_name(self.target):
try:
chan = app.channels.get(self.target)
except errors.NoSuchNick:
raise errors.NoSuchChannel(self.target)
flags, args = show_modes(chan, app.channels.modes)
user.send_reply(ChannelModeIs(chan.name, flags, *args))
else:
target = app.users.get(self.target)
if target is not user:
raise errors.UsersDontMatch
flags, args = show_modes(target, app.users.modes)
user.send_reply(UmodeIs(flags, *args))
else:
super().handle_for(app, user, prefix)
def check_can_set_channel_modes(self, app, user, channel, modes):
for m, op, arg in modes:
if issubclass(m, mode.ListMode) and arg is None:
continue
m(channel).check(app, user, arg)
return
def check_can_set_user_modes(self, app, user, target, modes):
for m, op, arg in modes:
if issubclass(m, mode.ListMode) and arg is None:
continue
m(target).check(app, user, arg)
return
def get_prefix(self, app, user):
return user.hostmask
@ModeFeature.register_user_command
class SAMode(_Mode):
NAME = "SAMODE"
MIN_ARITY = 2
def check_can_set_channel_modes(self, app, user, channel, modes):
user.check_is_irc_operator()
def check_can_set_user_modes(self, app, user, target, modes):
user.check_is_irc_operator()
def get_prefix(self, app, user):
return app.server.name
@ModeFeature.hook("user.welcome")
def send_modes_on_welcome(app, user):
flags, args = show_modes(user, app.users.modes)
if flags != "+":
user.send(user.prefix, Mode(user.nickname, flags, *args))
@ModeFeature.hook("channel.join")
def send_timestamp_on_join(app, user, target, channel):
target.send_reply(CreationTime(channel.name,
str(int(channel.creation_time.timestamp()))))
@ModeFeature.hook("channel.join_new")
def send_channel_modes_on_new_join(app, user, target, channel):
flags, args = show_modes(channel, app.channels.modes)
target.send_reply(Mode(channel.name, flags, *args))
@ModeFeature.hook("user.mode_change")
def send_mode_on_user_mode_change(app, user, applied):
flags, args = Mode._coalesce_modes(applied)
user.send(user.prefix, Mode(user.nickname, flags, *args))
@ModeFeature.hook("server.isupport.modify")
def modify_isupport(app, isupport):
list_modes = set()
param_modes = set()
set_with_param_modes = set()
flag_modes = set()
for m in app.channels.modes.values():
if issubclass(m, mode.ListMode):
list_modes.add(m.CHAR)
elif issubclass(m, mode.ParamMode):
param_modes.add(m.CHAR)
elif issubclass(m, mode.SetWithParamMode):
set_with_param_modes.add(m.CHAR)
elif issubclass(m, mode.FlagMode):
flag_modes.add(m.CHAR)
isupport["MODES"] = MAX_MODES
isupport["CHANMODES"] = ",".join(["".join(sorted(list_modes)),
"".join(sorted(param_modes)),
"".join(sorted(set_with_param_modes)),
"".join(sorted(flag_modes))])
isupport["MAXLIST"] = "{}:{}".format("".join(sorted(list_modes)),
mode.ListMode.MAX_ITEMS)
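if __name__ == "__main__":
    # Illustration sketch: hypothetical stand-in mode classes (the real ones
    # live in merc.mode) showing how a raw flag string is expanded and then
    # coalesced back into canonical "+flags args" form.
    class FakeOper:
        CHAR = "o"
        TAKES_PARAM = True

    class FakeModerated:
        CHAR = "m"
        TAKES_PARAM = False

    fake_modes = {"o": FakeOper, "m": FakeModerated}
    expanded = _Mode._expand_modes("+om", ["alice"], fake_modes)
    # -> [(FakeOper, "+", "alice"), (FakeModerated, "+", None)]
    flags, args = _Mode._coalesce_modes(expanded)
    print(flags, args)  # +om ['alice']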
|
|
import numpy as np
from .base import PlotChecker, InvalidPlotError
class BarPlotChecker(PlotChecker):
"""A plot checker for bar plots.
Parameters
----------
axis : ``matplotlib.axes.Axes`` object
A set of matplotlib axes (e.g. obtained through ``plt.gca()``)
"""
def __init__(self, axis):
"""Initialize the bar plot checker."""
super(BarPlotChecker, self).__init__(axis)
self._patches = np.array(self.axis.patches)
self._patches = self._patches[np.argsort([p.get_x() for p in self._patches])]
if len(self._patches) == 0:
raise InvalidPlotError("no data found")
def _parse_expected_attr(self, attr_name, attr_val):
"""Ensure that the given expected attribute values are in the right shape."""
if attr_name in ('colors', 'edgecolors'):
            # if it's a color attribute, first check whether a single color was
            # given -- if not, the conversion below will raise an error and we
            # fall back to iterating over the multiple colors that were given
try:
attr_val = np.array([self._color2rgb(attr_val)])
except (ValueError, TypeError):
attr_val = np.array([self._color2rgb(x) for x in attr_val])
elif not hasattr(attr_val, '__iter__'):
# if it's not a color, then just make sure we have an array
attr_val = np.array([attr_val])
# tile the given values if we've only been given one, so it's the same
# shape as the data
if len(attr_val) == 1:
attr_val = self._tile_or_trim(self.centers, attr_val)
return attr_val
def assert_num_bars(self, num_bars):
"""Assert that the plot has the given number of bars.
Parameters
----------
num_bars : int
"""
if num_bars != len(self._patches):
raise AssertionError(
"Plot has incorrect number of bars: {} (expected {})".format(
len(self._patches), num_bars))
@property
def centers(self):
"""The centers of the plotted bars."""
return np.array([p.get_x() + (p.get_width() / 2) for p in self._patches])
def assert_centers_equal(self, centers):
"""Assert that the given centers are equivalent to the plotted
:attr:`~plotchecker.BarPlotChecker.centers`.
Parameters
----------
centers : 1-D array-like
The expected centers. The number of elements should be equal to the
(expected) number of plotted bars, or just a single value (which
will then be applied to all bars).
"""
np.testing.assert_equal(
self.centers,
self._parse_expected_attr("centers", centers))
def assert_centers_allclose(self, centers, **kwargs):
"""Assert that the given centers are almost equal to the plotted
:attr:`~plotchecker.BarPlotChecker.centers`.
Parameters
----------
centers : 1-D array-like
The expected centers. The number of elements should be equal to the
(expected) number of plotted bars, or just a single value (which
will then be applied to all bars).
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.centers,
self._parse_expected_attr("centers", centers),
**kwargs)
@property
def heights(self):
"""The heights of the plotted bars."""
return np.array([p.get_height() for p in self._patches])
def assert_heights_equal(self, heights):
"""Assert that the given heights are equivalent to the plotted
:attr:`~plotchecker.BarPlotChecker.heights`.
Parameters
----------
heights : 1-D array-like
The expected heights. The number of elements should be equal to the
(expected) number of plotted bars, or just a single value (which
will then be applied to all bars).
"""
np.testing.assert_equal(
self.heights,
self._parse_expected_attr("heights", heights))
def assert_heights_allclose(self, heights, **kwargs):
"""Assert that the given heights are almost equal to the plotted
:attr:`~plotchecker.BarPlotChecker.heights`.
Parameters
----------
heights : 1-D array-like
The expected heights. The number of elements should be equal to the
(expected) number of plotted bars, or just a single value (which
will then be applied to all bars).
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.heights,
self._parse_expected_attr("heights", heights),
**kwargs)
@property
def widths(self):
"""The widths of the plotted bars."""
return np.array([p.get_width() for p in self._patches])
def assert_widths_equal(self, widths):
"""Assert that the given widths are equivalent to the plotted
:attr:`~plotchecker.BarPlotChecker.widths`.
Parameters
----------
widths : 1-D array-like
The expected widths. The number of elements should be equal to the
(expected) number of plotted bars, or just a single value (which
will then be applied to all bars).
"""
np.testing.assert_equal(
self.widths,
self._parse_expected_attr("widths", widths))
def assert_widths_allclose(self, widths, **kwargs):
"""Assert that the given widths are almost equal to the plotted
:attr:`~plotchecker.BarPlotChecker.widths`.
Parameters
----------
widths : 1-D array-like
The expected widths. The number of elements should be equal to the
(expected) number of plotted bars, or just a single value (which
will then be applied to all bars).
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.widths,
self._parse_expected_attr("widths", widths),
**kwargs)
@property
def bottoms(self):
"""The y-coordinates of the bottoms of the plotted bars."""
return np.array([p.get_y() for p in self._patches])
def assert_bottoms_equal(self, bottoms):
"""Assert that the given bottoms are equivalent to the plotted
:attr:`~plotchecker.BarPlotChecker.bottoms`.
Parameters
----------
bottoms : 1-D array-like
The expected bottoms. The number of elements should be equal to the
(expected) number of plotted bars, or just a single value (which
will then be applied to all bars).
"""
np.testing.assert_equal(
self.bottoms,
self._parse_expected_attr("bottoms", bottoms))
def assert_bottoms_allclose(self, bottoms, **kwargs):
"""Assert that the given bottoms are almost equal to the plotted
:attr:`~plotchecker.BarPlotChecker.bottoms`.
Parameters
----------
bottoms : 1-D array-like
The expected bottoms. The number of elements should be equal to the
(expected) number of plotted bars, or just a single value (which
will then be applied to all bars).
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.bottoms,
self._parse_expected_attr("bottoms", bottoms),
**kwargs)
@property
def colors(self):
"""The colors of the plotted bars."""
return np.array([self._color2rgb(p.get_facecolor()) for p in self._patches])
def assert_colors_equal(self, colors):
"""Assert that the given colors are equivalent to the plotted
:attr:`~plotchecker.BarPlotChecker.colors`.
Parameters
----------
colors : single color, or list of expected colors
Each color can be either a matplotlib color name (e.g. ``'r'`` or
``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or
a 4-tuple RGBA color.
"""
np.testing.assert_equal(
self.colors,
self._parse_expected_attr("colors", colors))
def assert_colors_allclose(self, colors, **kwargs):
"""Assert that the given colors are almost equal to the plotted
:attr:`~plotchecker.BarPlotChecker.colors`.
Parameters
----------
        colors : single color, or list of expected colors
Each color can be either a matplotlib color name (e.g. ``'r'`` or
``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or
a 4-tuple RGBA color.
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.colors,
self._parse_expected_attr("colors", colors),
**kwargs)
@property
def edgecolors(self):
"""The edge colors of the plotted bars."""
return np.array([self._color2rgb(p.get_edgecolor()) for p in self._patches])
def assert_edgecolors_equal(self, edgecolors):
"""Assert that the given edgecolors are equivalent to the plotted
:attr:`~plotchecker.BarPlotChecker.edgecolors`.
Parameters
----------
edgecolors : single color, or list of expected edge colors
Each color can be either a matplotlib color name (e.g. ``'r'`` or
``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or
a 4-tuple RGBA color.
"""
np.testing.assert_equal(
self.edgecolors,
self._parse_expected_attr("edgecolors", edgecolors))
def assert_edgecolors_allclose(self, edgecolors, **kwargs):
"""Assert that the given edgecolors are almost equal to the plotted
:attr:`~plotchecker.BarPlotChecker.edgecolors`.
Parameters
----------
edgecolors : single color, or list of expected edge colors
Each color can be either a matplotlib color name (e.g. ``'r'`` or
``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or
a 4-tuple RGBA color.
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.edgecolors,
self._parse_expected_attr("edgecolors", edgecolors),
**kwargs)
@property
def alphas(self):
"""The alpha values of the plotted bars."""
all_alphas = []
for p in self._patches:
if p.get_alpha() is None:
alpha = self._color2alpha(p.get_facecolor())
else:
alpha = p.get_alpha()
all_alphas.append(alpha)
return np.array(all_alphas)
def assert_alphas_equal(self, alphas):
"""Assert that the given alphas are equivalent to the plotted
:attr:`~plotchecker.BarPlotChecker.alphas`.
Parameters
----------
alphas : 1-D array-like
The expected alphas. The number of elements should be equal to
the (expected) number of plotted bars, or just a single value (which
will then be applied to all bars).
"""
np.testing.assert_equal(
self.alphas,
self._parse_expected_attr("alphas", alphas))
def assert_alphas_allclose(self, alphas, **kwargs):
"""Assert that the given alphas are almost equal to the plotted
:attr:`~plotchecker.BarPlotChecker.alphas`.
Parameters
----------
alphas : 1-D array-like
The expected alphas. The number of elements should be equal to the
(expected) number of plotted bars, or just a single value (which
will then be applied to all bars).
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.alphas,
self._parse_expected_attr("alphas", alphas),
**kwargs)
@property
def linewidths(self):
"""The line widths of the plotted bars."""
return np.array([p.get_linewidth() for p in self._patches])
def assert_linewidths_equal(self, linewidths):
"""Assert that the given linewidths are equivalent to the plotted
:attr:`~plotchecker.BarPlotChecker.linewidths`.
Parameters
----------
linewidths : 1-D array-like
The expected linewidths. The number of elements should be equal to
the (expected) number of plotted bars, or just a single value (which
will then be applied to all bars).
"""
np.testing.assert_equal(
self.linewidths,
self._parse_expected_attr("linewidths", linewidths))
def assert_linewidths_allclose(self, linewidths, **kwargs):
"""Assert that the given linewidths are almost equal to the plotted
:attr:`~plotchecker.BarPlotChecker.linewidths`.
Parameters
----------
linewidths : 1-D array-like
The expected linewidths. The number of elements should be equal to the
(expected) number of plotted bars, or just a single value (which
will then be applied to all bars).
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.linewidths,
self._parse_expected_attr("linewidths", linewidths),
**kwargs)
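# A minimal usage sketch of the checker above (not part of the library);
# matplotlib is assumed to be installed and the plotted values are arbitrary.
def _usage_sketch():
    """Plot three bars and verify them with BarPlotChecker."""
    import matplotlib.pyplot as plt
    _, ax = plt.subplots()
    ax.bar([0, 1, 2], [3, 5, 2], width=0.8, color='r')
    pc = BarPlotChecker(ax)
    pc.assert_num_bars(3)
    pc.assert_heights_equal([3, 5, 2])
    pc.assert_widths_equal(0.8)   # a single value is tiled across all bars
    pc.assert_colors_equal('r')   # likewise for a single color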
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks in multi-worker training with TF2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distributed_file_utils
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base as test_base
from tensorflow.python.keras import callbacks
from tensorflow.python.keras.distribute import multi_worker_testing_utils
from tensorflow.python.keras.distribute import multi_worker_training_state as training_state
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
def _model_setup(test_obj, file_format):
"""Set up a MNIST Keras model for testing purposes.
This function builds a MNIST Keras model and returns relevant information
for testing.
Args:
test_obj: The `TestCase` testing object.
file_format: File format for checkpoints. 'tf' or 'h5'.
Returns:
A tuple of (model, saving_filepath, train_ds, steps) where train_ds is
the training dataset.
"""
batch_size = 64
steps = 2
with collective_strategy.CollectiveAllReduceStrategy().scope():
# TODO(b/142509827): In rare cases this errors out at C++ level with the
# "Connect failed" error message.
train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
batch_size, steps)
model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
# Pass saving_filepath from the parent thread to ensure every worker has the
# same filepath to save.
saving_filepath = os.path.join(test_obj.get_temp_dir(),
'checkpoint.' + file_format)
return model, saving_filepath, train_ds, steps
class KerasCallbackMultiProcessTest(parameterized.TestCase, test.TestCase):
@combinations.generate(
combinations.combine(
mode=['eager'],
file_format=['h5', 'tf'],
save_weights_only=[True, False]))
def test_model_checkpoint_saves_on_chief_but_not_otherwise(
self, file_format, mode, save_weights_only):
def proc_model_checkpoint_saves_on_chief_but_not_otherwise(
test_obj, file_format):
model, saving_filepath, train_ds, steps = _model_setup(
test_obj, file_format)
num_epoch = 2
extension = os.path.splitext(saving_filepath)[1]
      # Incorporate task type/index information in saving_filepath to ensure
      # every worker has a unique path. Note that in the normal use case the
      # saving_filepath will be the same for all workers, but we use different
      # ones here just to verify that the chief saves the checkpoint while
      # non-chief workers do not.
saving_filepath = os.path.join(
test_obj.get_temp_dir(), 'checkpoint_%s_%d%s' %
(test_base.get_task_type(), test_base.get_task_index(), extension))
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(training_state.checkpoint_exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
validation_data=train_ds,
validation_steps=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath, save_weights_only=save_weights_only)
])
# If it's chief, the model should be saved; if not, the model shouldn't.
test_obj.assertEqual(
training_state.checkpoint_exists(saving_filepath),
test_base.is_chief())
      # If it's chief, the model should be saved (`write_filepath` simply
      # returns `saving_filepath`); if not, i.e. for non-chief workers, the
      # temporary path generated by `write_filepath` should no longer contain
      # a checkpoint, because it has already been deleted.
test_obj.assertEqual(
training_state.checkpoint_exists(
distributed_file_utils.write_filepath(
saving_filepath, model._distribution_strategy)),
test_base.is_chief())
multi_process_runner.run(
proc_model_checkpoint_saves_on_chief_but_not_otherwise,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self, file_format))
@combinations.generate(combinations.combine(mode=['eager']))
def test_model_checkpoint_works_with_same_file_path(self, mode):
def proc_model_checkpoint_works_with_same_file_path(
test_obj, saving_filepath):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 2
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(file_io.file_exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.ModelCheckpoint(filepath=saving_filepath)])
test_obj.assertTrue(file_io.file_exists(saving_filepath))
saving_filepath = os.path.join(self.get_temp_dir(), 'checkpoint')
multi_process_runner.run(
proc_model_checkpoint_works_with_same_file_path,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self, saving_filepath))
@combinations.generate(combinations.combine(mode=['eager']))
def test_tensorboard_saves_on_chief_but_not_otherwise(self, mode):
def proc_tensorboard_saves_on_chief_but_not_otherwise(test_obj):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 2
      # Incorporate task type/index information in saving_filepath to ensure
      # every worker has a unique path. Note that in the normal use case the
      # saving_filepath will be the same for all workers, but we use different
      # ones here just to verify that the chief saves summaries while
      # non-chief workers do not.
saving_filepath = os.path.join(
test_obj.get_temp_dir(), 'logfile_%s_%d' %
(test_base.get_task_type(), test_base.get_task_index()))
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(file_io.file_exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)])
# If it's chief, the summaries should be saved in the filepath; if not,
# the directory should be empty (although created). Using
# `file_io.list_directory()` since the directory may be created at this
# point.
test_obj.assertEqual(
bool(file_io.list_directory(saving_filepath)), test_base.is_chief())
multi_process_runner.run(
proc_tensorboard_saves_on_chief_but_not_otherwise,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self,))
@combinations.generate(combinations.combine(mode=['eager']))
def test_tensorboard_can_still_save_to_temp_even_if_it_exists(self, mode):
def proc_tensorboard_can_still_save_to_temp_even_if_it_exists(test_obj):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 2
saving_filepath = os.path.join(test_obj.get_temp_dir(),
'logfile_%s' % (test_base.get_task_type()))
saving_filepath_for_temp = os.path.join(saving_filepath, 'workertemp_1')
os.mkdir(saving_filepath)
os.mkdir(saving_filepath_for_temp)
# Verifies that even if `saving_filepath_for_temp` exists, tensorboard
# can still save to temporary directory.
test_obj.assertTrue(file_io.file_exists(saving_filepath_for_temp))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)])
multi_process_runner.run(
proc_tensorboard_can_still_save_to_temp_even_if_it_exists,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self,))
@combinations.generate(combinations.combine(mode=['eager']))
def test_tensorboard_works_with_same_file_path(self, mode):
def proc_tensorboard_works_with_same_file_path(test_obj, saving_filepath):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 2
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(file_io.file_exists(saving_filepath))
multi_process_runner.barrier().wait()
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)])
multi_process_runner.barrier().wait()
test_obj.assertTrue(file_io.list_directory(saving_filepath))
saving_filepath = os.path.join(self.get_temp_dir(), 'logfile')
multi_process_runner.run(
proc_tensorboard_works_with_same_file_path,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self, saving_filepath))
@combinations.generate(combinations.combine(mode=['eager']))
def test_early_stopping(self, mode):
def proc_early_stopping(test_obj):
class EpochCounterCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs):
self.last_epoch = epoch
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
epoch_counter_cbk = EpochCounterCallback()
cbks = [
callbacks.EarlyStopping(
monitor='loss', min_delta=0.05, patience=1, verbose=1),
epoch_counter_cbk
]
      # Empirically, `model.fit()` is expected to terminate around the 22nd
      # epoch. Assert that it stops before the 50th epoch to avoid flakiness
      # and keep the test predictable.
model.fit(x=train_ds, epochs=100, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertLess(epoch_counter_cbk.last_epoch, 50)
multi_process_runner.run(
proc_early_stopping,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self,))
if __name__ == '__main__':
multi_process_runner.test_main(barrier_parties=2)
|
|
#!/usr/bin/env python
#
# License: BSD
# https://raw.githubusercontent.com/stonier/py_trees/devel/LICENSE
#
##############################################################################
# Imports
##############################################################################
import py_trees
import py_trees.console as console
from nose.tools import assert_raises
import time
##############################################################################
# Logging Level
##############################################################################
py_trees.logging.level = py_trees.logging.Level.DEBUG
logger = py_trees.logging.Logger("Nosetest")
##############################################################################
# Classes
##############################################################################
class InvalidSetup(py_trees.behaviour.Behaviour):
def setup(self, timeout):
        # A common mistake is to forget to return a boolean value from setup().
        # Composite behaviours will at least check that their children return
        # one and raise a TypeError if they do not.
pass
class DummyDecorator(py_trees.decorators.Decorator):
def __init__(self, child, name=py_trees.common.Name.AUTO_GENERATED):
super(DummyDecorator, self).__init__(name=name, child=child)
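# A minimal sketch of a decorator with an actual behavioural override (it is
# not exercised by the tests below); unlike DummyDecorator, it implements
# update() and simply mirrors the decorated child's status. The class name is
# illustrative only, not part of py_trees.
class PassThroughDecorator(py_trees.decorators.Decorator):
    def update(self):
        # self.decorated is the wrapped child behaviour
        return self.decorated.status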
##############################################################################
# Tests
##############################################################################
def test_set_name():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Set Name" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
child = py_trees.behaviours.Success(name="Woohoo")
named_decorator = DummyDecorator(name="Foo", child=child)
no_named_decorator = DummyDecorator(child=child)
print("\n--------- Assertions ---------\n")
print("named_decorator.name == Foo")
assert(named_decorator.name == "Foo")
print("no_named_decorator.name == DummyDecorator\\n[Woohoo]")
assert(no_named_decorator.name == "DummyDecorator\n[Woohoo]")
def test_invalid_child():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Invalid Child" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
print("\n--------- Assertions ---------\n")
print("TypeError is raised")
assert_raises(TypeError, DummyDecorator.__init__, child=5)
def test_invalid_setup():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Invalid Setup" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
parent = py_trees.decorators.Decorator(
name="Decorator",
child=InvalidSetup(name="Invalid Setup")
)
print("\n--------- Assertions ---------\n")
print("TypeError is raised")
with assert_raises(TypeError) as context:
parent.setup(timeout=15)
print("TypeError has message with substring 'NoneType'")
assert("NoneType" in str(context.exception))
def test_failure_is_success_tree():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Failure is Success Tree" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
root = py_trees.composites.Selector(name="Root")
failure = py_trees.behaviours.Failure(name="Failure")
failure_is_success = py_trees.decorators.FailureIsSuccess(
child=py_trees.behaviours.Failure()
)
root.add_child(failure)
root.add_child(failure_is_success)
py_trees.display.print_ascii_tree(root)
visitor = py_trees.visitors.DebugVisitor()
py_trees.tests.tick_tree(root, 1, 1, visitor, print_snapshot=True)
print("\n--------- Assertions ---------\n")
print("root.status == py_trees.common.Status.SUCCESS")
assert(root.status == py_trees.common.Status.SUCCESS)
print("failure.status == py_trees.common.Status.FAILURE")
assert(failure.status == py_trees.common.Status.FAILURE)
print("failure_is_success.status == py_trees.common.Status.SUCCESS")
assert(failure_is_success.status == py_trees.common.Status.SUCCESS)
def test_success_is_failure_tree():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Success is Failure Tree" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
root = py_trees.composites.Selector("Root")
failure = py_trees.behaviours.Failure(name="Failure")
success_is_failure = py_trees.decorators.SuccessIsFailure(
name="Success Is Failure",
child=py_trees.behaviours.Success()
)
root.add_child(failure)
root.add_child(success_is_failure)
py_trees.display.print_ascii_tree(root)
visitor = py_trees.visitors.DebugVisitor()
py_trees.tests.tick_tree(root, 1, 1, visitor, print_snapshot=True)
print("\n--------- Assertions ---------\n")
print("failure.status == py_trees.common.Status.FAILURE")
assert(failure.status == py_trees.common.Status.FAILURE)
print("success_is_failure.status == py_trees.common.Status.FAILURE")
assert(success_is_failure.status == py_trees.common.Status.FAILURE)
print("root.status == py_trees.common.Status.FAILURE")
assert(root.status == py_trees.common.Status.FAILURE)
def test_inverter():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Inverter" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
root = py_trees.composites.Sequence(name="Root")
selector = py_trees.composites.Selector(name="Selector")
failure = py_trees.behaviours.Failure(name="Failure")
success_inverter = py_trees.decorators.Inverter(child=py_trees.behaviours.Success())
success = py_trees.behaviours.Success(name="Success")
failure_inverter = py_trees.decorators.Inverter(py_trees.behaviours.Failure())
selector.add_child(failure)
selector.add_child(success_inverter)
selector.add_child(success)
root.add_child(selector)
root.add_child(failure_inverter)
py_trees.display.print_ascii_tree(root)
visitor = py_trees.visitors.DebugVisitor()
for i in range(0,2):
py_trees.tests.tick_tree(root, i, i, visitor, print_snapshot=True)
print("\n--------- Assertions ---------\n")
print("success.status == py_trees.common.Status.SUCCESS")
assert(success.status == py_trees.common.Status.SUCCESS)
print("failure_inverter.status == py_trees.common.Status.SUCCESS")
assert(failure_inverter.status == py_trees.common.Status.SUCCESS)
print("root.status == py_trees.common.Status.SUCCESS")
assert(root.status == py_trees.common.Status.SUCCESS)
print("failure.status == py_trees.common.Status.FAILURE")
assert(failure.status == py_trees.common.Status.FAILURE)
print("success_inverter.status == py_trees.common.Status.FAILURE")
assert(success_inverter.status == py_trees.common.Status.FAILURE)
def test_running_is_failure_tree():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Running is Failure Tree" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
root = py_trees.Selector(name="Root")
running = py_trees.meta.running_is_failure(py_trees.behaviours.Running)(name="Running")
failure = py_trees.meta.running_is_failure(py_trees.behaviours.Failure)(name="Failure")
success = py_trees.meta.running_is_failure(py_trees.behaviours.Success)(name="Success")
root.add_child(running)
root.add_child(failure)
root.add_child(success)
py_trees.display.print_ascii_tree(root)
visitor = py_trees.visitors.DebugVisitor()
py_trees.tests.tick_tree(root, 1, 1, visitor)
print("\n--------- Assertions ---------\n")
print("running.status == py_trees.Status.FAILURE")
assert(running.status == py_trees.Status.FAILURE)
print("failure.status == py_trees.Status.FAILURE")
assert(failure.status == py_trees.Status.FAILURE)
print("success.status == py_trees.Status.SUCCESS")
assert(success.status == py_trees.Status.SUCCESS)
print("root.status == py_trees.Status.SUCCESS")
assert(root.status == py_trees.Status.SUCCESS)
def test_inverter_sequence():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Inverter Sequence Tree" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
root = py_trees.composites.Selector(name="Root")
running = py_trees.decorators.RunningIsFailure(
child=py_trees.behaviours.Running()
)
failure = py_trees.decorators.RunningIsFailure(
child=py_trees.behaviours.Failure()
)
success = py_trees.decorators.RunningIsFailure(
child=py_trees.behaviours.Success()
)
root.add_child(running)
root.add_child(failure)
root.add_child(success)
py_trees.display.print_ascii_tree(root)
visitor = py_trees.visitors.DebugVisitor()
py_trees.tests.tick_tree(root, 1, 1, visitor, print_snapshot=True)
print("\n--------- Assertions ---------\n")
print("running.status == py_trees.common.Status.FAILURE")
assert(running.status == py_trees.common.Status.FAILURE)
print("failure.status == py_trees.common.Status.FAILURE")
assert(failure.status == py_trees.common.Status.FAILURE)
print("success.status == py_trees.common.Status.SUCCESS")
assert(success.status == py_trees.common.Status.SUCCESS)
print("root.status == py_trees.common.Status.SUCCESS")
assert(root.status == py_trees.common.Status.SUCCESS)
def test_timeout():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Timeout" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
running = py_trees.behaviours.Running(name="Running")
timeout = py_trees.decorators.Timeout(child=running, duration=0.2)
py_trees.display.print_ascii_tree(timeout)
visitor = py_trees.visitors.DebugVisitor()
# Test that it times out and re-initialises properly
for i in range(0,2):
py_trees.tests.tick_tree(timeout, 2*i+1, 2*i+1, visitor)
print("\n--------- Assertions ---------\n")
print("timeout.status == py_trees.common.Status.RUNNING")
assert(timeout.status == py_trees.common.Status.RUNNING)
print("running.status == py_trees.common.Status.RUNNING")
assert(running.status == py_trees.common.Status.RUNNING)
time.sleep(0.3)
py_trees.tests.tick_tree(timeout, 2*i+2, 2*i+2, visitor)
print("\n--------- Assertions ---------\n")
print("timeout.status == py_trees.common.Status.FAILURE")
assert(timeout.status == py_trees.common.Status.FAILURE)
print("running.status == py_trees.common.Status.INVALID")
assert(running.status == py_trees.common.Status.INVALID)
# test that it passes on success
count = py_trees.behaviours.Count(name="Count", fail_until=0, running_until=1, success_until=10, reset=False)
timeout = py_trees.decorators.Timeout(child=count, duration=0.2)
py_trees.display.print_ascii_tree(timeout)
py_trees.tests.tick_tree(timeout, 1, 1, visitor)
print("\n--------- Assertions ---------\n")
print("timeout.status == py_trees.common.Status.RUNNING")
assert(timeout.status == py_trees.common.Status.RUNNING)
print("count.status == py_trees.common.Status.RUNNING")
assert(count.status == py_trees.common.Status.RUNNING)
py_trees.tests.tick_tree(timeout, 2, 2, visitor)
print("\n--------- Assertions ---------\n")
print("timeout.status == py_trees.common.Status.SUCCESS")
assert(timeout.status == py_trees.common.Status.SUCCESS)
print("count.status == py_trees.common.Status.SUCCESS")
assert(count.status == py_trees.common.Status.SUCCESS)
# test that it passes on failure
failure = py_trees.behaviours.Failure()
timeout = py_trees.decorators.Timeout(child=failure, duration=0.2)
py_trees.display.print_ascii_tree(timeout)
py_trees.tests.tick_tree(timeout, 1, 1, visitor)
print("\n--------- Assertions ---------\n")
print("timeout.status == py_trees.common.Status.FAILURE")
assert(timeout.status == py_trees.common.Status.FAILURE)
print("failure.status == py_trees.common.Status.FAILURE")
assert(failure.status == py_trees.common.Status.FAILURE)
def test_condition():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Condition" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
child = py_trees.behaviours.Count(
name="Count",
fail_until=2,
running_until=2,
success_until=10,
reset=True
)
condition = py_trees.decorators.Condition(
child=child,
status=py_trees.common.Status.SUCCESS
)
visitor = py_trees.visitors.DebugVisitor()
py_trees.tests.tick_tree(condition, 1, 1, visitor, print_snapshot=True)
print("\n--------- Assertions ---------\n")
print("child.status == py_trees.common.Status.FAILURE")
assert(child.status == py_trees.common.Status.FAILURE)
print("condition.status == py_trees.common.Status.RUNNING")
assert(condition.status == py_trees.common.Status.RUNNING)
py_trees.tests.tick_tree(condition, 2, 2, visitor, print_snapshot=True)
print("\n--------- Assertions ---------\n")
print("child.status == py_trees.common.Status.FAILURE")
assert(child.status == py_trees.common.Status.FAILURE)
print("condition.status == py_trees.common.Status.RUNNING")
assert(condition.status == py_trees.common.Status.RUNNING)
py_trees.tests.tick_tree(condition, 3, 3, visitor, print_snapshot=True)
print("\n--------- Assertions ---------\n")
print("child.status == py_trees.common.Status.SUCCESS")
assert(child.status == py_trees.common.Status.SUCCESS)
print("condition.status == py_trees.common.Status.SUCCESS")
assert(condition.status == py_trees.common.Status.SUCCESS)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to distributed training.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.engine import partial_batch_padding_handler as padding_util
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def fit_distributed(model,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_freq=1):
"""Fit loop for Distribution Strategies."""
distributed_training_utils.validate_callbacks(callbacks, model.optimizer)
distributed_training_utils.validate_inputs(
x, y, model._distribution_strategy)
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray):
# Until support for partial batch is implemented across all
# functions and distribution strategy, we pass `mode` to selectively
    # relax the constraint to consume all the training samples.
steps_per_epoch, batch_size = (
distributed_training_utils.get_input_params(
model._distribution_strategy, first_x_value, steps_per_epoch,
batch_size, mode=ModeKeys.TRAIN))
batch_size = model._validate_or_infer_batch_size(
batch_size, steps_per_epoch, x)
dataset = model._distribution_standardize_user_data(
x, y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
validation_split=validation_split,
shuffle=shuffle)
val_dataset = None
if validation_data:
val_x, val_y, val_sample_weights = model._unpack_validation_data(
validation_data)
distributed_training_utils.validate_inputs(
val_x, val_y, model._distribution_strategy)
first_valx_value = nest.flatten(val_x)[0]
if isinstance(first_valx_value, np.ndarray):
validation_steps, _ = distributed_training_utils.get_input_params(
model._distribution_strategy, first_valx_value, validation_steps,
batch_size)
val_dataset = model._distribution_standardize_user_data(
val_x, val_y,
sample_weight=val_sample_weights,
class_weight=None,
batch_size=batch_size,
validation_split=validation_split,
shuffle=shuffle)
elif validation_split:
raise ValueError('validation_split argument is not supported with '
'distribution strategies.')
if distributed_training_utils.is_tpu_strategy(model._distribution_strategy):
return experimental_tpu_fit_loop(
model,
dataset,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_dataset=val_dataset,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
validation_freq=validation_freq)
else:
return training_arrays.fit_loop(
model,
dataset,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_dataset,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
validation_freq=validation_freq,
steps_name='steps_per_epoch')
def evaluate_distributed(model,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None):
"""Evaluate loop for Distribution Strategies."""
distributed_training_utils.validate_inputs(x, y, model._distribution_strategy)
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray):
steps, batch_size = distributed_training_utils.get_input_params(
model._distribution_strategy, first_x_value, steps, batch_size)
batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
dataset = model._distribution_standardize_user_data(
x, y,
sample_weight=sample_weight,
batch_size=batch_size)
if distributed_training_utils.is_tpu_strategy(model._distribution_strategy):
return experimental_tpu_test_loop(
model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
else:
return training_arrays.test_loop(
model,
inputs=dataset,
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks)
def predict_distributed(model,
x=None,
batch_size=None,
verbose=0,
steps=None,
callbacks=None):
"""Predict loop for Distribution Strategies."""
distributed_training_utils.validate_inputs(
x, None, model._distribution_strategy, allow_partial_batch=True)
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray):
steps, batch_size = distributed_training_utils.get_input_params(
model._distribution_strategy, first_x_value, steps,
batch_size, mode=ModeKeys.PREDICT)
batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
dataset = model._distribution_standardize_user_data(
x,
batch_size=batch_size,
repeat=False,
allow_partial_batch=True)
if distributed_training_utils.is_tpu_strategy(model._distribution_strategy):
return experimental_tpu_predict_loop(
model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
else:
return training_arrays.predict_loop(
model,
dataset,
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks)
def _make_step_fn(model, mode, strategy, output_labels):
"""Create step fn.
Arguments:
model: a Keras Model instance.
mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
strategy: a `tf.distribute.Strategy` instance.
output_labels: the output labels for the step function.
Returns:
A step function to run by `tf.distribute.Strategy`.
"""
def _per_device_execution_function(model):
exec_func = model._make_execution_function(mode)
return (exec_func.inputs, exec_func.outputs, exec_func.updates_op,
exec_func.session_kwargs)
def step_fn(ctx, inputs):
"""A step fn that returns update ops."""
if mode == ModeKeys.PREDICT:
targets = None
else:
inputs, targets = inputs
if model._compile_distribution:
distributed_training_utils.clone_model_on_replicas(
model, strategy, mode, inputs=inputs, targets=targets)
else:
distributed_training_utils._build_distributed_network(
model, strategy, mode, inputs, targets)
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = strategy.extended.call_for_each_replica(
_per_device_execution_function,
args=(distributed_training_utils.get_distributed_model(model, mode),))
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
strategy, grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args)
combined_fn = K.function(
all_inputs,
all_outputs,
updates=all_updates,
name='distributed_' + str(mode) + '_function',
**all_session_args)
for label, output in zip(output_labels, combined_fn.outputs):
if mode == ModeKeys.PREDICT:
ctx.set_last_step_output(label, output)
else:
if label == 'loss':
reduce_op = ds_reduce_util.ReduceOp.SUM
else:
          # We reduce all other metrics using mean for now. This is a
          # temporary workaround until new metrics are in place.
reduce_op = ds_reduce_util.ReduceOp.MEAN
ctx.set_last_step_output(label, output, reduce_op)
# TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
# feed_dict, session kwargs, run options, run_metadata for now. These should
# be handled appropriately
return combined_fn.updates_op
return step_fn
def experimental_tpu_fit_loop(model,
dataset,
epochs=100,
verbose=1,
callbacks=None,
initial_epoch=0,
steps_per_epoch=None,
val_dataset=None,
validation_steps=None,
validation_freq=1):
"""Fit loop for training with TPU DistributionStrategy.
Arguments:
model: Keras Model instance.
dataset: Dataset that returns inputs and targets
epochs: Number of times to iterate over the data
verbose: Integer, Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
val_dataset: Dataset for validation data.
validation_steps: Number of steps to run validation for
(only if doing validation from data tensors).
Ignored with the default value of `None`.
validation_freq: Only relevant if validation data is provided. Integer or
`collections.Container` instance (e.g. list, tuple, etc.). If an
integer, specifies how many training epochs to run before a new
validation run is performed, e.g. `validation_freq=2` runs
validation every 2 epochs. If a Container, specifies the epochs on
which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
validation at the end of the 1st, 2nd, and 10th epochs.
Returns:
      The trained model's `History` object (`model.history`).
Raises:
ValueError: in case of invalid arguments.
"""
mode = ModeKeys.TRAIN
# TODO(fchollet): add support for `steps_per_epoch=None` in TPU loops.
current_strategy = model._distribution_strategy
iterator = distributed_training_utils.get_iterator(dataset, current_strategy)
steps_per_epoch = training_utils.infer_steps_for_dataset(
dataset, steps_per_epoch, epochs, steps_name='steps_per_epoch')
if (current_strategy.extended.steps_per_run != 1 and
steps_per_epoch is None):
raise ValueError('`steps_per_epoch` should be specified when calling '
'`fit` on the model with TPUStrategy when '
'`steps_per_run` != 1 .')
scope = distributed_training_utils.distributed_scope(
strategy=current_strategy, learning_phase=1)
scope.__enter__()
out_labels = model.metrics_names or []
step_fn = _make_step_fn(model, ModeKeys.TRAIN, current_strategy, out_labels)
# Add initial dummy values for loss and other metric tensors.
initial_loop_values = {}
initial_loop_values['loss'] = constant_op.constant(1e7)
for name in model.metrics_names[1:]:
tensor = model._all_metrics_tensors[name]
initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)
use_steps = steps_per_epoch is not None
if use_steps:
iteration_value = min(steps_per_epoch,
current_strategy.extended.steps_per_run)
else:
iteration_value = current_strategy.extended.steps_per_run
steps_per_run = K.variable(
value=iteration_value,
dtype='int32',
name='steps_per_run')
ctx = current_strategy.extended.experimental_run_steps_on_iterator(
step_fn, iterator, iterations=steps_per_run,
initial_loop_values=initial_loop_values)
train_op = ctx.run_op
output_tensors = ctx.last_step_outputs
do_validation = bool(validation_steps)
if model._compile_distribution:
distributed_training_utils._copy_weights_to_distributed_model(model, mode)
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=do_validation,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
verbose=verbose,
count_mode='steps',
mode=mode)
# Calculate the steps each time on the device.
if use_steps:
steps_to_run = ([current_strategy.extended.steps_per_run] *
(steps_per_epoch //
current_strategy.extended.steps_per_run))
if steps_per_epoch % current_strategy.extended.steps_per_run:
steps_to_run.append(
steps_per_epoch % current_strategy.extended.steps_per_run)
target_steps = len(steps_to_run)
else:
target_steps = np.inf
callbacks._call_begin_hook(mode)
for epoch in range(initial_epoch, epochs):
distributed_training_utils._reset_metrics(model)
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
step_index = 0
prev_step_count = None
current_step = 0
while current_step < target_steps:
step_count = steps_to_run[current_step] if use_steps else 1
batch_logs = {'batch': step_index, 'size': 1, 'num_steps': step_count}
callbacks._call_batch_hook(mode, 'begin', step_index, batch_logs)
if prev_step_count is None or step_count != prev_step_count:
steps_per_run.load(step_count, K.get_session())
prev_step_count = step_count
try:
_, outputs = K.batch_get_value([train_op, output_tensors])
except errors.OutOfRangeError:
if use_steps:
logging.warning('Your dataset iterator ran out of data; '
'interrupting training. Make sure that your dataset '
'can generate at least `steps_per_epoch * epochs` '
'batches (in this case, %d batches).' %
                          (steps_per_epoch * epochs))
else:
target_steps = current_step
logging.info('Dataset iterator ran out of data. Inferring the '
'value of `steps_per_epoch` as %s .' % target_steps)
distributed_training_utils.initialize_iterator(iterator,
current_strategy)
break
batch_logs.update(outputs)
callbacks._call_batch_hook(mode, 'end', step_index, batch_logs)
step_index = step_index + step_count
current_step += 1
if callbacks.model.stop_training:
break
if (do_validation and
training_utils.should_run_validation(validation_freq, epoch)):
logging.info('Running validation at fit epoch: %s', epoch)
if model._compile_distribution:
# Since we create a new clone from the original model we need to copy
# the weights back to the original model before we can run validation.
distributed_training_utils._copy_weights_to_original_model(
model, ModeKeys.TRAIN)
val_outs = experimental_tpu_test_loop( # pylint: disable=undefined-variable
model,
val_dataset,
steps=validation_steps,
verbose=verbose,
callbacks=callbacks)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for label, val_out in zip(out_labels, val_outs):
epoch_logs['val_' + label] = val_out
callbacks.on_epoch_end(epoch, epoch_logs)
if callbacks.model.stop_training:
break
callbacks._call_end_hook(mode)
if model._compile_distribution:
# Copy the weights back from the replicated model to the original model.
distributed_training_utils._copy_weights_to_original_model(
model, ModeKeys.TRAIN)
scope.__exit__(None, None, None)
return model.history
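# A small sketch (not called by the loops in this module) of the
# `validation_freq` semantics described in the docstring above; the actual
# check run at the end of each epoch is `training_utils.should_run_validation`.
def _should_run_validation_sketch(validation_freq, epoch):
  """Returns True if validation should run after `epoch` (zero-based)."""
  one_indexed_epoch = epoch + 1
  if isinstance(validation_freq, int):
    # e.g. validation_freq=2 runs validation after epochs 2, 4, 6, ...
    return one_indexed_epoch % validation_freq == 0
  # e.g. validation_freq=[1, 2, 10] runs validation after those epochs only.
  return one_indexed_epoch in validation_freq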
def experimental_tpu_test_loop(model,
dataset,
verbose=0,
steps=None,
callbacks=None):
"""Test loop for evaluating with TPU DistributionStrategy.
Arguments:
model: Keras Model instance.
dataset: Dataset for input data.
verbose: Integer, Verbosity mode 0 or 1.
steps: Total number of steps (batches of samples)
          before declaring evaluation finished.
Ignored with the default value of `None`.
      callbacks: List of callbacks to be called during evaluation
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the outputs.
"""
mode = ModeKeys.TEST
current_strategy = model._distribution_strategy
iterator = distributed_training_utils.get_iterator(dataset,
current_strategy)
steps = training_utils.infer_steps_for_dataset(dataset, steps,
steps_name='steps')
scope = distributed_training_utils.distributed_scope(
strategy=current_strategy, learning_phase=0)
scope.__enter__()
out_labels = model.metrics_names
step_fn = _make_step_fn(model, ModeKeys.TEST, current_strategy, out_labels)
# Add initial dummy values for loss and other metric tensors.
initial_loop_values = {}
initial_loop_values['loss'] = constant_op.constant(1e7)
for name in model.metrics_names[1:]:
tensor = model._all_metrics_tensors[name]
initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)
# TODO(priyag): Use steps_per_run when we use new metrics as they will
# allow handling metric computation at each step using variables.
ctx = current_strategy.extended.experimental_run_steps_on_iterator(
step_fn, iterator, iterations=1,
initial_loop_values=initial_loop_values)
test_op = ctx.run_op
output_tensors = ctx.last_step_outputs
if verbose == 1:
progbar = Progbar(target=steps)
if model._compile_distribution:
distributed_training_utils._copy_weights_to_distributed_model(model, mode)
distributed_training_utils._reset_metrics(model)
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=False,
epochs=1,
steps_per_epoch=steps,
verbose=verbose,
count_mode='steps',
mode=ModeKeys.TEST)
callbacks._call_begin_hook(mode)
outs = [0.] * len(model.metrics_names)
if steps is not None:
target_steps = steps
else:
target_steps = np.inf
current_step = 0
while current_step < target_steps:
batch_logs = {'batch': current_step, 'size': 1}
callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
try:
_, batch_outs = K.batch_get_value([test_op, output_tensors])
except errors.OutOfRangeError:
if steps is not None:
        warning_msg = ('Make sure that your dataset can generate at least '
                       '`steps` batches (in this case, {} batches).'.format(
                           steps))
      else:
        warning_msg = 'Number of steps run: {} steps'.format(current_step)
logging.warning('Your dataset iterator ran out of data; '
'interrupting evaluation. ' + warning_msg)
target_steps = current_step
break
for i, label in enumerate(model.metrics_names):
if i == 0:
        # Loss is a stateless metric, so accumulate it across batches.
outs[i] += batch_outs[label]
else:
# For all stateful metrics, the aggregation is handled by mirrored vars.
outs[i] = batch_outs[label]
batch_logs = cbks.make_logs(model, batch_logs, outs, mode)
callbacks._call_batch_hook(mode, 'end', current_step, batch_logs)
    if verbose == 1:
progbar.update(current_step + 1)
current_step += 1
callbacks._call_end_hook(mode)
scope.__exit__(None, None, None)
  if outs:
    outs[0] /= target_steps
if len(outs) == 1:
return outs[0]
return outs
def experimental_tpu_predict_loop(model,
dataset,
verbose=0,
steps=None,
callbacks=None):
"""Predict loop for predicting with TPU DistributionStrategy.
Arguments:
model: Keras Model instance.
dataset: Dataset for input data.
verbose: Integer, Verbosity mode 0 or 1.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
      callbacks: List of callbacks to be called during prediction
Returns:
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
mode = ModeKeys.PREDICT
steps = training_utils.infer_steps_for_dataset(dataset, steps,
steps_name='steps')
dataset_fully_shaped = (distributed_training_utils.
is_dataset_shape_fully_defined(dataset))
padding_handler = None
if not dataset_fully_shaped:
# TODO(hongjunchoi): Investigate whether operations from
# PartialBatchPaddingHandler are unnecessarily pruned out
# during graph optimization.
padding_handler = padding_util.PartialBatchPaddingHandler(
model._feed_output_shapes)
batch_size, _, prefetch_buffer = input_lib._get_dataset_attributes(dataset)
padding_handler.padded_batch_size = batch_size
padding_handler.padding_mask = dataset.reduce(padding_handler.padding_mask,
padding_handler.update_mask)
dataset = dataset.map(padding_handler.pad_batch)
dataset = dataset.apply(batching.unbatch())
    # At this point, it is guaranteed that the dataset does not
# have partial batches. Thus, we set `drop_remainder=True` to
# get static shape information about the elements in the dataset.
dataset = dataset.batch(batch_size, drop_remainder=True)
if prefetch_buffer is not None:
dataset = dataset.prefetch(prefetch_buffer)
current_strategy = model._distribution_strategy
iterator = distributed_training_utils.get_iterator(dataset, current_strategy)
scope = distributed_training_utils.distributed_scope(
strategy=current_strategy, learning_phase=0)
scope.__enter__()
out_labels = model.output_names
step_fn = _make_step_fn(model, ModeKeys.PREDICT, current_strategy, out_labels)
# Add initial dummy values for outputs.
initial_loop_values = {}
batch_dimension = distributed_training_utils.get_batch_dimension(iterator)
for name, tensor in zip(model.output_names, model.outputs):
# TODO(priyag): This is a workaround as we do not know the batch dimension
# of the model's output at this point.
shape = tensor_shape.TensorShape(tensor.shape.dims)
shape.dims = [batch_dimension] + shape.dims[1:]
initial_loop_values[name] = array_ops.zeros(shape, tensor.dtype)
# TODO(priyag, sourabhbajaj): Support steps_per_run if/when we add outfeed.
ctx = current_strategy.extended.experimental_run_steps_on_iterator(
step_fn, iterator, iterations=1,
initial_loop_values=initial_loop_values)
predict_op = ctx.run_op
output_tensors = ctx.last_step_outputs
if verbose == 1:
progbar = Progbar(target=steps)
if model._compile_distribution:
distributed_training_utils._copy_weights_to_distributed_model(model, mode)
distributed_training_utils._reset_metrics(model)
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=False,
epochs=1,
steps_per_epoch=steps,
verbose=verbose,
count_mode='steps',
mode=mode)
callbacks._call_begin_hook(mode)
# Since we do not know how many samples we will see, we cannot pre-allocate
# the returned Numpy arrays. Instead, we store one array per batch seen
# and concatenate them upon returning.
unconcatenated_outs = [[] for _ in model.outputs]
if steps is not None:
target_steps = steps
else:
target_steps = np.inf
current_step = 0
while current_step < target_steps:
batch_logs = {'batch': current_step, 'size': 1}
callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
try:
_, batch_outs = K.batch_get_value([predict_op, output_tensors])
except errors.OutOfRangeError:
if steps is not None:
        warning_msg = ('Make sure that your dataset can generate at least '
                       '`steps` batches (in this case, {} batches).'.format(
                           steps))
      else:
        warning_msg = 'Number of steps run: {} steps'.format(current_step)
logging.warning('Your dataset iterator ran out of data; '
'interrupting evaluation. ' + warning_msg)
break
# TODO(priyag): maybe need to unwrap the outputs first for MirroredStrategy.
for i, label in enumerate(model.output_names):
unconcatenated_outs[i].extend(batch_outs[label])
batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
callbacks._call_batch_hook(mode, 'end', current_step, batch_logs)
if verbose >= 1:
progbar.update(current_step + 1)
current_step += 1
callbacks._call_end_hook(mode)
scope.__exit__(None, None, None)
if len(unconcatenated_outs) == 1:
prediction_result = np.concatenate(unconcatenated_outs[0], axis=0)
else:
prediction_result = [
np.concatenate(unconcatenated_outs[i], axis=0)
for i in range(len(unconcatenated_outs))
]
if padding_handler:
prediction_result = padding_handler.apply_mask(prediction_result)
return prediction_result
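# The loop above illustrates a general pattern: when the number of batches is
# unknown ahead of time, collect one output array per batch and concatenate them
# once iteration stops. A minimal, self-contained sketch of that pattern follows
# (NumPy only; `predict_batch` is a hypothetical stand-in for the per-batch
# predict op, not part of the Keras/TF implementation above).
def _accumulate_and_concatenate_demo():
  import numpy as np

  def predict_batch(step):
    # Hypothetical per-batch prediction: a (batch_size, num_classes) array.
    return np.full((4, 3), step, dtype=np.float32)

  unconcatenated = []  # one entry per batch, in order
  for step in range(5):
    unconcatenated.append(predict_batch(step))
  # Concatenate along the batch axis once iteration is finished.
  return np.concatenate(unconcatenated, axis=0)  # shape (20, 3)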
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/signaling-protocols/rsvp-te/interface-attributes/interface/interface-ref/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state for interface-ref
"""
__slots__ = ("_path_helper", "_extmethods", "__interface", "__subinterface")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"signaling-protocols",
"rsvp-te",
"interface-attributes",
"interface",
"interface-ref",
"state",
]
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/interface_ref/state/interface (leafref)
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/interface_ref/state/interface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """interface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
}
)
self.__interface = t
if hasattr(self, "_set"):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
def _get_subinterface(self):
"""
Getter method for subinterface, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/interface_ref/state/subinterface (leafref)
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is required, this leaf should not be set.
"""
return self.__subinterface
def _set_subinterface(self, v, load=False):
"""
Setter method for subinterface, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/interface_ref/state/subinterface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_subinterface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subinterface() directly.
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is required, this leaf should not be set.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subinterface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
}
)
self.__subinterface = t
if hasattr(self, "_set"):
self._set()
def _unset_subinterface(self):
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
interface = __builtin__.property(_get_interface)
subinterface = __builtin__.property(_get_subinterface)
_pyangbind_elements = OrderedDict(
[("interface", interface), ("subinterface", subinterface)]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/signaling-protocols/rsvp-te/interface-attributes/interface/interface-ref/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state for interface-ref
"""
__slots__ = ("_path_helper", "_extmethods", "__interface", "__subinterface")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"signaling-protocols",
"rsvp-te",
"interface-attributes",
"interface",
"interface-ref",
"state",
]
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/interface_ref/state/interface (leafref)
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/interface_ref/state/interface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """interface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
}
)
self.__interface = t
if hasattr(self, "_set"):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
def _get_subinterface(self):
"""
Getter method for subinterface, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/interface_ref/state/subinterface (leafref)
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is required, this leaf should not be set.
"""
return self.__subinterface
def _set_subinterface(self, v, load=False):
"""
Setter method for subinterface, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/interface_attributes/interface/interface_ref/state/subinterface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_subinterface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subinterface() directly.
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is required, this leaf should not be set.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subinterface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
}
)
self.__subinterface = t
if hasattr(self, "_set"):
self._set()
def _unset_subinterface(self):
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=False,
)
interface = __builtin__.property(_get_interface)
subinterface = __builtin__.property(_get_subinterface)
_pyangbind_elements = OrderedDict(
[("interface", interface), ("subinterface", subinterface)]
)
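# A brief usage sketch for the auto-generated container above (assuming
# pyangbind and its dependencies are installed). It only touches members that
# are defined in the class itself and is illustrative, not generated output.
def _state_usage_demo():
    s = state()
    # The schema path of this container within the YANG tree.
    print(s._path())
    # Generated leaves are exposed as read-only properties (config: false).
    for leaf_name in s._pyangbind_elements:
        print(leaf_name, getattr(s, leaf_name))
    return s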
|
|
#!/bin/sh -
# Copyright 2011,2012,2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# If you have PyPy 1.6+ in a directory called pypy alongside pox.py, we
# use it.
# Otherwise, we try to use a Python interpreter called python2.7, which
# is a good idea if you're using Python from MacPorts, for example.
# We fall back to just "python" and hope that works.
#TODO: Make runnable by itself (paths need adjusting, etc.).
''''true
export OPT="-u -O"
export FLG=""
if [ "$(basename $0)" = "debug-pox.py" ]; then
export OPT=""
export FLG="--debug"
fi
if [ -x pypy/bin/pypy ]; then
exec pypy/bin/pypy $OPT "$0" $FLG "$@"
fi
if type python2.7 > /dev/null; then
exec python2.7 $OPT "$0" $FLG "$@"
fi
exec python $OPT "$0" $FLG "$@"
'''
from __future__ import print_function
import logging
import logging.config
import os
import sys
import traceback
import time
import inspect
import types
import threading
import pox.core
core = pox.core.initialize()
import pox.openflow
import pox.openflow.of_01
from pox.lib.util import str_to_bool
# Function to run on main thread
_main_thread_function = None
try:
import __pypy__
except ImportError:
__pypy__ = None
def _do_import (name):
"""
Try to import the named component.
Returns its module name if it was loaded or False on failure.
"""
def show_fail ():
traceback.print_exc()
print("Could not import module:", name)
def do_import2 (base_name, names_to_try):
if len(names_to_try) == 0:
print("Module not found:", base_name)
return False
name = names_to_try.pop(0)
if name in sys.modules:
return name
try:
__import__(name, level=0)
return name
except ImportError:
# There are two cases why this might happen:
# 1. The named module could not be found
# 2. Some dependent module (import foo) or some dependent
# name-in-a-module (e.g., from foo import bar) could not be found.
# If it's the former, we might try a name variation (e.g., without
# a leading "pox."), but if we ultimately can't find the named
# module, we just say something along those lines and stop.
# On the other hand, if the problem is with a dependency, we should
# print a stack trace so that it can be fixed.
# Sorting out the two cases is an ugly hack.
message = str(sys.exc_info()[1].args[0])
s = message.rsplit(" ", 1)
# Sadly, PyPy isn't consistent with CPython here.
#TODO: Check on this behavior in pypy 2.0.
if s[0] == "No module named" and (name.endswith(s[1]) or __pypy__):
# It was the one we tried to import itself. (Case 1)
# If we have other names to try, try them!
return do_import2(base_name, names_to_try)
elif message == "Import by filename is not supported.":
print(message)
import os.path
n = name.replace("/", ".").replace("\\", ".")
n = n.replace( os.path.sep, ".")
if n.startswith("pox.") or n.startswith("ext."):
n = n[4:]
print("Maybe you meant to run '%s'?" % (n,))
return False
else:
# This means we found the module we were looking for, but one
# of its dependencies was missing.
show_fail()
return False
except:
# There was some other sort of exception while trying to load the
# module. Just print a trace and call it a day.
show_fail()
return False
return do_import2(name, ["pox." + name, name])
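# The fallback logic above boils down to: try "pox.<name>" first, then plain
# "<name>". A compact sketch of that idea using importlib (a hypothetical
# helper, not used by POX itself, which needs the detailed error handling above):
def _fallback_import_demo (name):
  import importlib
  for candidate in ("pox." + name, name):
    try:
      return importlib.import_module(candidate)
    except ImportError:
      continue
  return False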
def _do_imports (components):
"""
Import each of the listed components
Returns map of component_name->name,module,members on success,
or False on failure
"""
done = {}
for name in components:
if name in done: continue
r = _do_import(name)
if r is False:
return False
members = dict(inspect.getmembers(sys.modules[r]))
done[name] = (r,sys.modules[r],members)
return done
def _do_launch (argv):
component_order = []
components = {}
curargs = {}
pox_options = curargs
for arg in argv:
if not arg.startswith("-"):
if arg not in components:
components[arg] = []
curargs = {}
components[arg].append(curargs)
component_order.append(arg)
else:
arg = arg.lstrip("-").split("=", 1)
arg[0] = arg[0].replace("-", "_")
if len(arg) == 1: arg.append(True)
curargs[arg[0]] = arg[1]
_options.process_options(pox_options)
_pre_startup()
modules = _do_imports(n.split(':')[0] for n in component_order)
if modules is False:
return False
inst = {}
for name in component_order:
cname = name
inst[name] = inst.get(name, -1) + 1
params = components[name][inst[name]]
name = name.split(":", 1)
launch = name[1] if len(name) == 2 else "launch"
name = name[0]
name,module,members = modules[name]
if launch in members:
f = members[launch]
# We explicitly test for a function and not an arbitrary callable
if type(f) is not types.FunctionType:
print(launch, "in", name, "isn't a function!")
return False
if getattr(f, '_pox_eval_args', False):
import ast
for k,v in params.items():
if isinstance(v, str):
try:
params[k] = ast.literal_eval(v)
except:
# Leave it as a string
pass
multi = False
if f.func_code.co_argcount > 0:
#FIXME: This code doesn't look quite right to me and may be broken
# in some cases. We should refactor to use inspect anyway,
# which should hopefully just fix it.
if (f.func_code.co_varnames[f.func_code.co_argcount-1]
== '__INSTANCE__'):
# It's a multi-instance-aware component.
multi = True
# Special __INSTANCE__ parameter gets passed a tuple with:
# 1. The number of this instance (0...n-1)
# 2. The total number of instances for this module
# 3. True if this is the last instance, False otherwise
# The last is just a comparison between #1 and #2, but it's
# convenient.
params['__INSTANCE__'] = (inst[cname], len(components[cname]),
inst[cname] + 1 == len(components[cname]))
if multi == False and len(components[cname]) != 1:
print(name, "does not accept multiple instances")
return False
try:
if f(**params) is False:
# Abort startup
return False
except TypeError as exc:
instText = ''
if inst[cname] > 0:
instText = "instance {0} of ".format(inst[cname] + 1)
print("Error executing {2}{0}.{1}:".format(name,launch,instText))
if inspect.currentframe() is sys.exc_info()[2].tb_frame:
# Error is with calling the function
# Try to give some useful feedback
if _options.verbose:
traceback.print_exc()
else:
exc = sys.exc_info()[0:2]
print(''.join(traceback.format_exception_only(*exc)), end='')
print()
EMPTY = "<Unspecified>"
code = f.__code__
argcount = code.co_argcount
argnames = code.co_varnames[:argcount]
defaults = list((f.func_defaults) or [])
defaults = [EMPTY] * (argcount - len(defaults)) + defaults
args = {}
for n, a in enumerate(argnames):
args[a] = [EMPTY,EMPTY]
if n < len(defaults):
args[a][0] = defaults[n]
if a in params:
args[a][1] = params[a]
del params[a]
if '__INSTANCE__' in args:
del args['__INSTANCE__']
if f.__doc__ is not None:
print("Documentation for {0}:".format(name))
doc = f.__doc__.split("\n")
#TODO: only strip the same leading space as was on the first
# line
doc = map(str.strip, doc)
print('',("\n ".join(doc)).strip())
#print(params)
#print(args)
print("Parameters for {0}:".format(name))
if len(args) == 0:
print(" None.")
else:
print(" {0:25} {1:25} {2:25}".format("Name", "Default",
"Active"))
print(" {0:25} {0:25} {0:25}".format("-" * 15))
for k,v in args.iteritems():
print(" {0:25} {1:25} {2:25}".format(k,str(v[0]),
str(v[1] if v[1] is not EMPTY else v[0])))
if len(params):
print("This component does not have a parameter named "
+ "'{0}'.".format(params.keys()[0]))
return False
missing = [k for k,x in args.iteritems()
if x[1] is EMPTY and x[0] is EMPTY]
if len(missing):
print("You must specify a value for the '{0}' "
"parameter.".format(missing[0]))
return False
return False
else:
# Error is inside the function
raise
elif len(params) > 0 or launch != "launch":
print("Module %s has no %s(), but it was specified or passed " \
"arguments" % (name, launch))
return False
return True
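# For reference, the argument convention handled by _do_launch is: anything
# that does not start with "-" names a component, and each "--key=value" (or
# bare "--flag") that follows is collected into that component's argument dict.
# A standalone sketch of just that splitting step (hypothetical helper, shown
# only to illustrate the convention; options before the first component are
# ignored here, whereas _do_launch routes them to the POX options):
def _split_argv_demo (argv):
  components = []   # list of (component_name, {option: value})
  curargs = {}
  for arg in argv:
    if not arg.startswith("-"):
      curargs = {}
      components.append((arg, curargs))
    else:
      k, _, v = arg.lstrip("-").partition("=")
      curargs[k.replace("-", "_")] = v if v else True
  return components
# _split_argv_demo(["forwarding.l2_learning", "--transparent"]) would yield
# [("forwarding.l2_learning", {"transparent": True})].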
class Options (object):
def set (self, given_name, value):
name = given_name.replace("-", "_")
if name.startswith("_") or hasattr(Options, name):
# Hey, what's that about?
print("Illegal option:", given_name)
return False
has_field = hasattr(self, name)
has_setter = hasattr(self, "_set_" + name)
if has_field == False and has_setter == False:
print("Unknown option:", given_name)
return False
if has_setter:
setter = getattr(self, "_set_" + name)
setter(given_name, name, value)
else:
if isinstance(getattr(self, name), bool):
# Automatic bool-ization
value = str_to_bool(value)
setattr(self, name, value)
return True
def process_options (self, options):
for k,v in options.iteritems():
if self.set(k, v) is False:
# Bad option!
sys.exit(1)
_help_text = """
POX is a Software Defined Networking controller framework.
The commandline of POX is like:
pox.py [POX options] [C1 [C1 options]] [C2 [C2 options]] ...
Notable POX options include:
--verbose Print more debugging information (especially useful for
problems on startup)
--no-openflow Don't automatically load the OpenFlow module
--log-config=F Load a Python log configuration file (if you include the
option without specifying F, it defaults to logging.cfg)
C1, C2, etc. are component names (e.g., Python modules). Options they
support are up to the module. As an example, you can load a learning
switch app that listens on a non-standard port number by specifying an
option to the of_01 component, and loading the l2_learning component like:
./pox.py --verbose openflow.of_01 --port=6634 forwarding.l2_learning
The 'help' component can give help for other components. Start with:
./pox.py help --help
""".strip()
class POXOptions (Options):
def __init__ (self):
# self.cli = True
self.verbose = False
self.enable_openflow = True
self.log_config = None
def _set_h (self, given_name, name, value):
self._set_help(given_name, name, value)
def _set_help (self, given_name, name, value):
print(_help_text)
#TODO: Summarize options, etc.
sys.exit(0)
def _set_version (self, given_name, name, value):
print(core._get_python_version())
sys.exit(0)
def _set_no_openflow (self, given_name, name, value):
self.enable_openflow = not str_to_bool(value)
# def _set_no_cli (self, given_name, name, value):
# self.cli = not str_to_bool(value)
def _set_log_config (self, given_name, name, value):
if value is True:
# I think I use a better method for finding the path elsewhere...
p = os.path.dirname(os.path.realpath(__file__))
value = os.path.join(p, "..", "logging.cfg")
self.log_config = value
def _set_debug (self, given_name, name, value):
value = str_to_bool(value)
if value:
# Debug implies no openflow and no CLI and verbose
#TODO: Is this really an option we need/want?
self.verbose = True
self.enable_openflow = False
# self.cli = False
_options = POXOptions()
def _pre_startup ():
"""
This function is called after all the POX options have been read in
but before any components are loaded. This gives a chance to do
early setup (e.g., configure logging before a component has a chance
to try to log something!).
"""
_setup_logging()
if _options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if _options.enable_openflow:
pox.openflow.launch() # Default OpenFlow launch
def _post_startup ():
if _options.enable_openflow:
pox.openflow.of_01.launch() # Usually, we launch of_01
def _setup_logging ():
# First do some basic log config...
# This is kind of a hack, but we need to keep track of the handler we
# install so that we can, for example, uninstall it later. This code
# originally lived in pox.core, so we explicitly reference it here.
pox.core._default_log_handler = logging.StreamHandler()
formatter = logging.Formatter(logging.BASIC_FORMAT)
pox.core._default_log_handler.setFormatter(formatter)
logging.getLogger().addHandler(pox.core._default_log_handler)
logging.getLogger().setLevel(logging.INFO)
# Now set up from config file if specified...
#TODO:
# I think we could move most of the special log stuff into
# the log module. You'd just have to make a point to put the log
# module first on the commandline if you wanted later component
# initializations to honor it. Or it could be special-cased?
if _options.log_config is not None:
if not os.path.exists(_options.log_config):
print("Could not find logging config file:", _options.log_config)
sys.exit(2)
logging.config.fileConfig(_options.log_config,
disable_existing_loggers=True)
def set_main_function (f):
global _main_thread_function
if _main_thread_function == f: return True
if _main_thread_function is not None:
import logging
lg = logging.getLogger("boot")
lg.error("Could not set main thread function to: " + str(f))
lg.error("The main thread function is already "
+ "taken by: " + str(_main_thread_function))
return False
_main_thread_function = f
return True
def boot ():
"""
Start up POX.
"""
# Add pox directory to path
base = sys.path[0]
sys.path.insert(0, os.path.abspath(os.path.join(base, 'pox')))
sys.path.insert(0, os.path.abspath(os.path.join(base, 'ext')))
thread_count = threading.active_count()
quiet = False
try:
argv = sys.argv[1:]
# Always load cli (first!)
#TODO: Can we just get rid of the normal options yet?
pre = []
while len(argv):
if argv[0].startswith("-"):
pre.append(argv.pop(0))
else:
break
argv = pre + "py --disable".split() + argv
if _do_launch(argv):
_post_startup()
core.goUp()
else:
#return
quiet = True
raise RuntimeError()
except SystemExit:
return
except:
if not quiet:
traceback.print_exc()
# Try to exit normally, but do a hard exit if we don't.
# This is sort of a hack. What's the better option? Raise
# the going down event on core even though we never went up?
try:
for _ in range(4):
if threading.active_count() <= thread_count:
# Normal exit
return
time.sleep(0.25)
except:
pass
os._exit(1)
return
if _main_thread_function:
_main_thread_function()
else:
#core.acquire()
try:
while True:
if core.quit_condition.acquire(False):
core.quit_condition.wait(10)
core.quit_condition.release()
if not core.running: break
except:
pass
#core.scheduler._thread.join() # Sleazy
try:
pox.core.core.quit()
except:
pass
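# A minimal sketch of how this script is typically invoked when run directly;
# whether the original file ends exactly this way is an assumption here.
if __name__ == '__main__':
  boot()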
|
|
# -*- coding: utf-8 -*-
"""
@author : Armando Casillas <armcasillas@ucdavis.edu>
@author : Marco Pritoni <marco.pritoni@gmail.com>
Created on Wed Jul 26 2017
Update Aug 08 2017
"""
from __future__ import division
import pandas as pd
import os
import sys
import requests as req
import json
import numpy as np
import datetime
import pytz
from pandas import rolling_median
from matplotlib import style
import matplotlib
class TS_Util(object):
########################################################################
## simple load file section - eventually replace this with CSV_Importer
def _set_TS_index(self, data):
'''
Parameters
----------
Returns
-------
'''
# set index
data.index = pd.to_datetime(data.index)
# format types to numeric
for col in data.columns:
data[col] = pd.to_numeric(data[col], errors="coerce")
return data
def load_TS(self, fileName, folder):
'''
Load a timeseries CSV file and prepare it for analysis.
Parameters
----------
fileName: string
name of the CSV file to load
folder: string
path of the folder containing the file
Returns
-------
data: Dataframe
pandas dataframe with a datetime index and numeric columns
'''
path = os.path.join(folder, fileName)
data = pd.read_csv(path, index_col=0)
data = self._set_TS_index(data)
return data
########################################################################
## time correction for time zones - eventually replace this with CSV_Importer
def _utc_to_local(self, data, local_zone="America/Los_Angeles"):
'''
Function takes in a pandas dataframe and adjusts the index to the local timezone requested by the user
Parameters
----------
data: Dataframe
pandas dataframe of json timeseries response from server
local_zone: string
pytz.timezone string of specified local timezone to change index to
Returns
-------
data: Dataframe
Pandas dataframe with timestamp index adjusted for local timezone
'''
data.index = data.index.tz_localize(pytz.utc).tz_convert(
local_zone) # accounts for localtime shift
# Gets rid of extra offset information so can compare with csv data
data.index = data.index.tz_localize(None)
return data
def _local_to_utc(self, timestamp, local_zone="America/Los_Angeles"):
'''
Parameters
----------
# Change timestamp request time to reflect request in terms of local time relative to utc - working as of 5/5/17 ( Should test more )
# remove and add to TS_Util and import
Returns
-------
'''
timestamp_new = pd.to_datetime(
timestamp, infer_datetime_format=True, errors='coerce')
timestamp_new = timestamp_new.tz_localize(
local_zone).tz_convert(pytz.utc)
timestamp_new = timestamp_new.strftime('%Y-%m-%d %H:%M:%S')
return timestamp_new
########################################################################
## remove start and end NaN: Note issue with multi-column df
def remove_start_NaN(self, data, var=None):
'''
Parameters
----------
Returns
-------
'''
if var: # limit to one or some variables
start_ok_data = data[var].first_valid_index()
else:
start_ok_data = data.first_valid_index()
data = data.loc[start_ok_data:, :]
return data
def remove_end_NaN(self, data, var=None):
'''
Parameters
----------
Returns
-------
'''
if var: # limit to one or some variables
end_ok_data = data[var].last_valid_index()
else:
end_ok_data = data.last_valid_index()
data = data.loc[:end_ok_data, :]
return data
########################################################################
## Missing data section
def _find_missing_return_frame(self, data):
'''
Function takes in a pandas dataframe and finds missing values in each column
Parameters
----------
data: Dataframe
Returns
-------
data: Dataframe
'''
return data.isnull()
def _find_missing(self, data, return_bool=False):
if return_bool == False: # this returns the full table with True where the condition is true
data = self._find_missing_return_frame(data)
return data
elif return_bool == "any": # this returns a bool selector if any of the column is True
bool_sel = self._find_missing_return_frame(data).any(axis=1)
return bool_sel
elif return_bool == "all": # this returns a bool selector if all of the column are True
bool_sel = self._find_missing_return_frame(data).all(axis=1)
return bool_sel
else:
print("error in multi_col_how input")
return
def display_missing(self, data, return_bool="any"):
'''
Parameters
----------
Returns
-------
'''
if return_bool == "any":
bool_sel = self._find_missing(data,return_bool="any")
elif return_bool == "all":
bool_sel = self._find_missing(data,return_bool="all")
return data[bool_sel]
def count_missing(self, data, output="number"):
'''
Parameters
----------
how = "number" or "percent"
Returns
-------
'''
count = self._find_missing(data,return_bool=False).sum()
if output == "number":
return count
elif output == "percent":
return ((count / (data.shape[0])) * 100)
def remove_missing(self, data, return_bool="any"):
'''
Parameters
----------
Returns
-------
'''
if return_bool == "any":
bool_sel = self._find_missing(data,return_bool="any")
elif return_bool == "all":
bool_sel = self._find_missing(data,return_bool="all")
return data[~bool_sel]
########################################################################
## Out of Bound section
def _find_outOfBound(self, data, lowBound, highBound):
'''
Parameters
----------
Returns
-------
'''
data = ((data < lowBound) | (data > highBound))
return data
def display_outOfBound(self, data, lowBound, highBound):
'''
Parameters
----------
Returns
-------
'''
data = data[self._find_outOfBound(
data, lowBound, highBound).any(axis=1)]
return data
def count_outOfBound(self, data, lowBound, highBound, output):
'''
Parameters
----------
Returns
-------
'''
count = self._find_outOfBound(data, lowBound, highBound).sum()
if output == "number":
return count
elif output == "percent":
return count / (data.shape[0]) * 1.0 * 100
def remove_outOfBound(self, data, lowBound, highBound):
'''
Parameters
----------
Returns
-------
'''
data = data[~self._find_outOfBound(
data, lowBound, highBound).any(axis=1)]
return data
########################################################################
## Outliers section
def _calc_outliers_bounds(self, data, method, coeff, window):
'''
Compute the low/high bounds used to flag outliers.
Parameters
----------
data: Dataframe
method: string
"std" (mean +/- coeff*std), "rstd" (rolling mean +/- coeff*rolling std),
"rmedian" (rolling median +/- coeff), "iqr" (quartiles -/+ coeff*IQR)
or "qtl" (0.5% and 99.5% quantiles)
coeff: numeric
multiplier for std/IQR, or threshold for the rolling median
window: int
window size for the rolling methods
Returns
-------
lowBound, highBound
'''
if method == "std":
lowBound = (data.mean(axis=0) - coeff * data.std(axis=0)).values[0]
highBound = (data.mean(axis=0) + coeff * data.std(axis=0)).values[0]
elif method == "rstd":
rl_mean = data.rolling(window=window).mean()
rl_std = data.rolling(window=window).std().fillna(method='bfill').fillna(method='ffill')
lowBound = rl_mean - coeff * rl_std
highBound = rl_mean + coeff * rl_std
elif method == "rmedian":
rl_med = data.rolling(window=window, center=True).median().fillna(
method='bfill').fillna(method='ffill')
lowBound = rl_med - coeff
highBound = rl_med + coeff
elif method == "iqr": # coeff is multip for std and IQR or threshold for rolling median
Q1 = data.quantile(.25) # coeff is multip for std or % of quartile
Q3 = data.quantile(.75)
IQR = Q3 - Q1
lowBound = Q1 - coeff * IQR
highBound = Q3 + coeff * IQR
elif method == "qtl":
lowBound = data.quantile(.005)
highBound = data.quantile(.995)
else:
print ("method chosen does not exist")
lowBound = None
highBound = None
return lowBound, highBound
def display_outliers(self, data, method, coeff, window=10):
'''
Parameters
----------
Returns
-------
'''
lowBound, highBound = self._calc_outliers_bounds(
data, method, coeff, window)
data = self.display_outOfBound(data, lowBound, highBound)
return data
def count_outliers(self, data, method, coeff, output, window=10):
'''
Parameters
----------
Returns
-------
'''
lowBound, highBound = self._calc_outliers_bounds(
data, method, coeff, window)
count = self.count_outOfBound(data, lowBound, highBound, output=output)
return count
def remove_outliers(self, data, method, coeff, window=10):
'''
Parameters
----------
Returns
-------
'''
lowBound, highBound = self._calc_outliers_bounds(
data, method, coeff, window)
data = self.remove_outOfBound(data, lowBound, highBound)
return data
########################################################################
## If condition section
def _find_equal_to_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
#print(val)
bool_sel = (data == val)
return bool_sel
def _find_greater_than_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = (data > val)
return bool_sel
def _find_less_than_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = (data < val)
return bool_sel
def _find_greater_than_or_equal_to_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = (data >= val)
return bool_sel
def _find_less_than_or_equal_to_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = (data <= val)
return bool_sel
def _find_different_from_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = ~(data == val)
return bool_sel
def count_if(self, data, condition, val, output="number"):
"""
condition = "equal", "below", "above"
val = value to compare against
how = "number" or "percent"
"""
if condition == "=":
count = self._find_equal_to_values(data,val).sum()
elif condition == ">":
count = self._find_greater_than_values(data,val).sum()
elif condition == "<":
count = self._find_less_than_values(data,val).sum()
elif condition == ">=":
count = self._find_greater_than_or_equal_to_values(data,val).sum()
elif condition == "<=":
count = self._find_less_than_or_equal_to_values(data,val).sum()
elif condition == "!=":
count = self._find_different_from_values(data,val).sum()
if output == "number":
return count
elif output == "percent":
return count/data.shape[0]*1.0*100
return count
########################################################################
## Missing Data Events section
def get_start_events(self, data, var = "T_ctrl [oF]"): # create list of start events
'''
Parameters
----------
Returns
-------
'''
start_event = (data[var].isnull()) & ~(data[var].shift().isnull()) # find NaN start event
start = data[start_event].index.tolist() # selector for these events
if np.isnan(data.loc[data.index[0],var]): # if the first record is NaN
start = [data.index[0]] + start # add first record as starting time for first NaN event
else:
start = start
return start
def get_end_events(self, data, var = "T_ctrl [oF]"): # create list of end events
'''
Parameters
----------
Returns
-------
'''
end_events = ~(data[var].isnull()) & (data[var].shift().isnull()) # find NaN end events
end = data[end_events].index.tolist() # selector for these events
if ~np.isnan(data.loc[data.index[0],var]): # if first record is not NaN
end.remove(end[0]) # remove the endpoint ()
if np.isnan(data.loc[data.index[-1],var]): # if the last record is NaN
end = end + [data.index[-1]] # add last record as ending time for first NaN event
else:
end = end
return end
def create_event_table(self, data, var): # create dataframe of of start-end-length for current house/tstat
'''
Build a table of missing-data events (start, end, length in minutes) for var.
Parameters
----------
data: Dataframe
var: string
column to analyze for NaN events
Returns
-------
events: Dataframe
indexed by event start time, with "end" and "length_min" columns
'''
# remove initial and final missing data
data = self.remove_start_NaN(data, var)
data = self.remove_end_NaN(data, var)
# create list of start events
start = self.get_start_events(data, var)
# create list of end events
end = self.get_end_events(data, var)
# merge lists into dataframe and calc length
events = pd.DataFrame.from_items([("start",start), ("end",end )])
events["length_min"] = (events["end"] - events["start"]).dt.total_seconds()/60 # note: this needs datetime index
#print events
events.set_index("start",inplace=True)
return events
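# A short usage sketch for TS_Util (the file name and folder below are
# hypothetical placeholders, not part of the library; it assumes a CSV with a
# datetime index exists at that path):
def _ts_util_demo():
    tsu = TS_Util()
    data = tsu.load_TS("building_data.csv", "./data")      # hypothetical file
    print(tsu.count_missing(data, output="percent"))        # % missing per column
    data = tsu.remove_missing(data, return_bool="any")      # drop rows with any NaN
    data = tsu.remove_outliers(data, method="iqr", coeff=1.5)
    print(tsu.count_if(data, condition=">", val=0, output="number"))
    return data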
|
|
"""
:copyright: (c) 2014 Building Energy Inc
:license: see LICENSE for more details.
"""
import os
import base64
import json
import hmac
import hashlib
from annoying.decorators import ajax_request
from django.conf import settings
from django.contrib.auth.decorators import login_required
from ajaxuploader.views import AjaxFileUploader
from data_importer.models import (
ImportFile,
ImportRecord,
)
from ajaxuploader.backends.local import LocalUploadBackend
from seed.utils.api import api_endpoint
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
@api_endpoint
@ajax_request
@login_required
def handle_s3_upload_complete(request):
"""
Notify the system that an upload to S3 has been completed. This is
a necessary step after uploading to S3 or the SEED instance will not
be aware the file exists.
Valid source_type values are found in ``seed.models.SEED_DATA_SOURCES``
:GET: Expects the following in the query string:
key: The full path to the file, within the S3 bucket.
E.g. data_importer/bldgs.csv
source_type: The source of the file.
E.g. 'Assessed Raw' or 'Portfolio Raw'
import_record: The ID of the ImportRecord this file belongs to.
Returns::
{'success': True,
'import_file_id': The ID of the newly-created ImportFile object.
}
"""
if 'S3' not in settings.DEFAULT_FILE_STORAGE:
return {'success': False,
'message': "Direct-to-S3 uploads not enabled"}
import_record_pk = request.REQUEST['import_record']
try:
record = ImportRecord.objects.get(pk=import_record_pk)
except ImportRecord.DoesNotExist:
#TODO: Remove the file from S3?
return {'success': False,
'message': "Import Record %s not found" % import_record_pk}
filename = request.REQUEST['key']
source_type = request.REQUEST['source_type']
f = ImportFile.objects.create(import_record=record,
file=filename,
source_type=source_type)
return {'success': True, "import_file_id": f.pk}
class DataImportBackend(LocalUploadBackend):
"""
Subclass of ajaxuploader's LocalUploadBackend, to handle
creation of ImportFile objects related to the specified
ImportRecord.
"""
def upload_complete(self, request, filename, *args, **kwargs):
"""
Called directly by fineuploader on upload completion.
"""
if 'S3' in settings.DEFAULT_FILE_STORAGE:
os.unlink(self.path)
raise ImproperlyConfigured("Local upload not supported")
super(DataImportBackend, self).upload_complete(
request, filename, *args, **kwargs
)
import_record_pk = request.REQUEST['import_record']
try:
record = ImportRecord.objects.get(pk=import_record_pk)
except ImportRecord.DoesNotExist:
#clean up the uploaded file
os.unlink(self.path)
return {'success': False,
'message': "Import Record %s not found" % import_record_pk}
source_type = request.REQUEST['source_type']
f = ImportFile.objects.create(import_record=record,
file=self.path,
source_type=source_type)
return {'success': True, "import_file_id": f.pk}
#this actually creates the django view for handling local file uploads.
#hence the decorators are applied as plain function calls instead of decorator syntax.
local_uploader = AjaxFileUploader(backend=DataImportBackend)
local_uploader = login_required(local_uploader)
local_uploader = api_endpoint(local_uploader)
#API documentation and method name fix
local_uploader.__doc__ = \
"""
Endpoint to upload data files to, if uploading to local file storage.
Valid source_type values are found in ``seed.models.SEED_DATA_SOURCES``
:GET:
The following parameters are expected to be in the query string:
import_record: the ID of the ImportRecord to associate this file with.
qqfile: The name of the file
source_type: A valid source type (e.g. 'Portfolio Raw' or 'Assessed Raw')
Payload::
The content of the file as a data stream. Do not use multipart encoding.
Returns::
{'success': True,
'import_file_id': The ID of the newly-uploaded ImportFile
}
"""
local_uploader.__name__ = 'local_uploader'
@api_endpoint
@ajax_request
@login_required
def get_upload_details(request):
"""
Retrieves details about how to upload files to this instance.
Returns::
If S3 mode:
{
'upload_mode': 'S3',
'upload_complete': A url to notify that upload is complete,
'signature': The url to post file details to for auth to upload to S3.
}
If local filesystem mode:
{'upload_mode': 'filesystem',
'upload_path': The url to POST files to (see local_uploader)
}
"""
ret = {}
if 'S3' in settings.DEFAULT_FILE_STORAGE:
# S3 mode
ret['upload_mode'] = 'S3'
ret['upload_complete'] = reverse('data_importer:s3_upload_complete')
ret['signature'] = reverse('data_importer:sign_policy_document')
ret['aws_bucket_name'] = settings.AWS_BUCKET_NAME
ret['aws_client_key'] = settings.AWS_UPLOAD_CLIENT_KEY
else:
ret['upload_mode'] = 'filesystem'
ret['upload_path'] = reverse('data_importer:local_uploader')
return ret
@api_endpoint
@ajax_request
@login_required
def sign_policy_document(request):
"""
Sign and return the policy document for a simple upload.
http://aws.amazon.com/articles/1434/#signyours3postform
Payload::
{
"expiration": ISO-encoded timestamp for when signature should expire,
e.g. "2014-07-16T00:20:56.277Z",
"conditions":
[
{"acl":"private"},
{"bucket": The name of the bucket from get_upload_details},
{"Content-Type":"text/csv"},
{"success_action_status":"200"},
{"key": filename of upload, prefixed with 'data_imports/',
suffixed with a unique timestamp.
e.g. 'data_imports/my_buildings.csv.1405469756'},
{"x-amz-meta-category":"data_imports"},
{"x-amz-meta-qqfilename": original filename}
]
}
Returns::
{
"policy": A hash of the policy document. Using during upload to S3.
"signature": A signature of the policy document. Also used during
upload to S3.
}
"""
policy_document = json.loads(request.body)
policy = base64.b64encode(json.dumps(policy_document))
signature = base64.b64encode(
hmac.new(
settings.AWS_UPLOAD_CLIENT_SECRET_KEY, policy, hashlib.sha1
).digest()
)
return {
'policy': policy,
'signature': signature
}
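# The signing step above is plain base64 + HMAC-SHA1 over the JSON policy.
# A standalone sketch of the same calculation (the secret key and policy below
# are made-up values, and the .encode() calls are added so the sketch also runs
# on Python 3):
def _sign_policy_demo():
    import base64, hashlib, hmac, json
    secret_key = "EXAMPLE-SECRET"  # placeholder, not a real key
    policy_document = {"expiration": "2014-07-16T00:20:56.277Z", "conditions": []}
    policy = base64.b64encode(json.dumps(policy_document).encode("utf-8"))
    signature = base64.b64encode(
        hmac.new(secret_key.encode("utf-8"), policy, hashlib.sha1).digest()
    )
    return {"policy": policy, "signature": signature}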
|
|
#!/usr/bin/env python3
"""
Parts of this file were taken from the pyzmq project
(https://github.com/zeromq/pyzmq) which have been permitted for use under the
BSD license. Parts are from lxml (https://github.com/lxml/lxml)
"""
import argparse
import multiprocessing
import os
from os.path import join as pjoin
import platform
import shutil
import sys
from sysconfig import get_config_vars
import numpy
from pkg_resources import parse_version
from setuptools import (
Command,
Extension,
setup,
)
from setuptools.command.build_ext import build_ext as _build_ext
import versioneer
cmdclass = versioneer.get_cmdclass()
def is_platform_windows():
return sys.platform == "win32" or sys.platform == "cygwin"
def is_platform_mac():
return sys.platform == "darwin"
min_cython_ver = "0.29.21" # note: sync with pyproject.toml
try:
from Cython import (
Tempita,
__version__ as _CYTHON_VERSION,
)
from Cython.Build import cythonize
_CYTHON_INSTALLED = parse_version(_CYTHON_VERSION) >= parse_version(min_cython_ver)
except ImportError:
_CYTHON_VERSION = None
_CYTHON_INSTALLED = False
cythonize = lambda x, *args, **kwargs: x # dummy func
_pxi_dep_template = {
"algos": ["_libs/algos_common_helper.pxi.in", "_libs/algos_take_helper.pxi.in"],
"hashtable": [
"_libs/hashtable_class_helper.pxi.in",
"_libs/hashtable_func_helper.pxi.in",
"_libs/khash_for_primitive_helper.pxi.in",
],
"index": ["_libs/index_class_helper.pxi.in"],
"sparse": ["_libs/sparse_op_helper.pxi.in"],
"interval": ["_libs/intervaltree.pxi.in"],
}
_pxifiles = []
_pxi_dep = {}
for module, files in _pxi_dep_template.items():
pxi_files = [pjoin("pandas", x) for x in files]
_pxifiles.extend(pxi_files)
_pxi_dep[module] = pxi_files
class build_ext(_build_ext):
@classmethod
def render_templates(cls, pxifiles):
for pxifile in pxifiles:
# build pxifiles first, template extension must be .pxi.in
assert pxifile.endswith(".pxi.in")
outfile = pxifile[:-3]
if (
os.path.exists(outfile)
and os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime
):
# if .pxi.in is not updated, no need to output .pxi
continue
with open(pxifile) as f:
tmpl = f.read()
pyxcontent = Tempita.sub(tmpl)
with open(outfile, "w") as f:
f.write(pyxcontent)
def build_extensions(self):
# if building from c files, don't need to
# generate template output
if _CYTHON_INSTALLED:
self.render_templates(_pxifiles)
super().build_extensions()
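# render_templates above relies on Cython's bundled Tempita engine: each
# .pxi.in file is read as a template string and Tempita.sub() expands it into a
# .pxi file. A minimal illustration of that call (the template string here is
# made up, assuming Cython is installed):
def _tempita_demo():
    from Cython import Tempita

    tmpl = "ctypedef {{c_type}} value_t  # generated for {{c_type}}"
    # Keyword arguments provide the template namespace.
    return Tempita.sub(tmpl, c_type="float64_t")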
class CleanCommand(Command):
"""Custom command to clean the .so and .pyc files."""
user_options = [("all", "a", "")]
def initialize_options(self):
self.all = True
self._clean_me = []
self._clean_trees = []
base = pjoin("pandas", "_libs", "src")
tsbase = pjoin("pandas", "_libs", "tslibs", "src")
dt = pjoin(tsbase, "datetime")
util = pjoin("pandas", "util")
parser = pjoin(base, "parser")
ujson_python = pjoin(base, "ujson", "python")
ujson_lib = pjoin(base, "ujson", "lib")
self._clean_exclude = [
pjoin(dt, "np_datetime.c"),
pjoin(dt, "np_datetime_strings.c"),
pjoin(parser, "tokenizer.c"),
pjoin(parser, "io.c"),
pjoin(ujson_python, "ujson.c"),
pjoin(ujson_python, "objToJSON.c"),
pjoin(ujson_python, "JSONtoObj.c"),
pjoin(ujson_python, "date_conversions.c"),
pjoin(ujson_lib, "ultrajsonenc.c"),
pjoin(ujson_lib, "ultrajsondec.c"),
pjoin(util, "move.c"),
]
for root, dirs, files in os.walk("pandas"):
for f in files:
filepath = pjoin(root, f)
if filepath in self._clean_exclude:
continue
if os.path.splitext(f)[-1] in (
".pyc",
".so",
".o",
".pyo",
".pyd",
".c",
".cpp",
".orig",
):
self._clean_me.append(filepath)
for d in dirs:
if d == "__pycache__":
self._clean_trees.append(pjoin(root, d))
# clean the generated pxi files
for pxifile in _pxifiles:
pxifile = pxifile.replace(".pxi.in", ".pxi")
self._clean_me.append(pxifile)
for d in ("build", "dist"):
if os.path.exists(d):
self._clean_trees.append(d)
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except OSError:
pass
for clean_tree in self._clean_trees:
try:
shutil.rmtree(clean_tree)
except OSError:
pass
# we need to inherit from the versioneer
# class as it encodes the version info
sdist_class = cmdclass["sdist"]
class CheckSDist(sdist_class):
"""Custom sdist that ensures Cython has compiled all pyx files to c."""
_pyxfiles = [
"pandas/_libs/arrays.pyx",
"pandas/_libs/lib.pyx",
"pandas/_libs/hashtable.pyx",
"pandas/_libs/tslib.pyx",
"pandas/_libs/index.pyx",
"pandas/_libs/internals.pyx",
"pandas/_libs/algos.pyx",
"pandas/_libs/join.pyx",
"pandas/_libs/indexing.pyx",
"pandas/_libs/interval.pyx",
"pandas/_libs/hashing.pyx",
"pandas/_libs/missing.pyx",
"pandas/_libs/reduction.pyx",
"pandas/_libs/testing.pyx",
"pandas/_libs/sparse.pyx",
"pandas/_libs/ops.pyx",
"pandas/_libs/parsers.pyx",
"pandas/_libs/tslibs/base.pyx",
"pandas/_libs/tslibs/ccalendar.pyx",
"pandas/_libs/tslibs/dtypes.pyx",
"pandas/_libs/tslibs/period.pyx",
"pandas/_libs/tslibs/strptime.pyx",
"pandas/_libs/tslibs/np_datetime.pyx",
"pandas/_libs/tslibs/timedeltas.pyx",
"pandas/_libs/tslibs/timestamps.pyx",
"pandas/_libs/tslibs/timezones.pyx",
"pandas/_libs/tslibs/conversion.pyx",
"pandas/_libs/tslibs/fields.pyx",
"pandas/_libs/tslibs/offsets.pyx",
"pandas/_libs/tslibs/parsing.pyx",
"pandas/_libs/tslibs/tzconversion.pyx",
"pandas/_libs/tslibs/vectorized.pyx",
"pandas/_libs/window/indexers.pyx",
"pandas/_libs/writers.pyx",
"pandas/io/sas/sas.pyx",
]
_cpp_pyxfiles = [
"pandas/_libs/window/aggregations.pyx",
]
def initialize_options(self):
sdist_class.initialize_options(self)
def run(self):
if "cython" in cmdclass:
self.run_command("cython")
else:
# If we are not running cython then
# compile the extensions correctly
pyx_files = [(self._pyxfiles, "c"), (self._cpp_pyxfiles, "cpp")]
for pyxfiles, extension in pyx_files:
for pyxfile in pyxfiles:
sourcefile = pyxfile[:-3] + extension
msg = (
f"{extension}-source file '{sourcefile}' not found.\n"
"Run 'setup.py cython' before sdist."
)
assert os.path.isfile(sourcefile), msg
sdist_class.run(self)
class CheckingBuildExt(build_ext):
"""
Subclass build_ext to get clearer report if Cython is necessary.
"""
def check_cython_extensions(self, extensions):
for ext in extensions:
for src in ext.sources:
if not os.path.exists(src):
print(f"{ext.name}: -> [{ext.sources}]")
raise Exception(
f"""Cython-generated file '{src}' not found.
Cython is required to compile pandas from a development branch.
Please install Cython or download a release package of pandas.
"""
)
def build_extensions(self):
self.check_cython_extensions(self.extensions)
build_ext.build_extensions(self)
class CythonCommand(build_ext):
"""
Custom command subclassed from Cython.Distutils.build_ext
to compile pyx->c, and stop there. All this does is override the
C-compile method build_extension() with a no-op.
"""
def build_extension(self, ext):
pass
class DummyBuildSrc(Command):
"""numpy's build_src command interferes with Cython's build_ext."""
user_options = []
def initialize_options(self):
self.py_modules_dict = {}
def finalize_options(self):
pass
def run(self):
pass
cmdclass["clean"] = CleanCommand
cmdclass["build_ext"] = CheckingBuildExt
if _CYTHON_INSTALLED:
suffix = ".pyx"
cmdclass["cython"] = CythonCommand
else:
suffix = ".c"
cmdclass["build_src"] = DummyBuildSrc
# ----------------------------------------------------------------------
# Preparation of compiler arguments
debugging_symbols_requested = "--with-debugging-symbols" in sys.argv
if debugging_symbols_requested:
sys.argv.remove("--with-debugging-symbols")
if sys.byteorder == "big":
endian_macro = [("__BIG_ENDIAN__", "1")]
else:
endian_macro = [("__LITTLE_ENDIAN__", "1")]
extra_compile_args = []
extra_link_args = []
if is_platform_windows():
if debugging_symbols_requested:
extra_compile_args.append("/Z7")
extra_link_args.append("/DEBUG")
else:
# PANDAS_CI=1 is set by ci/setup_env.sh
if os.environ.get("PANDAS_CI", "0") == "1":
extra_compile_args.append("-Werror")
if debugging_symbols_requested:
extra_compile_args.append("-g")
extra_compile_args.append("-UNDEBUG")
extra_compile_args.append("-O0")
# Build for at least macOS 10.9 when compiling on a 10.9 system or above,
# overriding CPython distutils behaviour which is to target the version that
# python was built for. This may be overridden by setting
# MACOSX_DEPLOYMENT_TARGET before calling setup.py
if is_platform_mac():
if "MACOSX_DEPLOYMENT_TARGET" not in os.environ:
current_system = platform.mac_ver()[0]
python_target = get_config_vars().get(
"MACOSX_DEPLOYMENT_TARGET", current_system
)
target_macos_version = "10.9"
parsed_macos_version = parse_version(target_macos_version)
if (
parse_version(str(python_target)) < parsed_macos_version
and parse_version(current_system) >= parsed_macos_version
):
os.environ["MACOSX_DEPLOYMENT_TARGET"] = target_macos_version
if sys.version_info[:2] == (3, 8): # GH 33239
extra_compile_args.append("-Wno-error=deprecated-declarations")
# https://github.com/pandas-dev/pandas/issues/35559
extra_compile_args.append("-Wno-error=unreachable-code")
# enable coverage by building cython files by setting the environment variable
# "PANDAS_CYTHON_COVERAGE" (with a Truthy value) or by running build_ext
# with `--with-cython-coverage` enabled
linetrace = os.environ.get("PANDAS_CYTHON_COVERAGE", False)
if "--with-cython-coverage" in sys.argv:
linetrace = True
sys.argv.remove("--with-cython-coverage")
# Note: if not using `cythonize`, coverage can be enabled by
# pinning `ext.cython_directives = directives` to each ext in extensions.
# github.com/cython/cython/wiki/enhancements-compilerdirectives#in-setuppy
directives = {"linetrace": False, "language_level": 3}
macros = []
if linetrace:
# https://pypkg.com/pypi/pytest-cython/f/tests/example-project/setup.py
directives["linetrace"] = True
macros = [("CYTHON_TRACE", "1"), ("CYTHON_TRACE_NOGIL", "1")]
# silence build warnings about deprecated API usage
# we can't do anything about these warnings because they stem from
# cython+numpy version mismatches.
macros.append(("NPY_NO_DEPRECATED_API", "0"))
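# Illustrative sketch (not part of the original setup script): the alternative
# coverage hookup referenced in the comment above -- if cythonize() were
# skipped, per-extension directives could be pinned roughly like this.
def _pin_cython_directives(exts, cython_directives):
    # Cython's classic build_ext reads compiler directives from the
    # ``cython_directives`` attribute of each Extension.
    for ext in exts:
        ext.cython_directives = dict(cython_directives)
    return exts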
# ----------------------------------------------------------------------
# Specification of Dependencies
# TODO: Need to check to see if e.g. `linetrace` has changed and possibly
# re-compile.
def maybe_cythonize(extensions, *args, **kwargs):
"""
Render tempita templates before calling cythonize. This is skipped for
* clean
* sdist
"""
if "clean" in sys.argv or "sdist" in sys.argv:
# See https://github.com/cython/cython/issues/1495
return extensions
elif not _CYTHON_INSTALLED:
# GH#28836 raise a helpful error message
if _CYTHON_VERSION:
raise RuntimeError(
f"Cannot cythonize with old Cython version ({_CYTHON_VERSION} "
f"installed, needs {min_cython_ver})"
)
raise RuntimeError("Cannot cythonize without Cython installed.")
# reuse any parallel arguments provided for compilation to cythonize
parser = argparse.ArgumentParser()
parser.add_argument("--parallel", "-j", type=int, default=1)
parsed, _ = parser.parse_known_args()
kwargs["nthreads"] = parsed.parallel
build_ext.render_templates(_pxifiles)
return cythonize(extensions, *args, **kwargs)
def srcpath(name=None, suffix=".pyx", subdir="src"):
return pjoin("pandas", subdir, name + suffix)
lib_depends = ["pandas/_libs/src/parse_helper.h"]
klib_include = ["pandas/_libs/src/klib"]
tseries_depends = [
"pandas/_libs/tslibs/src/datetime/np_datetime.h",
"pandas/_libs/tslibs/src/datetime/np_datetime_strings.h",
]
ext_data = {
"_libs.algos": {
"pyxfile": "_libs/algos",
"include": klib_include,
"depends": _pxi_dep["algos"],
},
"_libs.arrays": {"pyxfile": "_libs/arrays"},
"_libs.groupby": {"pyxfile": "_libs/groupby"},
"_libs.hashing": {"pyxfile": "_libs/hashing", "depends": []},
"_libs.hashtable": {
"pyxfile": "_libs/hashtable",
"include": klib_include,
"depends": (
["pandas/_libs/src/klib/khash_python.h", "pandas/_libs/src/klib/khash.h"]
+ _pxi_dep["hashtable"]
),
},
"_libs.index": {
"pyxfile": "_libs/index",
"include": klib_include,
"depends": _pxi_dep["index"],
},
"_libs.indexing": {"pyxfile": "_libs/indexing"},
"_libs.internals": {"pyxfile": "_libs/internals"},
"_libs.interval": {
"pyxfile": "_libs/interval",
"include": klib_include,
"depends": _pxi_dep["interval"],
},
"_libs.join": {"pyxfile": "_libs/join", "include": klib_include},
"_libs.lib": {
"pyxfile": "_libs/lib",
"depends": lib_depends + tseries_depends,
"include": klib_include, # due to tokenizer import
"sources": ["pandas/_libs/src/parser/tokenizer.c"],
},
"_libs.missing": {"pyxfile": "_libs/missing", "depends": tseries_depends},
"_libs.parsers": {
"pyxfile": "_libs/parsers",
"include": klib_include + ["pandas/_libs/src"],
"depends": [
"pandas/_libs/src/parser/tokenizer.h",
"pandas/_libs/src/parser/io.h",
],
"sources": [
"pandas/_libs/src/parser/tokenizer.c",
"pandas/_libs/src/parser/io.c",
],
},
"_libs.reduction": {"pyxfile": "_libs/reduction"},
"_libs.ops": {"pyxfile": "_libs/ops"},
"_libs.ops_dispatch": {"pyxfile": "_libs/ops_dispatch"},
"_libs.properties": {"pyxfile": "_libs/properties"},
"_libs.reshape": {"pyxfile": "_libs/reshape", "depends": []},
"_libs.sparse": {"pyxfile": "_libs/sparse", "depends": _pxi_dep["sparse"]},
"_libs.tslib": {"pyxfile": "_libs/tslib", "depends": tseries_depends},
"_libs.tslibs.base": {"pyxfile": "_libs/tslibs/base"},
"_libs.tslibs.ccalendar": {"pyxfile": "_libs/tslibs/ccalendar"},
"_libs.tslibs.dtypes": {"pyxfile": "_libs/tslibs/dtypes"},
"_libs.tslibs.conversion": {
"pyxfile": "_libs/tslibs/conversion",
"depends": tseries_depends,
"sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
},
"_libs.tslibs.fields": {
"pyxfile": "_libs/tslibs/fields",
"depends": tseries_depends,
},
"_libs.tslibs.nattype": {"pyxfile": "_libs/tslibs/nattype"},
"_libs.tslibs.np_datetime": {
"pyxfile": "_libs/tslibs/np_datetime",
"depends": tseries_depends,
"sources": [
"pandas/_libs/tslibs/src/datetime/np_datetime.c",
"pandas/_libs/tslibs/src/datetime/np_datetime_strings.c",
],
},
"_libs.tslibs.offsets": {
"pyxfile": "_libs/tslibs/offsets",
"depends": tseries_depends,
},
"_libs.tslibs.parsing": {
"pyxfile": "_libs/tslibs/parsing",
"include": klib_include,
"depends": ["pandas/_libs/src/parser/tokenizer.h"],
"sources": ["pandas/_libs/src/parser/tokenizer.c"],
},
"_libs.tslibs.period": {
"pyxfile": "_libs/tslibs/period",
"depends": tseries_depends,
"sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
},
"_libs.tslibs.strptime": {
"pyxfile": "_libs/tslibs/strptime",
"depends": tseries_depends,
},
"_libs.tslibs.timedeltas": {
"pyxfile": "_libs/tslibs/timedeltas",
"depends": tseries_depends,
},
"_libs.tslibs.timestamps": {
"pyxfile": "_libs/tslibs/timestamps",
"depends": tseries_depends,
},
"_libs.tslibs.timezones": {"pyxfile": "_libs/tslibs/timezones"},
"_libs.tslibs.tzconversion": {
"pyxfile": "_libs/tslibs/tzconversion",
"depends": tseries_depends,
},
"_libs.tslibs.vectorized": {"pyxfile": "_libs/tslibs/vectorized"},
"_libs.testing": {"pyxfile": "_libs/testing"},
"_libs.window.aggregations": {
"pyxfile": "_libs/window/aggregations",
"language": "c++",
"suffix": ".cpp",
"depends": ["pandas/_libs/src/skiplist.h"],
},
"_libs.window.indexers": {"pyxfile": "_libs/window/indexers"},
"_libs.writers": {"pyxfile": "_libs/writers"},
"io.sas._sas": {"pyxfile": "io/sas/sas"},
}
extensions = []
for name, data in ext_data.items():
source_suffix = suffix if suffix == ".pyx" else data.get("suffix", ".c")
sources = [srcpath(data["pyxfile"], suffix=source_suffix, subdir="")]
sources.extend(data.get("sources", []))
include = data.get("include", [])
include.append(numpy.get_include())
undef_macros = []
if (
sys.platform == "zos"
and data.get("language") == "c++"
and os.path.basename(os.environ.get("CXX", "/bin/xlc++")) in ("xlc", "xlc++")
):
data.get("macros", macros).append(("__s390__", "1"))
extra_compile_args.append("-qlanglvl=extended0x:nolibext")
undef_macros.append("_POSIX_THREADS")
obj = Extension(
f"pandas.{name}",
sources=sources,
depends=data.get("depends", []),
include_dirs=include,
language=data.get("language", "c"),
define_macros=data.get("macros", macros),
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
undef_macros=undef_macros,
)
extensions.append(obj)
# ----------------------------------------------------------------------
# ujson
if suffix == ".pyx":
# undo dumb setuptools bug clobbering .pyx sources back to .c
for ext in extensions:
if ext.sources[0].endswith((".c", ".cpp")):
root, _ = os.path.splitext(ext.sources[0])
ext.sources[0] = root + suffix
ujson_ext = Extension(
"pandas._libs.json",
depends=[
"pandas/_libs/src/ujson/lib/ultrajson.h",
"pandas/_libs/src/ujson/python/date_conversions.h",
],
sources=(
[
"pandas/_libs/src/ujson/python/ujson.c",
"pandas/_libs/src/ujson/python/objToJSON.c",
"pandas/_libs/src/ujson/python/date_conversions.c",
"pandas/_libs/src/ujson/python/JSONtoObj.c",
"pandas/_libs/src/ujson/lib/ultrajsonenc.c",
"pandas/_libs/src/ujson/lib/ultrajsondec.c",
]
+ [
"pandas/_libs/tslibs/src/datetime/np_datetime.c",
"pandas/_libs/tslibs/src/datetime/np_datetime_strings.c",
]
),
include_dirs=[
"pandas/_libs/src/ujson/python",
"pandas/_libs/src/ujson/lib",
"pandas/_libs/src/datetime",
numpy.get_include(),
],
extra_compile_args=(["-D_GNU_SOURCE"] + extra_compile_args),
extra_link_args=extra_link_args,
define_macros=macros,
)
extensions.append(ujson_ext)
# ----------------------------------------------------------------------
if __name__ == "__main__":
# Freeze to support parallel compilation when using spawn instead of fork
multiprocessing.freeze_support()
setup(
version=versioneer.get_version(),
ext_modules=maybe_cythonize(extensions, compiler_directives=directives),
cmdclass=cmdclass,
)
|
|
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
import glob
import os
import re
import sys, urllib
import shutil
import tarfile
import zipfile
import subprocess
def cmake_command(build_dir, source_dir, cmake_params):
"""Invoke correct cmake commands to configure a build directory.
@param build_dir: out-of-source build directory. method will
chdir there before invoking cmake
@param source_dir: location of the source that will be built
@param cmake_params: string of "-Dparam1=value1 -Dparam2=value2" specifying
cmake parameters
"""
# first create correct cmake invocation
cmake = '%s %s' % (config.CMAKE_BINPATH, config.CMAKE_DEFAULT_PARAMS)
if len(config.CMAKE_PRE_VARS):
cmake = config.CMAKE_PRE_VARS + ' ' + cmake
# then go to build_dir
os.chdir(build_dir)
# then invoke cmake
ret = os.system("%s %s %s" %
(cmake, cmake_params, source_dir))
# on windows, we have to do this a second time (first time
# configures, second time generates)
if os.name == 'nt':
ret = os.system("%s %s %s" %
(cmake, cmake_params, source_dir))
return ret
def copy_glob(src_glob, dst_dir):
"""Copy all files and dirs included by src_glob into the directory specified in dst_dir.
e.g. usage: copy_glob('/etc/*', '/backup/my_etc/')
"""
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
if not os.path.isdir(dst_dir):
raise RuntimeError('%s is not a directory.' % (dst_dir,))
for fn in glob.glob(src_glob):
if os.path.isdir(fn):
# copytree needs full path in src and dst
# e.g. copytree('/build/dir/numpy', 'python/lib/site-packages/numpy')
shutil.copytree(fn,os.path.join(dst_dir,os.path.basename(fn)), symlinks=True)
else:
# shutil is clever enough to take a directory as destination
shutil.copy(fn, dst_dir)
def find_command_with_ver(name, command, ver_re):
"""Try to run command, use ver_re regular expression to parse for
the version string. This will print for example:
CVS: version 2.11 found.
@return: True if command found, False if not or if version could
not be parsed.
"""
retval = False
s,o = get_status_output(command)
if s:
msg2 = 'NOT FOUND!'
else:
mo = re.search(ver_re, o, re.MULTILINE)
if mo:
msg2 = 'version %s found.' % (mo.groups()[0],)
retval = True
else:
msg2 = 'could not extract version.'
output("%s: %s" % (name, msg2))
return retval
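# Usage sketch (hypothetical commands and version regexes, not part of the
# original module): report the versions of a couple of common build tools.
def _check_common_tools():
    checks = [
        ('CMake', 'cmake --version', r'cmake version ([0-9.]+)'),
        ('Git', 'git --version', r'git version ([0-9.]+)'),
    ]
    # find_command_with_ver() prints one "NAME: version X found." line per tool.
    return all([find_command_with_ver(n, c, v) for n, c, v in checks])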
def find_files(start_dir, re_pattern=r'.*\.(pyd|dll)', exclude_pats=[]):
"""Recursively find all files (not directories) with filenames
matching given regular expression. Case is ignored.
@param start_dir: search starts in this directory
@param re_pattern: regular expression with which all found files
will be matched. example: re_pattern = '.*\.(pyd|dll)' will match
all filenames ending in pyd or dll.
@param exclude_pats: if filename (without directory) matches any
one of these patterns, do not include it in the list
@return: list of fully qualified filenames that satisfy the
pattern
"""
cpat = re.compile(re_pattern, re.IGNORECASE)
found_files = []
excluded_files = []
for dirpath, dirnames, filenames in os.walk(start_dir):
ndirpath = os.path.normpath(os.path.abspath(dirpath))
for fn in filenames:
if cpat.match(fn):
# see if fn does not satisfy one of the exclude
# patterns
exclude_fn = False
for exclude_pat in exclude_pats:
if re.match(exclude_pat, fn, re.IGNORECASE):
exclude_fn = True
break
if not exclude_fn:
found_files.append(os.path.join(ndirpath,fn))
else:
excluded_files.append(os.path.join(ndirpath,fn))
return found_files, excluded_files
def get_status_output(command):
"""Run command, return output of command and exit code in status.
In general, status is None for success and 1 for command not
found.
"""
ph = os.popen(command)
output = ph.read()
status = ph.close()
return (status, output)
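# Hedged alternative sketch (not part of the original module): a close
# equivalent built on the already-imported subprocess module instead of
# os.popen; stderr is folded into the captured output here.
def get_status_output_subprocess(command):
    p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    output = p.communicate()[0]
    # mirror the os.popen convention: None for success, exit code otherwise
    status = None if p.returncode == 0 else p.returncode
    return (status, output)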
def output(message, rpad=0, rpad_char='#'):
s = "#####J> %s" % (message,)
pn = rpad - len(s)
if pn < 0:
pn = 0
p = pn * rpad_char
print "%s %s" % (s,p)
# flush the buffer, else things are out of sync in any log files
sys.stdout.flush()
def error(message):
raise RuntimeError('!!!!! %s' % (message,))
def file_exists(posix_file, nt_file):
"""Used to perform platform-specific file existence check.
"""
if os.name == 'posix':
fn = posix_file
else: # os.name == 'nt'
fn = nt_file
return os.path.exists(fn)
def human_size(num):
"""Method to convert number of bytes to human-readable version.
Code from http://blogmag.net/blog/read/38/Print_human_readable_file_size
"""
for x in ['bytes','KB','MB','GB','TB']:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
def make_command(solution_file, install=False, project=None,
win_buildtype=None):
"""Install packages can use this method to invoke the
platform-specific compile command. This can only be run after
config.init() has run.
@param solution_file: only used on Windows, ignored on *ix.
@param install: if true, invokes the make command to install the
built project.
@param project: Only build the named project on Windows. This
overrides the install setting!
@param win_buildtype: change the buildtype on windows, default
value is None, which gets translated to the value of
config.BUILD_TARGET.
"""
if os.name == 'posix':
if install:
make_command = '%s install' % (config.MAKE,)
else:
make_command = config.MAKE
else: # os.name == 'nt'
if install:
prj = 'INSTALL'
else:
prj = 'ALL_BUILD'
if project:
prj = project
if win_buildtype:
buildtype = win_buildtype
else:
buildtype = config.BUILD_TARGET
make_command = config.MAKE % \
(solution_file, prj, buildtype, buildtype)
return os.system(make_command)
def urlget(url, output_filename=None):
"""Simple method to retrieve URL. It will get the file in the current
directory.
If urlget guesses the wrong download filename based on the URL, pass
the output_filename parameter.
FIXME: this does not trap 404 errors. Seems the best way to do this is
to override FancyURLOpener with a new http_error_default
"""
def reporthook(blocknum, blocksize, totalsize):
current_size = blocknum * blocksize
current_size_kb = int(current_size / 1024.0)
sys.stdout.write(
'% 4.0f %% (%d Kbytes) downloaded\r' %
(current_size / float(totalsize) * 100.0, current_size_kb))
if output_filename:
filename = output_filename
else:
i = url.rfind('/')
filename = url[i+1:]
print url, "->", filename
if os.path.exists(filename):
output("%s already present, skipping download." % (filename,))
else:
urllib.urlretrieve(url, filename, reporthook)
sys.stdout.write("\n")
output("Download complete.")
return filename
def goto_archive():
os.chdir(config.archive_dir)
def goto_build():
os.chdir(config.build_dir)
def goto_inst():
os.chdir(config.inst_dir)
def unpack(archive_filename):
"""Unpacks given archive_filename in the current directory. It is
the caller's responsibility to make sure the current directory is
the desired destination.
It's preferable to make use of wrapper methods such as
unpack_build and unpack_install.
"""
tar = None
zip = None
if archive_filename.lower().endswith('bz2'):
m = 'r|bz2'
tar = tarfile.open(archive_filename, m)
elif archive_filename.lower().endswith('gz'):
m = 'r|gz'
tar = tarfile.open(archive_filename, m)
else:
zip = zipfile.ZipFile(archive_filename)
if tar:
# extractall is from python 2.5 onwards
# tar.extractall()
# we use a form that works on previous versions as well
for tarinfo in tar:
print tarinfo.name
tar.extract(tarinfo)
tar.close()
else:
for zipinfo in zip.infolist():
# first check if we need to create the directory housing
# the file
dn = os.path.dirname(zipinfo.filename)
if dn and not os.path.isdir(dn):
os.makedirs(dn)
# we only extract the file if it's not purely a directory
if not os.path.isdir(zipinfo.filename):
print "%s - %s" % (zipinfo.filename, \
human_size(zipinfo.file_size))
# have to write this in binary mode, else we screw up
# binaries (EXEs and such) quite badly. :)
f = open(zipinfo.filename, 'wb')
f.write(zip.read(zipinfo.filename))
f.close()
zip.close()
def unpack_archive(archive_filename):
"""Unpack given archive_filename in the archive (sources) directory.
"""
goto_archive()
unpack(archive_filename)
def unpack_build(archive_filename):
"""Unpack given archive_filename in build directory.
"""
goto_build()
unpack(archive_filename)
def unpack_inst(archive_filename):
"""Unpack given archive_filename in installation directory.
"""
goto_inst()
unpack(archive_filename)
def re_sub_filter_file(repls, filename):
"""Given a list of repls (tuples with regular expresions and
replacement patterns that are used as the first and second params
of re.sub), filter filename line by line.
A backup of the file is made to filename.orig.
"""
newfilename = '%s.new' % (filename,)
origfilename = '%s.orig' % (filename,)
shutil.copyfile(filename, origfilename)
ifile = file(filename)
ofile = file(newfilename, 'w')
for l in ifile:
for r in repls:
l = re.sub(r[0], r[1], l)
ofile.write(l)
ifile.close()
ofile.close()
shutil.copyfile(newfilename, filename)
os.unlink(newfilename)
os.unlink(origfilename)
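# Usage sketch (hypothetical file and pattern, not part of the original
# module): point a generated wrapper script at the final install prefix.
def _patch_install_prefix(wrapper_path, prefix):
    re_sub_filter_file([(r'^PREFIX=.*$', 'PREFIX=%s' % (prefix,))],
                       wrapper_path)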
def execute_in_vs_environment(post_commands, pre_commands='', communicate=''):
""" Executes the specified commands as if from the Visual Studio
command prompt. "vcvarsall.bat" needs to be on the PATH for this.
post_commands: Commands executed after setting up the environment.
This should be one string (separate using '&').
pre_commands: Executed before setting the environment.
communicate: Command sent to stdin after post_commands.
"""
if config.WINARCH == '64bit':
astr = 'amd64'
else:
astr = 'x86'
if pre_commands:
if pre_commands[-1] != '&':
pre_commands += '&'
if post_commands:
if post_commands[0] != '&':
post_commands = '&' + post_commands
p = subprocess.Popen('%s%s %s%s' % (
pre_commands,
"vcvarsall.bat",
astr,
post_commands),
shell=True, stdin=subprocess.PIPE)
if communicate:
p.communicate(communicate)
return p.wait()
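# Usage sketch (hypothetical solution file and build dir, not part of the
# original module): commands are chained with '&', so a directory change can
# be prepended before the actual build command.
def _build_with_msbuild(sln_path, build_dir):
    return execute_in_vs_environment(
        'msbuild %s /p:Configuration=Release' % (sln_path,),
        pre_commands='cd /d %s' % (build_dir,))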
|
|
import time
import uuid
from datetime import datetime
from unittest import SkipTest
from django.conf import settings
from django.db import connections, DEFAULT_DB_ALIAS
from django.test import TestCase
from corehq.apps.change_feed.data_sources import get_document_store_for_doc_type
from corehq.form_processor.backends.sql.dbaccessors import (
FormReindexAccessor, CaseReindexAccessor,
LedgerAccessorSQL, LedgerReindexAccessor
)
from corehq.form_processor.models import LedgerValue, CommCareCase
from corehq.form_processor.tests.utils import FormProcessorTestUtils, create_form_for_test, sharded
class BaseReindexAccessorTest(object):
accessor_class = None
doc_type = None
@classmethod
def setUpClass(cls):
if settings.USE_PARTITIONED_DATABASE:
# https://github.com/nose-devs/nose/issues/946
raise SkipTest('Only applicable if no sharding is set up')
super(BaseReindexAccessorTest, cls).setUpClass()
cls.domain = uuid.uuid4().hex
cls.other_domain = uuid.uuid4().hex
# since this test depends on the global form list just wipe everything
FormProcessorTestUtils.delete_all_sql_forms()
FormProcessorTestUtils.delete_all_ledgers()
FormProcessorTestUtils.delete_all_sql_cases()
@classmethod
def setup_reindexers(cls):
cls.first_batch_domain = cls._get_doc_ids(cls._create_docs(cls.domain, 4))
batch = cls._create_docs(cls.other_domain, 4)
cls.first_batch_global = cls.first_batch_domain + cls._get_doc_ids(batch)
cls.middle_id = batch[-1].pk
time.sleep(.02)
cls.second_batch_domain = cls._get_doc_ids(cls._create_docs(cls.domain, 4))
batch = cls._create_docs(cls.other_domain, 4)
cls.second_batch_global = cls.second_batch_domain + cls._get_doc_ids(batch)
time.sleep(.02)
cls.end_id = batch[-1].pk
cls.all_doc_ids = cls.first_batch_global + cls.second_batch_global
cls.all_doc_ids_domain = cls.first_batch_domain + cls.second_batch_domain
cls._analyse()
@classmethod
def _analyse(cls):
db_cursor = connections[DEFAULT_DB_ALIAS].cursor()
with db_cursor as cursor:
cursor.execute('ANALYSE') # the doc count query relies on this
@classmethod
def tearDownClass(cls):
FormProcessorTestUtils.delete_all_sql_forms()
FormProcessorTestUtils.delete_all_ledgers()
FormProcessorTestUtils.delete_all_sql_cases()
super(BaseReindexAccessorTest, cls).tearDownClass()
def _get_docs(self, last_doc_pk=None, limit=500):
return self.accessor_class().get_docs(None, last_doc_pk=last_doc_pk, limit=limit)
def _get_docs_for_domain(self, domain, last_doc_pk=None, limit=500):
return self.accessor_class(domain=domain).get_docs(None, last_doc_pk=last_doc_pk, limit=limit)
def test_get_docs(self):
docs = self._get_docs()
self.assertEqual(len(self.all_doc_ids), len(docs))
self.assertEqual(set(self._get_doc_ids(docs)),
set(self.all_doc_ids))
docs = self._get_docs(self.middle_id)
self.assertEqual(8, len(docs))
self.assertEqual(set(self._get_doc_ids(docs)),
set(self.second_batch_global))
self.assertEqual(0, len(self._get_docs(self.end_id)))
def test_get_docs_for_domain(self):
docs = self._get_docs_for_domain(self.domain, None)
self.assertEqual(len(self.all_doc_ids_domain), len(docs))
self.assertEqual(set(self._get_doc_ids(docs)),
set(self.all_doc_ids_domain))
docs = self._get_docs_for_domain(self.domain, self.middle_id)
self.assertEqual(len(self.second_batch_domain), len(docs))
self.assertEqual(set(self._get_doc_ids(docs)),
set(self.second_batch_domain))
self.assertEqual(0, len(self._get_docs_for_domain(self.domain, self.end_id)))
def test_ids_only(self):
doc_ids = [row.doc_id for row in self.accessor_class().get_doc_ids(None)]
self.assertListEqual(doc_ids, self.all_doc_ids)
def test_limit(self):
docs = self._get_docs(limit=2)
self.assertEqual(2, len(docs))
self.assertEqual(self._get_doc_ids(docs), self.first_batch_global[:2])
def test_last_doc_pk(self):
docs = self._get_docs(self.middle_id, limit=2)
self.assertEqual(self._get_doc_ids(docs), self.second_batch_global[:2])
last_doc = self.accessor_class().get_doc(self.second_batch_global[0])
docs = self._get_docs(last_doc_pk=last_doc.pk, limit=2)
self.assertEqual(self._get_doc_ids(docs), self.second_batch_global[1:3])
def test_get_doc_count(self):
self.assertEqual(16, self.accessor_class().get_approximate_doc_count(DEFAULT_DB_ALIAS))
def test_get_doc_count_domain(self):
self.assertEqual(8, self.accessor_class(domain=self.domain).get_approximate_doc_count(DEFAULT_DB_ALIAS))
def test_doc_store(self):
doc_store = get_document_store_for_doc_type(self.domain, self.doc_type)
self.assertSetEqual(set(self.all_doc_ids_domain), set(doc_store.iter_document_ids()))
@sharded
class UnshardedCaseReindexAccessorTests(BaseReindexAccessorTest, TestCase):
accessor_class = CaseReindexAccessor
doc_type = 'CommCareCase'
@classmethod
def setUpClass(cls):
super(UnshardedCaseReindexAccessorTests, cls).setUpClass()
cls.setup_reindexers()
@classmethod
def _create_docs(cls, domain, count):
case_ids = [uuid.uuid4().hex for i in range(count)]
[create_form_for_test(domain, case_id=case_id) for case_id in case_ids]
return CommCareCase.objects.get_cases(case_ids, ordered=True)
@classmethod
def _get_doc_ids(cls, docs):
return [doc.case_id for doc in docs]
@sharded
class UnshardedFormReindexAccessorTests(BaseReindexAccessorTest, TestCase):
accessor_class = FormReindexAccessor
doc_type = 'XFormInstance'
@classmethod
def setUpClass(cls):
super(UnshardedFormReindexAccessorTests, cls).setUpClass()
cls.setup_reindexers()
@classmethod
def _create_docs(cls, domain, count):
return [create_form_for_test(domain) for i in range(count)]
@classmethod
def _get_doc_ids(cls, docs):
return [doc.form_id for doc in docs]
@sharded
class UnshardedLedgerReindexAccessorTests(BaseReindexAccessorTest, TestCase):
accessor_class = LedgerReindexAccessor
doc_type = 'ledger'
@classmethod
def setUpClass(cls):
super(UnshardedLedgerReindexAccessorTests, cls).setUpClass()
cls.setup_reindexers()
@classmethod
def _create_docs(cls, domain, count):
return [_create_ledger(domain, 'product_a', 10) for i in range(count)]
@classmethod
def _get_doc_ids(cls, docs):
return [doc.ledger_reference.as_id() for doc in docs]
def _create_ledger(domain, entry_id, balance, case_id=None, section_id='stock'):
user_id = 'user1'
utcnow = datetime.utcnow()
case_id = case_id or uuid.uuid4().hex
case = CommCareCase(
case_id=case_id,
domain=domain,
type='',
owner_id=user_id,
opened_on=utcnow,
modified_on=utcnow,
modified_by=user_id,
server_modified_on=utcnow,
)
case.save(with_tracked_models=True)
ledger = LedgerValue(
domain=domain,
case_id=case_id,
section_id=section_id,
entry_id=entry_id,
balance=balance,
last_modified=utcnow
)
LedgerAccessorSQL.save_ledger_values([ledger])
return ledger
|
|
import mock
from stem import Flag
from twisted.internet import defer
from twisted.trial import unittest
from oppy.path import path
from oppy.path import util as path_util
from oppy.path.exceptions import NoUsableGuardsException
class PathTest(unittest.TestCase):
@mock.patch('oppy.path.path.selectExitNode', return_value='exit_fprint')
@mock.patch('oppy.path.path.selectGuardNode', return_value='guard_fprint')
@mock.patch('oppy.path.path.selectMiddleNode', return_value='middle_fprint')
def test_getPath(self, mock_sm, mock_sg, mock_se):
mock_ns = mock.Mock()
mock_gm = mock.Mock()
mock_exit_rs = mock.Mock()
mock_exit_rs.digest = 'exit digest'
mock_middle_rs = mock.Mock()
mock_middle_rs.digest = 'middle digest'
mock_guard_rs = mock.Mock()
mock_guard_rs.digest = 'guard digest'
mock_consensus = mock.Mock()
mock_consensus.routers = {'exit_fprint': mock_exit_rs,
'middle_fprint': mock_middle_rs,
'guard_fprint': mock_guard_rs}
mock_consensus.bandwidth_weights = 'bandwidth weights'
mock_exit_request = mock.Mock()
mock_exit_request.port = 'port'
mock_descriptors = {'exit digest': 'exit_fprint',
'middle digest': 'middle_fprint',
'guard digest': 'guard_fprint'}
mock_ns.getMicroconsensus = mock.Mock()
mock_ns.getMicroconsensus.return_value = defer.succeed(mock_consensus)
mock_ns.getMicrodescriptorsForCircuit = mock.Mock()
mock_ns.getMicrodescriptorsForCircuit.return_value = \
defer.succeed(mock_descriptors)
mock_gm.getUsableGuards = mock.Mock()
mock_gm.getUsableGuards.return_value = defer.succeed('guards')
mock_fast = True
mock_stable = True
mock_internal = False
mock_bwweightscale = path.DEFAULT_BWWEIGHTSCALE
ret = path.getPath(mock_ns, mock_gm, mock_exit_request, mock_fast,
mock_stable, mock_internal)
mock_se.assert_called_once_with(mock_consensus.bandwidth_weights,
mock_bwweightscale,
mock_consensus.routers,
mock_descriptors,
mock_fast,
mock_stable,
mock_internal,
mock_exit_request.port)
mock_sg.assert_called_once_with(mock_consensus.routers,
mock_descriptors,
'guards',
mock_fast,
mock_stable,
'exit_fprint',
mock_exit_rs)
mock_sm.assert_called_once_with(mock_consensus.bandwidth_weights,
mock_bwweightscale,
mock_consensus.routers,
mock_descriptors,
mock_fast,
mock_stable,
'exit_fprint',
mock_exit_rs,
'guard_fprint',
mock_guard_rs)
self.assertEqual(self.successResultOf(ret),
path.Path(path.PathNode('guard_fprint', mock_guard_rs),
path.PathNode('middle_fprint', mock_middle_rs),
path.PathNode('exit_fprint', mock_exit_rs)))
@mock.patch('oppy.path.path.selectExitNode', side_effect=ValueError())
def test_getPath_exception(self, mock_se):
mock_ns = mock.Mock()
mock_gm = mock.Mock()
mock_exit_rs = mock.Mock()
mock_exit_rs.digest = 'exit digest'
mock_middle_rs = mock.Mock()
mock_middle_rs.digest = 'middle digest'
mock_guard_rs = mock.Mock()
mock_guard_rs.digest = 'guard digest'
mock_consensus = mock.Mock()
mock_consensus.routers = {'exit_fprint': mock_exit_rs,
'middle_fprint': mock_middle_rs,
'guard_fprint': mock_guard_rs}
mock_consensus.bandwidth_weights = 'bandwidth weights'
mock_exit_request = mock.Mock()
mock_exit_request.port = 'port'
mock_descriptors = {'exit digest': 'exit_fprint',
'middle digest': 'middle_fprint',
'guard digest': 'guard_fprint'}
mock_ns.getMicroconsensus = mock.Mock()
mock_ns.getMicroconsensus.return_value = defer.succeed(mock_consensus)
mock_ns.getMicrodescriptorsForCircuit = mock.Mock()
mock_ns.getMicrodescriptorsForCircuit.return_value = \
defer.succeed(mock_descriptors)
mock_gm.getUsableGuards = mock.Mock()
mock_gm.getUsableGuards.return_value = defer.succeed('guards')
mock_fast = True
mock_stable = True
mock_internal = False
mock_bwweightscale = path.DEFAULT_BWWEIGHTSCALE
self.assertEqual(
self.failureResultOf(
path.getPath(mock_ns,
mock_gm,
mock_exit_request,
mock_fast,
mock_stable,
mock_internal))\
.trap(path.PathSelectionFailedException),
path.PathSelectionFailedException)
@mock.patch('oppy.path.util.getPositionWeights', return_value='weights')
@mock.patch('oppy.path.util.getWeightedNodes', return_value='nodes')
@mock.patch('oppy.path.util.selectWeightedNode', return_value='blah')
@mock.patch('oppy.path.path.filterExits', return_value=['exit1', 'exit2'])
def test_selectExitNode(self, mock_fe, mock_swn, mock_gwn, mock_gpw):
bw_weights = 'bw_weights'
bwweightscale = 'bwweightscale'
cons_rel_stats = 'cons_rel_stats'
descriptors = 'descriptors'
fast = 'fast'
stable = 'stable'
internal = 'internal'
port = 'port'
_ = path.selectExitNode(
bw_weights,
bwweightscale,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port)
mock_fe.assert_called_once_with(
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port)
mock_gpw.assert_called_once_with(
['exit1', 'exit2'],
cons_rel_stats,
'e',
bw_weights,
bwweightscale)
mock_gwn.assert_called_once_with(['exit1', 'exit2'], 'weights')
mock_swn.assert_called_once_with('nodes')
@mock.patch('oppy.path.util.getPositionWeights', return_value='weights')
@mock.patch('oppy.path.util.getWeightedNodes', return_value='nodes')
@mock.patch('oppy.path.util.selectWeightedNode', return_value='blah')
@mock.patch('oppy.path.path.filterExits', return_value=['exit1', 'exit2'])
def test_selectExitNode_internal(self, mock_fe, mock_swn, mock_gwn, mock_gpw):
bw_weights = 'bw_weights'
bwweightscale = 'bwweightscale'
cons_rel_stats = 'cons_rel_stats'
descriptors = 'descriptors'
fast = 'fast'
stable = 'stable'
internal = True
port = 'port'
_ = path.selectExitNode(
bw_weights,
bwweightscale,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port)
mock_fe.assert_called_once_with(
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port)
mock_gpw.assert_called_once_with(
['exit1', 'exit2'],
cons_rel_stats,
'm',
bw_weights,
bwweightscale)
@mock.patch('oppy.path.path.filterExits', return_value=[])
def test_selectExitNode_no_exits(self, mock_fe):
bw_weights = 'bw_weights'
bwweightscale = 'bwweightscale'
cons_rel_stats = 'cons_rel_stats'
descriptors = 'descriptors'
fast = 'fast'
stable = 'stable'
internal = 'internal'
port = 'port'
self.assertRaises(ValueError,
path.selectExitNode,
bw_weights,
bwweightscale,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port)
@mock.patch('oppy.path.util.getPositionWeights', return_value='weights')
@mock.patch('oppy.path.util.getWeightedNodes', return_value='nodes')
@mock.patch('oppy.path.util.selectWeightedNode', return_value='blah')
@mock.patch('oppy.path.path.filterExits', return_value=['exit1'])
def test_selectExitNode_one_exit(self, mock_fe, mock_swn, mock_gwn, mock_gpw):
bw_weights = 'bw_weights'
bwweightscale = 'bwweightscale'
cons_rel_stats = 'cons_rel_stats'
descriptors = 'descriptors'
fast = 'fast'
stable = 'stable'
internal = 'internal'
port = 'port'
ret = path.selectExitNode(
bw_weights,
bwweightscale,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port)
self.assertEqual(ret, 'exit1')
mock_fe.assert_called_once_with(
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port)
self.assertEqual(mock_gpw.call_count, 0)
self.assertEqual(mock_gwn.call_count, 0)
self.assertEqual(mock_swn.call_count, 0)
@mock.patch('random.choice')
@mock.patch('oppy.path.path.guardFilter', return_value=True)
def test_selectGuardNode(self, mock_gf, mock_rc):
cons_rel_stats = 'cons_rel_stats'
descriptors = 'descriptors'
guards = ['guard']
fast = 'fast'
stable = 'stable'
exit_desc = 'exit_desc'
exit_status_entry = 'exit_status_entry'
_ = path.selectGuardNode(
cons_rel_stats,
descriptors,
guards,
fast,
stable,
exit_desc,
exit_status_entry)
mock_gf.assert_called_once_with(
'guard',
cons_rel_stats,
descriptors,
fast,
stable,
exit_desc,
exit_status_entry)
mock_rc.assert_called_once_with(['guard'])
def test_selectGuardNode_no_guards(self):
cons_rel_stats = 'cons_rel_stats'
descriptors = 'descriptors'
guards = []
fast = 'fast'
stable = 'stable'
exit_desc = 'exit_desc'
exit_status_entry = 'exit_status_entry'
self.assertRaises(NoUsableGuardsException,
path.selectGuardNode,
cons_rel_stats,
descriptors,
guards,
fast,
stable,
exit_desc,
exit_status_entry)
@mock.patch('oppy.path.util.getPositionWeights', return_value='weights')
@mock.patch('oppy.path.util.getWeightedNodes', return_value='nodes')
@mock.patch('oppy.path.util.selectWeightedNode')
@mock.patch('oppy.path.path.filterMiddles', return_value=['middle1', 'middle2'])
def test_selectMiddleNode(self, mock_fm, mock_swn, mock_gwn, mock_gpw):
bw_weights = 'bw_weights'
bwweightscale = 'bwweightscale'
cons_rel_stats = 'cons_rel_stats'
descriptors = 'descriptors'
fast = 'fast'
stable = 'stable'
exit_desc = 'exit_desc'
exit_status_entry = 'exit_status_entry'
guard_desc = 'guard_desc'
guard_status_entry = 'guard_status_entry'
_ = path.selectMiddleNode(
bw_weights,
bwweightscale,
cons_rel_stats,
descriptors,
fast,
stable,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry)
mock_fm.assert_called_once_with(
cons_rel_stats,
descriptors,
fast,
stable,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry)
mock_gpw.assert_called_once_with(
['middle1', 'middle2'],
cons_rel_stats,
'm',
bw_weights,
bwweightscale)
mock_gwn.assert_called_once_with(['middle1', 'middle2'], 'weights')
mock_swn.assert_called_once_with('nodes')
@mock.patch('oppy.path.util.getPositionWeights', return_value='weights')
@mock.patch('oppy.path.util.getWeightedNodes', return_value='nodes')
@mock.patch('oppy.path.util.selectWeightedNode')
@mock.patch('oppy.path.path.filterMiddles', return_value=[])
def test_selectMiddleNode_no_nodes(self, mock_fm, mock_swn, mock_gwn, mock_gpw):
bw_weights = 'bw_weights'
bwweightscale = 'bwweightscale'
cons_rel_stats = 'cons_rel_stats'
descriptors = 'descriptors'
fast = 'fast'
stable = 'stable'
exit_desc = 'exit_desc'
exit_status_entry = 'exit_status_entry'
guard_desc = 'guard_desc'
guard_status_entry = 'guard_status_entry'
self.assertRaises(ValueError,
path.selectMiddleNode,
bw_weights,
bwweightscale,
cons_rel_stats,
descriptors,
fast,
stable,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry)
@mock.patch('oppy.path.util.getPositionWeights', return_value='weights')
@mock.patch('oppy.path.util.getWeightedNodes', return_value='nodes')
@mock.patch('oppy.path.util.selectWeightedNode', return_value='blah')
@mock.patch('oppy.path.path.filterMiddles', return_value=['middle'])
def test_selectMiddleNode_one_node(self, mock_fm, mock_swn, mock_gwn, mock_gpw):
bw_weights = 'bw_weights'
bwweightscale = 'bwweightscale'
cons_rel_stats = 'cons_rel_stats'
descriptors = 'descriptors'
fast = 'fast'
stable = 'stable'
exit_desc = 'exit_desc'
exit_status_entry = 'exit_status_entry'
guard_desc = 'guard_desc'
guard_status_entry = 'guard_status_entry'
ret = path.selectMiddleNode(
bw_weights,
bwweightscale,
cons_rel_stats,
descriptors,
fast,
stable,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry)
self.assertEqual(ret, 'middle')
mock_fm.assert_called_once_with(
cons_rel_stats,
descriptors,
fast,
stable,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry)
self.assertEqual(mock_gpw.call_count, 0)
self.assertEqual(mock_gwn.call_count, 0)
self.assertEqual(mock_swn.call_count, 0)
@mock.patch('oppy.path.path.exitFilter', return_value=True)
def test_filterExits(self, mock_ef):
cons_rel_stats = ['test']
descriptors = 'descriptors'
fast = 'fast'
stable = 'stable'
internal = 'internal'
port = 'port'
ret = path.filterExits(
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port)
self.assertEqual(ret, ['test'])
mock_ef.assert_called_once_with(
'test',
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port)
@mock.patch('oppy.path.path.middleFilter', return_value=True)
def test_filterMiddles(self, mock_mf):
cons_rel_stats = ['test']
descriptors = 'descriptors'
fast = 'fast'
stable = 'stable'
exit_desc = 'exit_desc'
exit_status_entry = 'exit_status_entry'
guard_desc = 'guard_desc'
guard_status_entry = 'guard_status_entry'
ret = path.filterMiddles(
cons_rel_stats,
descriptors,
fast,
stable,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry)
self.assertEqual(ret, ['test'])
mock_mf.assert_called_once_with(
'test',
cons_rel_stats,
descriptors,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry,
fast,
stable)
def test_exitFilter_no_consensus_entry(self):
exit_fprint = 'exit_fprint'
cons_rel_stats = {}
descriptors = {'exit_fprint': 'exit_desc'}
fast = True
stable = True
internal = False
port = 0
self.assertFalse(path.exitFilter(
exit_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port))
def test_exitFilter_no_descriptor(self):
exit_fprint = 'exit_fprint'
rel_stat = mock.Mock()
rel_stat.digest = 'exit_digest'
cons_rel_stats = {'exit_fprint': rel_stat}
descriptors = {}
fast = True
stable = True
internal = False
port = 0
self.assertFalse(path.exitFilter(
exit_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port))
def test_exitFilter_no_ntor_key(self):
exit_fprint = 'exit_fprint'
rel_stat = mock.Mock()
rel_stat.digest = 'exit_digest'
desc = mock.Mock()
desc.ntor_onion_key = None
cons_rel_stats = {'exit_fprint': rel_stat}
descriptors = {'exit_digest': desc}
fast = True
stable = True
internal = False
port = 0
self.assertFalse(path.exitFilter(
exit_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port))
def test_exitFilter_badexit_flag(self):
exit_fprint = 'exit_fprint'
rel_stat = mock.Mock()
rel_stat.flags = (Flag.BADEXIT,)
rel_stat.digest = 'exit_digest'
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-key'
cons_rel_stats = {'exit_fprint': rel_stat}
descriptors = {'exit_digest': desc}
fast = True
stable = True
internal = False
port = 0
self.assertFalse(path.exitFilter(
exit_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port))
def test_exitFilter_not_running(self):
exit_fprint = 'exit_fprint'
rel_stat = mock.Mock()
rel_stat.flags = ()
rel_stat.digest = 'exit_digest'
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-key'
cons_rel_stats = {'exit_fprint': rel_stat}
descriptors = {'exit_digest': desc}
fast = True
stable = True
internal = False
port = 0
self.assertFalse(path.exitFilter(
exit_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port))
def test_exitFilter_not_valid(self):
exit_fprint = 'exit_fprint'
rel_stat = mock.Mock()
rel_stat.flags = (Flag.RUNNING,)
rel_stat.digest = 'exit_digest'
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-key'
cons_rel_stats = {'exit_fprint': rel_stat}
descriptors = {'exit_digest': desc}
fast = True
stable = True
internal = False
port = 0
self.assertFalse(path.exitFilter(
exit_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port))
def test_exitFilter_not_fast_want_fast(self):
exit_fprint = 'exit_fprint'
rel_stat = mock.Mock()
rel_stat.flags = (Flag.RUNNING, Flag.VALID)
rel_stat.digest = 'exit_digest'
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-key'
cons_rel_stats = {'exit_fprint': rel_stat}
descriptors = {'exit_digest': desc}
fast = True
stable = True
internal = False
port = 0
self.assertFalse(path.exitFilter(
exit_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port))
def test_exitFilter_not_stable_want_stable(self):
exit_fprint = 'exit_fprint'
rel_stat = mock.Mock()
rel_stat.flags = (Flag.RUNNING, Flag.VALID, Flag.FAST)
rel_stat.digest = 'exit_digest'
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-key'
cons_rel_stats = {'exit_fprint': rel_stat}
descriptors = {'exit_digest': desc}
fast = True
stable = True
internal = False
port = 0
self.assertFalse(path.exitFilter(
exit_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port))
def test_exitFilter_internal(self):
exit_fprint = 'exit_fprint'
rel_stat = mock.Mock()
rel_stat.flags = (Flag.RUNNING, Flag.VALID, Flag.STABLE, Flag.FAST)
rel_stat.digest = 'exit_digest'
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-key'
desc.exit_policy = mock.Mock()
desc.exit_policy.can_exit_to = mock.Mock()
desc.exit_policy.can_exit_to.return_value = False
desc.exit_policy.is_exiting_allowed = False
cons_rel_stats = {'exit_fprint': rel_stat}
descriptors = {'exit_digest': desc}
fast = True
stable = True
internal = True
port = 0
self.assertTrue(path.exitFilter(
exit_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port))
def test_exitFilter_have_port(self):
exit_fprint = 'exit_fprint'
rel_stat = mock.Mock()
rel_stat.flags = (Flag.RUNNING, Flag.VALID, Flag.STABLE, Flag.FAST)
rel_stat.digest = 'exit_digest'
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-key'
desc.exit_policy = mock.Mock()
desc.exit_policy.can_exit_to = mock.Mock()
desc.exit_policy.can_exit_to.return_value = 'test retval'
desc.exit_policy.is_exiting_allowed = False
cons_rel_stats = {'exit_fprint': rel_stat}
descriptors = {'exit_digest': desc}
fast = True
stable = True
internal = False
port = 0
self.assertEqual(path.exitFilter(
exit_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port),
'test retval')
def test_exitFilter_no_port(self):
exit_fprint = 'exit_fprint'
rel_stat = mock.Mock()
rel_stat.flags = (Flag.RUNNING, Flag.VALID, Flag.STABLE, Flag.FAST)
rel_stat.digest = 'exit_digest'
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-key'
desc.exit_policy = mock.Mock()
desc.exit_policy.can_exit_to = mock.Mock()
desc.exit_policy.can_exit_to.return_value = False
desc.exit_policy.is_exiting_allowed = 'test retval'
cons_rel_stats = {'exit_fprint': rel_stat}
descriptors = {'exit_digest': desc}
fast = True
stable = True
internal = False
port = None
self.assertEqual(path.exitFilter(
exit_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
internal,
port),
'test retval')
def test_guardFilter_no_consensus_entry(self):
guard_fprint = 'guard_fprint'
cons_rel_stats = {}
descriptors = {'guard_fprint': 'guard_desc'}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
self.assertFalse(path.guardFilter(
guard_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
exit_desc,
exit_status_entry))
def test_guardFilter_no_descriptor(self):
guard_fprint = 'guard fprint'
rel_stat = mock.Mock()
rel_stat.digest = 'guard digest'
cons_rel_stats = {'guard fprint': rel_stat}
descriptors = {}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
self.assertFalse(path.guardFilter(
guard_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
exit_desc,
exit_status_entry))
def test_guardFilter_want_fast_no_fast(self):
guard_fprint = 'guard fprint'
rel_stat = mock.Mock()
rel_stat.digest = 'guard digest'
rel_stat.flags = ()
cons_rel_stats = {'guard fprint': rel_stat}
descriptors = {'guard digest': 'guard desc'}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
self.assertFalse(path.guardFilter(
guard_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
exit_desc,
exit_status_entry))
def test_guardFilter_want_stable_no_stable(self):
guard_fprint = 'guard fprint'
rel_stat = mock.Mock()
rel_stat.digest = 'guard digest'
rel_stat.flags = (Flag.FAST,)
cons_rel_stats = {'guard fprint': rel_stat}
descriptors = {'guard digest': 'guard desc'}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
self.assertFalse(path.guardFilter(
guard_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
exit_desc,
exit_status_entry))
@mock.patch('oppy.path.util.nodeUsableWithOther', return_value='test val')
def test_guardFilter(self, mock_nuwo):
guard_fprint = 'guard fprint'
rel_stat = mock.Mock()
rel_stat.digest = 'guard digest'
rel_stat.flags = (Flag.FAST, Flag.STABLE)
cons_rel_stats = {'guard fprint': rel_stat}
descriptors = {'guard digest': 'guard desc'}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
self.assertEqual(path.guardFilter(
guard_fprint,
cons_rel_stats,
descriptors,
fast,
stable,
exit_desc,
exit_status_entry), 'test val')
def test_middleFilter_no_consensus_entry(self):
middle_fprint = 'middle fprint'
cons_rel_stats = {}
descriptors = {'middle fprint': 'middle desc'}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
guard_desc = 'guard desc'
guard_status_entry = 'guard se'
self.assertFalse(path.middleFilter(
middle_fprint,
cons_rel_stats,
descriptors,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry,
fast,
stable))
def test_middleFilter_no_descriptor(self):
middle_fprint = 'middle fprint'
rel_stat = mock.Mock()
rel_stat.digest = 'middle digest'
cons_rel_stats = {'middle fprint': rel_stat}
descriptors = {}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
guard_desc = 'guard desc'
guard_status_entry = 'guard se'
self.assertFalse(path.middleFilter(
middle_fprint,
cons_rel_stats,
descriptors,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry,
fast,
stable))
def test_middleFilter_no_ntor_key(self):
middle_fprint = 'middle fprint'
rel_stat = mock.Mock()
rel_stat.digest = 'middle digest'
cons_rel_stats = {'middle fprint': rel_stat}
desc = mock.Mock()
desc.ntor_onion_key = None
descriptors = {'middle digest': desc}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
guard_desc = 'guard desc'
guard_status_entry = 'guard se'
self.assertFalse(path.middleFilter(
middle_fprint,
cons_rel_stats,
descriptors,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry,
fast,
stable))
def test_middleFilter_not_running(self):
middle_fprint = 'middle fprint'
rel_stat = mock.Mock()
rel_stat.flags = ()
rel_stat.digest = 'middle digest'
cons_rel_stats = {'middle fprint': rel_stat}
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-onion-key'
descriptors = {'middle digest': desc}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
guard_desc = 'guard desc'
guard_status_entry = 'guard se'
self.assertFalse(path.middleFilter(
middle_fprint,
cons_rel_stats,
descriptors,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry,
fast,
stable))
def test_middleFilter_want_fast_no_fast(self):
middle_fprint = 'middle fprint'
rel_stat = mock.Mock()
rel_stat.flags = (Flag.RUNNING,)
rel_stat.digest = 'middle digest'
cons_rel_stats = {'middle fprint': rel_stat}
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-onion-key'
descriptors = {'middle digest': desc}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
guard_desc = 'guard desc'
guard_status_entry = 'guard se'
self.assertFalse(path.middleFilter(
middle_fprint,
cons_rel_stats,
descriptors,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry,
fast,
stable))
def test_middleFilter_want_stable_no_stable(self):
middle_fprint = 'middle fprint'
rel_stat = mock.Mock()
rel_stat.flags = (Flag.RUNNING, Flag.FAST)
rel_stat.digest = 'middle digest'
cons_rel_stats = {'middle fprint': rel_stat}
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-onion-key'
descriptors = {'middle digest': desc}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
guard_desc = 'guard desc'
guard_status_entry = 'guard se'
self.assertFalse(path.middleFilter(
middle_fprint,
cons_rel_stats,
descriptors,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry,
fast,
stable))
@mock.patch('oppy.path.util.nodeUsableWithOther', return_value=False)
def test_middleFilter_not_usable_with_exit(self, mock_nuwo):
middle_fprint = 'middle fprint'
rel_stat = mock.Mock()
rel_stat.flags = (Flag.RUNNING, Flag.FAST, Flag.STABLE)
rel_stat.digest = 'middle digest'
cons_rel_stats = {'middle fprint': rel_stat}
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-onion-key'
descriptors = {'middle digest': desc}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
guard_desc = 'guard desc'
guard_status_entry = 'guard se'
self.assertFalse(path.middleFilter(
middle_fprint,
cons_rel_stats,
descriptors,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry,
fast,
stable))
@mock.patch('oppy.path.util.nodeUsableWithOther', return_value='test val')
def test_middleFilter(self, mock_nuwo):
middle_fprint = 'middle fprint'
rel_stat = mock.Mock()
rel_stat.flags = (Flag.RUNNING, Flag.FAST, Flag.STABLE)
rel_stat.digest = 'middle digest'
cons_rel_stats = {'middle fprint': rel_stat}
desc = mock.Mock()
desc.ntor_onion_key = 'ntor-onion-key'
descriptors = {'middle digest': desc}
fast = True
stable = True
exit_desc = 'exit desc'
exit_status_entry = 'exit se'
guard_desc = 'guard desc'
guard_status_entry = 'guard se'
self.assertEqual(path.middleFilter(
middle_fprint,
cons_rel_stats,
descriptors,
exit_desc,
exit_status_entry,
guard_desc,
guard_status_entry,
fast,
stable), 'test val')
|
|
# -*- coding: utf-8 -*-
"""TimeDelayingRidge class."""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# Ross Maddox <ross.maddox@rochester.edu>
#
# License: BSD (3-clause)
import numpy as np
from scipy import linalg
from .base import BaseEstimator
from ..cuda import _setup_cuda_fft_multiply_repeated
from ..filter import next_fast_len
from ..fixes import jit
from ..parallel import check_n_jobs
from ..utils import warn, ProgressBar, logger
def _compute_corrs(X, y, smin, smax, n_jobs=1, fit_intercept=False,
edge_correction=True):
"""Compute auto- and cross-correlations."""
if fit_intercept:
# We could do this in the Fourier domain, too, but it should
# be a bit cleaner numerically to do it here.
X_offset = np.mean(X, axis=0)
y_offset = np.mean(y, axis=0)
if X.ndim == 3:
X_offset = X_offset.mean(axis=0)
y_offset = np.mean(y_offset, axis=0)
X = X - X_offset
y = y - y_offset
else:
X_offset = y_offset = 0.
if X.ndim == 2:
assert y.ndim == 2
X = X[:, np.newaxis, :]
y = y[:, np.newaxis, :]
assert X.shape[:2] == y.shape[:2]
len_trf = smax - smin
len_x, n_epochs, n_ch_x = X.shape
len_y, n_epochs_y, n_ch_y = y.shape
assert len_x == len_y
n_fft = next_fast_len(2 * X.shape[0] - 1)
n_jobs, cuda_dict = _setup_cuda_fft_multiply_repeated(
n_jobs, [1.], n_fft, 'correlation calculations')
# create our Toeplitz indexer
ij = np.empty((len_trf, len_trf), int)
for ii in range(len_trf):
ij[ii, ii:] = np.arange(len_trf - ii)
x = np.arange(n_fft - 1, n_fft - len_trf + ii, -1)
ij[ii + 1:, ii] = x
x_xt = np.zeros([n_ch_x * len_trf] * 2)
x_y = np.zeros((len_trf, n_ch_x, n_ch_y), order='F')
n = n_epochs * (n_ch_x * (n_ch_x + 1) // 2 + n_ch_x)
logger.info('Fitting %d epochs, %d channels' % (n_epochs, n_ch_x))
pb = ProgressBar(n, mesg='Sample')
count = 0
pb.update(count)
for ei in range(n_epochs):
this_X = X[:, ei, :]
# XXX maybe this is what we should parallelize over CPUs at some point
X_fft = cuda_dict['rfft'](this_X, n=n_fft, axis=0)
X_fft_conj = X_fft.conj()
y_fft = cuda_dict['rfft'](y[:, ei, :], n=n_fft, axis=0)
for ch0 in range(n_ch_x):
for oi, ch1 in enumerate(range(ch0, n_ch_x)):
this_result = cuda_dict['irfft'](
X_fft[:, ch0] * X_fft_conj[:, ch1], n=n_fft, axis=0)
# Our autocorrelation structure is a Toeplitz matrix, but
# it's faster to create the Toeplitz ourselves than use
# linalg.toeplitz.
this_result = this_result[ij]
# However, we need to adjust for coeffs that are cut off,
# i.e. the non-zero delays should not have the same AC value
# as the zero-delay ones (because they actually have fewer
# coefficients).
#
# These adjustments also follow a Toeplitz structure, so we
# construct a matrix of what has been left off, compute their
# inner products, and remove them.
if edge_correction:
_edge_correct(this_result, this_X, smax, smin, ch0, ch1)
# Store the results in our output matrix
x_xt[ch0 * len_trf:(ch0 + 1) * len_trf,
ch1 * len_trf:(ch1 + 1) * len_trf] += this_result
if ch0 != ch1:
x_xt[ch1 * len_trf:(ch1 + 1) * len_trf,
ch0 * len_trf:(ch0 + 1) * len_trf] += this_result.T
count += 1
pb.update(count)
# compute the crosscorrelations
cc_temp = cuda_dict['irfft'](
y_fft * X_fft_conj[:, slice(ch0, ch0 + 1)], n=n_fft, axis=0)
if smin < 0 and smax >= 0:
x_y[:-smin, ch0] += cc_temp[smin:]
x_y[len_trf - smax:, ch0] += cc_temp[:smax]
else:
x_y[:, ch0] += cc_temp[smin:smax]
count += 1
pb.update(count)
x_y = np.reshape(x_y, (n_ch_x * len_trf, n_ch_y), order='F')
return x_xt, x_y, n_ch_x, X_offset, y_offset
@jit()
def _edge_correct(this_result, this_X, smax, smin, ch0, ch1):
if smax > 0:
tail = _toeplitz_dot(this_X[-1:-smax:-1, ch0],
this_X[-1:-smax:-1, ch1])
if smin > 0:
tail = tail[smin - 1:, smin - 1:]
this_result[max(-smin + 1, 0):, max(-smin + 1, 0):] -= tail
if smin < 0:
head = _toeplitz_dot(this_X[:-smin, ch0],
this_X[:-smin, ch1])[::-1, ::-1]
if smax < 0:
head = head[:smax, :smax]
this_result[:-smin, :-smin] -= head
@jit()
def _toeplitz_dot(a, b):
"""Create upper triangular Toeplitz matrices & compute the dot product."""
# This is equivalent to:
# a = linalg.toeplitz(a)
# b = linalg.toeplitz(b)
# a[np.triu_indices(len(a), 1)] = 0
# b[np.triu_indices(len(a), 1)] = 0
# out = np.dot(a.T, b)
assert a.shape == b.shape and a.ndim == 1
out = np.outer(a, b)
for ii in range(1, len(a)):
out[ii, ii:] += out[ii - 1, ii - 1:-1]
out[ii + 1:, ii] += out[ii:-1, ii - 1]
return out
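# Hedged self-check sketch (not part of the original module): confirm that
# _toeplitz_dot matches the explicit lower-triangular Toeplitz construction
# spelled out in the equivalence comment above.
def _check_toeplitz_dot(n=6, seed=0):
    rng = np.random.RandomState(seed)
    a, b = rng.randn(n), rng.randn(n)
    A, B = linalg.toeplitz(a), linalg.toeplitz(b)
    A[np.triu_indices(n, 1)] = 0
    B[np.triu_indices(n, 1)] = 0
    return np.allclose(np.dot(A.T, B), _toeplitz_dot(a, b))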
def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct',
normed=False):
"""Compute regularization parameter from neighbors."""
from scipy.sparse.csgraph import laplacian
known_types = ('ridge', 'laplacian')
if isinstance(reg_type, str):
reg_type = (reg_type,) * 2
if len(reg_type) != 2:
raise ValueError('reg_type must have two elements, got %s'
% (len(reg_type),))
for r in reg_type:
if r not in known_types:
raise ValueError('reg_type entries must be one of %s, got %s'
% (known_types, r))
reg_time = (reg_type[0] == 'laplacian' and n_delays > 1)
reg_chs = (reg_type[1] == 'laplacian' and n_ch_x > 1)
if not reg_time and not reg_chs:
return np.eye(n_ch_x * n_delays)
# regularize time
if reg_time:
reg = np.eye(n_delays)
stride = n_delays + 1
reg.flat[1::stride] += -1
reg.flat[n_delays::stride] += -1
reg.flat[n_delays + 1:-n_delays - 1:stride] += 1
args = [reg] * n_ch_x
reg = linalg.block_diag(*args)
else:
reg = np.zeros((n_delays * n_ch_x,) * 2)
# regularize features
if reg_chs:
block = n_delays * n_delays
row_offset = block * n_ch_x
stride = n_delays * n_ch_x + 1
reg.flat[n_delays:-row_offset:stride] += -1
reg.flat[n_delays + row_offset::stride] += 1
reg.flat[row_offset:-n_delays:stride] += -1
reg.flat[:-(n_delays + row_offset):stride] += 1
assert np.array_equal(reg[::-1, ::-1], reg)
if method == 'direct':
if normed:
norm = np.sqrt(np.diag(reg))
reg /= norm
reg /= norm[:, np.newaxis]
return reg
else:
# Use csgraph. Note that our -1's above are really the neighbors!
# If we ever want to allow arbitrary adjacency matrices, this is how
# we'd want to do it.
reg = laplacian(-reg, normed=normed)
return reg
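# Illustrative sketch (not part of the original module): with one feature
# channel and four delays, 'ridge' yields the identity while 'laplacian'
# yields the second-difference (graph Laplacian) matrix over the delay axis,
# i.e. diagonal [1, 2, 2, 1] with -1 on the first off-diagonals.
def _demo_reg_neighbors():
    ridge = _compute_reg_neighbors(n_ch_x=1, n_delays=4, reg_type='ridge')
    laplacian_reg = _compute_reg_neighbors(n_ch_x=1, n_delays=4,
                                           reg_type='laplacian')
    assert np.array_equal(ridge, np.eye(4))
    return laplacian_reg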
def _fit_corrs(x_xt, x_y, n_ch_x, reg_type, alpha, n_ch_in):
"""Fit the model using correlation matrices."""
# do the regularized solving
n_ch_out = x_y.shape[1]
assert x_y.shape[0] % n_ch_x == 0
n_delays = x_y.shape[0] // n_ch_x
reg = _compute_reg_neighbors(n_ch_x, n_delays, reg_type)
mat = x_xt + alpha * reg
# From sklearn
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
w = linalg.solve(mat, x_y, sym_pos=True, overwrite_a=False)
except np.linalg.LinAlgError:
warn('Singular matrix in solving dual problem. Using '
'least-squares solution instead.')
w = linalg.lstsq(mat, x_y, lapack_driver='gelsy')[0]
w = w.T.reshape([n_ch_out, n_ch_in, n_delays])
return w
class TimeDelayingRidge(BaseEstimator):
"""Ridge regression of data with time delays.
Parameters
----------
tmin : int | float
The starting lag, in seconds (or samples if ``sfreq`` == 1).
Negative values correspond to times in the past.
tmax : int | float
The ending lag, in seconds (or samples if ``sfreq`` == 1).
Positive values correspond to times in the future.
Must be >= tmin.
sfreq : float
The sampling frequency used to convert times into samples.
alpha : float
The ridge (or laplacian) regularization factor.
reg_type : str | list
Can be "ridge" (default) or "laplacian".
Can also be a 2-element list specifying how to regularize in time
and across adjacent features.
fit_intercept : bool
If True (default), the sample mean is removed before fitting.
n_jobs : int | str
The number of jobs to use. Can be an int (default 1) or ``'cuda'``.
.. versionadded:: 0.18
edge_correction : bool
If True (default), correct the autocorrelation coefficients for
non-zero delays for the fact that fewer samples are available.
Disabling this speeds up performance at the cost of accuracy
depending on the relationship between epoch length and model
duration. Only used if ``estimator`` is float or None.
.. versionadded:: 0.18
See Also
--------
mne.decoding.ReceptiveField
Notes
-----
This class is meant to be used with :class:`mne.decoding.ReceptiveField`
by only implicitly doing the time delaying. For reasonable receptive
field and input signal sizes, it should be more CPU and memory
efficient by using frequency-domain methods (FFTs) to compute the
auto- and cross-correlations.
"""
_estimator_type = "regressor"
def __init__(self, tmin, tmax, sfreq, alpha=0., reg_type='ridge',
fit_intercept=True, n_jobs=1, edge_correction=True):
if tmin > tmax:
raise ValueError('tmin must be <= tmax, got %s and %s'
% (tmin, tmax))
self.tmin = float(tmin)
self.tmax = float(tmax)
self.sfreq = float(sfreq)
self.alpha = float(alpha)
self.reg_type = reg_type
self.fit_intercept = fit_intercept
self.edge_correction = edge_correction
self.n_jobs = n_jobs
@property
def _smin(self):
return int(round(self.tmin * self.sfreq))
@property
def _smax(self):
return int(round(self.tmax * self.sfreq)) + 1
def fit(self, X, y):
"""Estimate the coefficients of the linear model.
Parameters
----------
X : array, shape (n_samples[, n_epochs], n_features)
The training input samples to estimate the linear coefficients.
y : array, shape (n_samples[, n_epochs], n_outputs)
The target values.
Returns
-------
self : instance of TimeDelayingRidge
Returns the modified instance.
"""
if X.ndim == 3:
assert y.ndim == 3
assert X.shape[:2] == y.shape[:2]
else:
assert X.ndim == 2 and y.ndim == 2
assert X.shape[0] == y.shape[0]
n_jobs = check_n_jobs(self.n_jobs, allow_cuda=True)
# These are split into two functions because it's possible that we
# might want to allow people to do them separately (e.g., to test
# different regularization parameters).
self.cov_, x_y_, n_ch_x, X_offset, y_offset = _compute_corrs(
X, y, self._smin, self._smax, n_jobs, self.fit_intercept,
self.edge_correction)
self.coef_ = _fit_corrs(self.cov_, x_y_, n_ch_x,
self.reg_type, self.alpha, n_ch_x)
# This is the sklearn formula from LinearModel (will be 0. for no fit)
if self.fit_intercept:
self.intercept_ = y_offset - np.dot(X_offset, self.coef_.sum(-1).T)
else:
self.intercept_ = 0.
return self
def predict(self, X):
"""Predict the output.
Parameters
----------
X : array, shape (n_samples[, n_epochs], n_features)
The data.
Returns
-------
X : ndarray
The predicted response.
"""
if X.ndim == 2:
X = X[:, np.newaxis, :]
singleton = True
else:
singleton = False
out = np.zeros(X.shape[:2] + (self.coef_.shape[0],))
smin = self._smin
offset = max(smin, 0)
for ei in range(X.shape[1]):
for oi in range(self.coef_.shape[0]):
for fi in range(self.coef_.shape[1]):
temp = np.convolve(X[:, ei, fi], self.coef_[oi, fi])
temp = temp[max(-smin, 0):][:len(out) - offset]
out[offset:len(temp) + offset, ei, oi] += temp
out += self.intercept_
if singleton:
out = out[:, 0, :]
return out
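# Minimal usage sketch (added; not part of the original module).  It assumes
# the rest of this module (numpy as `np`, `_compute_corrs`, etc.) is available
# above, and simply fits a short FIR model relating white noise to a copy of
# itself delayed by two samples.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = rng.randn(1000, 1)                 # (n_samples, n_features)
    y = np.roll(x, 2, axis=0)              # y lags x by two samples
    y[:2] = 0.
    model = TimeDelayingRidge(tmin=0, tmax=5, sfreq=1., alpha=0.1,
                              reg_type='laplacian')
    model.fit(x, y)
    # coef_ is (n_outputs, n_features, n_delays); the largest coefficient
    # should sit near delay 2 for this toy problem.
    print(model.coef_.shape)
    print(model.coef_[0, 0].round(2))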
|
|
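# Overview (added; a best-effort reading of the script below): this is a small
# feed-forward neural network trained with back-propagation, written for
# Python 2 (print statements, raw_input).  accept() reads the error tolerance,
# learning rate (alpha), threshold, layer sizes, initial path weights and the
# training/checking data from standard input; node ids encode their layer in
# the tens digit (11, 12, ... for layer 1, 21, 22, ... for layer 2, and so on).
# create_cluster() runs a simple hard-assignment clustering over the training
# parameters to build target membership vectors, and main() trains the network
# on each sample until every output error is within the tolerance, finally
# printing the trained path weights and the resulting memberships.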
import random
import math
tolerance_error = 0.05
alpha = 0.3
threshold = 0.0
cluster_count = 0
parameter_assigned = dict()
member = list()
network_dimensions = list()
start_nodes= list()
end_nodes =list()
prev_vector = list()
vector = list()
min_dist_index = list()
v_center = list()
distance = list()
nodes = list()
training = list()
checking = list()
end_member_check = dict()
network = dict()
path_weights = dict()
error_at_nodes = dict()
node_values = dict()
data_parmeter_count = 0
def neuron_function(x):
return 1/(1+math.exp(-x))
def neuron_function_dash(x):
return neuron_function(x)*(1-neuron_function(x))
def reset_node_values():
for i in nodes:
node_values[i] = 1.0
def reset_error_values():
for i in nodes:
if i in start_nodes:
error_at_nodes[i] = 0.0
else:
error_at_nodes[i] = 1.0
def assign_input(node,value):
node_values[node] = value
def calculate_node_value(x):
temp = 0.0
if x not in start_nodes:
temp = 0.0
for i in (network[x])[0]:
temp = temp + (node_values[i]*path_weights[str(i)+"_"+str(x)])
node_values[x] = round(neuron_function(temp - threshold),6)
def calculate_output():
for i in (sorted(network.keys())):
calculate_node_value(i)
def pass_values_to_network(i):
for j in (start_nodes):
assign_input(j, training[parameter_assigned[j]][i])
calculate_output()
def pass_check_values_to_network(i):
for j in (start_nodes):
assign_input(j, checking[parameter_assigned[j]][i])
calculate_output()
def total_error (i):
for j in end_nodes:
error_at_nodes[j] = member[i][end_member_check[j]] - node_values[j]
def error_at_node():
for i in reversed(sorted(error_at_nodes.keys())):
if i not in end_nodes and i not in start_nodes:
temp = 0.0
for j in network[i][1]:
temp = temp+ path_weights[str(i)+"_"+str(j)] * error_at_nodes[j]
error_at_nodes[i] = round(node_values[i]*(1-node_values[i])*temp,6)
def update_path_weight(x,y):
if str(x)+'_'+str(y) in path_weights:
path_weights[str(x)+'_'+str(y)] = round(path_weights[str(x)+'_'+str(y)] + (alpha* error_at_nodes[y]*node_values[x]),6)
else :
print "Invalid path"
return False
def update_all_path_weights():
for i in network.keys():
for j in network[i][1]:
update_path_weight(i,j)
def accept():
global tolerance_error ,alpha,threshold,total_layers,network_dimensions, start_nodes, end_nodes,cluster_count,data_parmeter_count , training
tolerance_error = float(raw_input())
alpha = float(raw_input())
threshold = float(raw_input())
total_layers = int(raw_input())
network_dimensions = [int (i) for i in raw_input().split()]
ini_node = 11
curr_node = ini_node
for i in range(total_layers):
for j in range(network_dimensions[i]):
nodes.append(curr_node)
curr_node = curr_node + 1
curr_node = ini_node + ((i+1)*10)
create_network()
start_nodes = [i+ini_node for i in range((network_dimensions[0]))]
net_dim_len = len(network_dimensions)
end_nodes = [i+(net_dim_len*10+1) for i in range(network_dimensions[net_dim_len-1])]
assign_path_weights()
reset_node_values()
reset_error_values()
cluster_count = len(end_nodes)
data_parmeter_count = int (raw_input())
for i in range(data_parmeter_count):
training.append( [ float (i) for i in raw_input().split() ])
for i in range(data_parmeter_count):
checking.append( [ float (i) for i in raw_input().split() ])
def assign_path_weights():
k = 0
x =0
weights = [float (i) for i in raw_input().split()]
for i in range(len(network_dimensions)-1):
x = x+(network_dimensions[i]*network_dimensions[i+1])
if len(weights) < x:
raise ValueError("Provide Sufficient path weights ")
for i in sorted(network.keys()):
for j in network[i][1]:
path_weights[str(i)+"_"+str(j)] = weights[k]
k = k+1
def create_network():
for i in nodes:
network[i] = [list(),list()]
for i in nodes:
for j in nodes:
if (i/10) == ((j/10)+1):
network[i][0].append(j)
elif (i/10) == ((j/10)-1):
network[i][1].append(j)
def is_err_not_minimum():
flag = False
for i in end_nodes:
if abs(error_at_nodes[i]) > tolerance_error:
flag = True
return flag
def create_cluster (c, lists):
global vector,v_center,member
create_initialvector(c,lists)
while not is_prev_vector_same():
get_cluster_centers(c,lists)
get_dist_from_clu_center(c,lists)
calculate_min_distance_index()
update_vector(c,lists)
member = [list(i) for i in zip(*vector )]
def is_prev_vector_same():
return prev_vector==vector
def update_vector(c,lists):
global vector, prev_vector,min_dist_index, v_center , distance
v_center = list()
prev_vector = vector
vector = list()
n = len(lists[0])
for i in range (c):
vector.append([])
for j in range(n):
vector[i].append(0)
for i in range(n):
vector[min_dist_index[i]][i] = 1
min_dist_index = list()
def calculate_min_distance_index():
global min_dist_index , distance
x = list()
for j in range(len(distance[0])):
for i in range(len(distance)):
x.append( distance[i][j])
min_dist_index.append( x.index(min (x)))
x = list()
distance = list()
def get_dist_from_clu_center(c,lists):
global v_center, distance
x =list()
m = len(lists)
n = len(lists[0])
temp = 0.0
for i in range(m):
for j in range(n):
for k in range(m):
temp = temp + ((lists[k][j] - v_center[i][k])**2)
x.append(math.sqrt(temp))
temp = 0.0
distance.append(x)
x = list()
def get_cluster_centers(c,lists):
global v_center,vector
v_center = list()
center = list()
m = len(lists)
n = len(lists[0])
temp = 0.0
denominator = 0.0
for i in vector:
for k in lists:
for l in range(n):
temp = temp + i[l]* k[l]
denominator = denominator + i[l]
if denominator == 0:
denominator = 1
center.append(float(temp)/denominator)
temp = 0.0
denominator = 0.0
v_center.append(center)
center = list()
def create_initialvector(c,lists):
global vector
for i in range (c):
vector.append([])
for j in range(len(lists[0])):
vector[i].append(0)
x = 0
for i in range(len(lists[0])):
vector[x][i] =1
x = x+1
if x == c:
x= 0
def assign_inputs_to_start_nodes():
x = [list(j) for j in zip(start_nodes,[i for i in range(len(training))]*len(start_nodes))]
for i in range(len(x)):
parameter_assigned[x[i][0]] = x[i][1]
def assign_outputs_to_end_nodes():
x =[ list(j) for j in zip (end_nodes,[i for i in range(len(member[0]))])]
for i in range(len(x)):
end_member_check[x[i][0]] = x[i][1]
def main():
accept()
create_cluster(cluster_count,training)
assign_inputs_to_start_nodes()
assign_outputs_to_end_nodes()
for i in range(len(training[0])):
iterations = 0
pass_values_to_network(i)
while is_err_not_minimum():
iterations = iterations +1
pass_values_to_network(i)
total_error(i)
error_at_node()
update_all_path_weights()
temp = []
for i in end_nodes:
temp.append(node_values[i])
member.append(temp)
reset_node_values()
reset_error_values()
print "\nPath weights after training"
for i in path_weights:
print i+" "+str(path_weights[i])
for i in range(len(training[0])):
pass_check_values_to_network(i)
while is_err_not_minimum():
iterations = iterations +1
pass_check_values_to_network(i)
total_error(i)
error_at_node()
update_all_path_weights()
temp = []
for i in end_nodes:
temp.append(node_values[i])
member.append(temp)
del member[:len(training[0])]
print "\nMembership"
for i in member:
print i
if __name__ == "__main__":
main()
|
|
"""Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: Simplified BSD
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
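# For example (illustrative):
#   mldata_filename('datasets-UCI iris')       -> 'datasets-uci-iris'
#   mldata_filename('Whistler Daily Snowfall') -> 'whistler-daily-snowfall'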
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
    If the file does not exist yet, it is downloaded from mldata.org.
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
    Keyword arguments allow adapting these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to an mldata.org URL.
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> iris = fetch_mldata('iris')
>>> iris.target[0]
1
>>> print(iris.data[0])
[-0.555556 0.25 -0.864407 -0.916667]
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True)
>>> print(leuk.data.shape[0])
72
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0')
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
|
|
#!/usr/bin/env python
"""
Developer script to convert yaml periodic table to json format.
Created on Nov 15, 2011
"""
from __future__ import division
import json
from itertools import product
import ruamel.yaml as yaml
import re
from monty.serialization import loadfn
from pymatgen import Element
from pymatgen.core.periodic_table import get_el_sp
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Nov 15, 2011"
def test_yaml():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
print(data)
def test_json():
with open('periodic_table.json', 'r') as f:
data = json.load(f)
print(data)
def parse_oxi_state():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
f = open('oxidation_states.txt', 'r')
oxidata = f.read()
f.close()
oxidata = re.sub('[\n\r]', '', oxidata)
patt = re.compile('<tr>(.*?)</tr>', re.MULTILINE)
for m in patt.finditer(oxidata):
line = m.group(1)
line = re.sub('</td>', '', line)
line = re.sub('(<td>)+', '<td>', line)
line = re.sub('</*a[^>]*>', '', line)
el = None
oxistates = []
common_oxi = []
for tok in re.split('<td>', line.strip()):
m2 = re.match(r"<b>([A-Z][a-z]*)</b>", tok)
if m2:
el = m2.group(1)
else:
m3 = re.match(r"(<b>)*([\+\-]\d)(</b>)*", tok)
if m3:
oxistates.append(int(m3.group(2)))
if m3.group(1):
common_oxi.append(int(m3.group(2)))
if el in data:
del data[el]['Max oxidation state']
del data[el]['Min oxidation state']
del data[el]['Oxidation_states']
del data[el]['Common_oxidation_states']
data[el]['Oxidation states'] = oxistates
data[el]['Common oxidation states'] = common_oxi
else:
print(el)
with open('periodic_table2.yaml', 'w') as f:
yaml.dump(data, f)
def parse_ionic_radii():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
f = open('ionic_radii.csv', 'r')
radiidata = f.read()
f.close()
radiidata = radiidata.split("\r")
header = radiidata[0].split(",")
for i in range(1, len(radiidata)):
line = radiidata[i]
toks = line.strip().split(",")
suffix = ""
name = toks[1]
if len(name.split(" ")) > 1:
suffix = "_" + name.split(" ")[1]
el = toks[2]
ionic_radii = {}
for j in range(3, len(toks)):
m = re.match("^\s*([0-9\.]+)", toks[j])
if m:
ionic_radii[int(header[j])] = float(m.group(1))
if el in data:
data[el]['Ionic_radii' + suffix] = ionic_radii
if suffix == '_hs':
data[el]['Ionic_radii'] = ionic_radii
else:
print(el)
with open('periodic_table2.yaml', 'w') as f:
yaml.dump(data, f)
def parse_radii():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
f = open('radii.csv', 'r')
radiidata = f.read()
f.close()
radiidata = radiidata.split("\r")
header = radiidata[0].split(",")
for i in range(1, len(radiidata)):
line = radiidata[i]
toks = line.strip().split(",")
el = toks[1]
try:
atomic_radii = float(toks[3]) / 100
except Exception:
atomic_radii = toks[3]
try:
atomic_radii_calc = float(toks[4]) / 100
except Exception:
atomic_radii_calc = toks[4]
try:
vdw_radii = float(toks[5]) / 100
except Exception:
vdw_radii = toks[5]
if el in data:
data[el]['Atomic radius'] = atomic_radii
data[el]['Atomic radius calculated'] = atomic_radii_calc
data[el]['Van der waals radius'] = vdw_radii
else:
print(el)
with open('periodic_table2.yaml', 'w') as f:
yaml.dump(data, f)
with open('periodic_table.json', 'w') as f:
json.dump(data, f)
def update_ionic_radii():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
for el, d in data.items():
if "Ionic_radii" in d:
d["Ionic radii"] = {k: v / 100
for k, v in d["Ionic_radii"].items()}
del d["Ionic_radii"]
if "Ionic_radii_hs" in d:
d["Ionic radii hs"] = {k: v / 100
for k, v in d["Ionic_radii_hs"].items()}
del d["Ionic_radii_hs"]
if "Ionic_radii_ls" in d:
d["Ionic radii ls"] = {k: v / 100
for k, v in d["Ionic_radii_ls"].items()}
del d["Ionic_radii_ls"]
with open('periodic_table2.yaml', 'w') as f:
yaml.dump(data, f)
with open('periodic_table.json', 'w') as f:
json.dump(data, f)
def parse_shannon_radii():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
from openpyxl import load_workbook
import collections
wb = load_workbook('Shannon Radii.xlsx')
print(wb.get_sheet_names())
sheet = wb["Sheet1"]
i = 2
radii = collections.defaultdict(dict)
while sheet["E%d" % i].value:
if sheet["A%d" % i].value:
el = sheet["A%d" % i].value
if sheet["B%d" % i].value:
charge = int(sheet["B%d" % i].value)
radii[el][charge] = dict()
if sheet["C%d" % i].value:
cn = sheet["C%d" % i].value
if cn not in radii[el][charge]:
radii[el][charge][cn] = dict()
if sheet["D%d" % i].value is not None:
spin = sheet["D%d" % i].value
else:
spin = ""
# print("%s - %d - %s" % (el, charge, cn))
radii[el][charge][cn][spin] = {
"crystal_radius": float(sheet["E%d" % i].value),
"ionic_radius": float(sheet["F%d" % i].value),
}
i += 1
for el in radii.keys():
if el in data:
data[el]["Shannon radii"] = dict(radii[el])
with open('periodic_table.yaml', 'w') as f:
yaml.safe_dump(data, f)
with open('periodic_table.json', 'w') as f:
json.dump(data, f)
def gen_periodic_table():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
with open('periodic_table.json', 'w') as f:
json.dump(data, f)
def gen_iupac_ordering():
periodic_table = loadfn("periodic_table.json")
order = [([18], range(6, 0, -1)), # noble gasses
([1], range(7, 1, -1)), # alkali metals
([2], range(7, 1, -1)), # alkali earth metals
(range(17, 2, -1), [9]), # actinides
(range(17, 2, -1), [8]), # lanthanides
([3], (5, 4)), # Y, Sc
([4], (6, 5, 4)), # Hf -> Ti
([5], (6, 5, 4)), # Ta -> V
([6], (6, 5, 4)), # W -> Cr
([7], (6, 5, 4)), # Re -> Mn
([8], (6, 5, 4)), # Os -> Fe
([9], (6, 5, 4)), # Ir -> Co
([10], (6, 5, 4)), # Pt -> Ni
([11], (6, 5, 4)), # Au -> Cu
([12], (6, 5, 4)), # Hg -> Zn
([13], range(6, 1, -1)), # Tl -> B
([14], range(6, 1, -1)), # Pb -> C
([15], range(6, 1, -1)), # Bi -> N
([1], [1]), # Hydrogen
([16], range(6, 1, -1)), # Po -> O
([17], range(6, 1, -1))] # At -> F
order = sum([list(product(x, y)) for x, y in order], [])
iupac_ordering_dict = dict(zip(
[Element.from_row_and_group(row, group) for group, row in order],
range(len(order))))
# first clean periodic table of any IUPAC ordering
for el in periodic_table:
periodic_table[el].pop('IUPAC ordering', None)
# now add iupac ordering
for el in periodic_table:
if 'IUPAC ordering' in periodic_table[el]:
# sanity check that we don't cover the same element twice
raise KeyError("IUPAC ordering already exists for {}".format(el))
periodic_table[el]['IUPAC ordering'] = iupac_ordering_dict[get_el_sp(el)]
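# Illustration (added; a reading of the ordering above): lower 'IUPAC ordering'
# values sort first, so the noble gases lead the ordering (Rn through He take
# indices 0-5) and the halogens end it (F receives the highest index).  Note
# that, as written, gen_iupac_ordering() only updates the table in memory; it
# does not dump the result back to periodic_table.json.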
if __name__ == "__main__":
parse_shannon_radii()
# gen_periodic_table()
|
|
import base64
import functools
import os
from django.conf import settings
from django.contrib.auth import login, logout
from django.core import signing
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404, HttpResponseRedirect
from django.utils.encoding import force_bytes
from django.utils.http import is_safe_url
from django.utils.html import format_html
from django.utils.translation import ugettext, ugettext_lazy as _
from rest_framework import serializers
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import detail_route
from rest_framework.mixins import (
DestroyModelMixin, ListModelMixin, RetrieveModelMixin, UpdateModelMixin)
from rest_framework.permissions import (
AllowAny, BasePermission, IsAuthenticated)
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from waffle.decorators import waffle_switch
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.access.models import GroupUser
from olympia.amo import messages
from olympia.amo.decorators import write
from olympia.api.authentication import (
JWTKeyAuthentication, WebTokenAuthentication)
from olympia.api.permissions import AnyOf, ByHttpMethod, GroupPermission
from olympia.users import tasks
from olympia.users.models import UserProfile, UserNotification
from olympia.users.notifications import NOTIFICATIONS
from . import verify
from .serializers import (
AccountSuperCreateSerializer, PublicUserProfileSerializer,
UserNotificationSerializer, UserProfileSerializer)
from .utils import fxa_login_url, generate_fxa_state
log = olympia.core.logger.getLogger('accounts')
ERROR_AUTHENTICATED = 'authenticated'
ERROR_NO_CODE = 'no-code'
ERROR_NO_PROFILE = 'no-profile'
ERROR_NO_USER = 'no-user'
ERROR_STATE_MISMATCH = 'state-mismatch'
ERROR_STATUSES = {
ERROR_AUTHENTICATED: 400,
ERROR_NO_CODE: 422,
ERROR_NO_PROFILE: 401,
ERROR_STATE_MISMATCH: 400,
}
LOGIN_ERROR_MESSAGES = {
ERROR_AUTHENTICATED: _(u'You are already logged in.'),
ERROR_NO_CODE:
_(u'Your login attempt could not be parsed. Please try again.'),
ERROR_NO_PROFILE:
_(u'Your Firefox Account could not be found. Please try again.'),
ERROR_STATE_MISMATCH: _(u'You could not be logged in. Please try again.'),
}
API_TOKEN_COOKIE = 'api_auth_token'
def safe_redirect(url, action):
if not is_safe_url(url):
url = reverse('home')
log.info(u'Redirecting after {} to: {}'.format(action, url))
return HttpResponseRedirect(url)
def find_user(identity):
"""Try to find a user for a Firefox Accounts profile. If the account
hasn't been migrated we'll need to do the lookup by email but we should
use the ID after that so check both. If we get multiple users we're in
some weird state where the accounts need to be merged but that behaviour
hasn't been defined so let it raise.
"""
try:
return UserProfile.objects.get(
Q(fxa_id=identity['uid']) | Q(email=identity['email']))
except UserProfile.DoesNotExist:
return None
except UserProfile.MultipleObjectsReturned:
# This shouldn't happen, so let it raise.
log.error(
'Found multiple users for {email} and {uid}'.format(**identity))
raise
def register_user(request, identity):
user = UserProfile.objects.create_user(
email=identity['email'], username=None, fxa_id=identity['uid'])
log.info('Created user {} from FxA'.format(user))
login(request, user)
return user
def update_user(user, identity):
"""Update a user's info from FxA if needed, as well as generating the id
that is used as part of the session/api token generation."""
if (user.fxa_id != identity['uid'] or
user.email != identity['email']):
log.info(
'Updating user info from FxA for {pk}. Old {old_email} {old_uid} '
'New {new_email} {new_uid}'.format(
pk=user.pk, old_email=user.email, old_uid=user.fxa_id,
new_email=identity['email'], new_uid=identity['uid']))
user.update(fxa_id=identity['uid'], email=identity['email'])
if user.auth_id is None:
# If the user didn't have an auth id (old user account created before
# we added the field), generate one for them.
user.update(auth_id=UserProfile._meta.get_field('auth_id').default())
def login_user(request, user, identity):
update_user(user, identity)
log.info('Logging in user {} from FxA'.format(user))
user.log_login_attempt(True)
login(request, user)
def fxa_error_message(message):
login_help_url = (
'https://support.mozilla.org/kb/access-your-add-ons-firefox-accounts')
return format_html(
u'{error} <a href="{url}">{help_text}</a>',
url=login_help_url, help_text=_(u'Need help?'),
error=message)
def render_error(request, error, next_path=None, format=None):
if format == 'json':
status = ERROR_STATUSES.get(error, 422)
response = Response({'error': error}, status=status)
else:
if not is_safe_url(next_path):
next_path = None
messages.error(
request, fxa_error_message(LOGIN_ERROR_MESSAGES[error]),
extra_tags='fxa')
if next_path is None:
response = HttpResponseRedirect(reverse('users.login'))
else:
response = HttpResponseRedirect(next_path)
return response
def parse_next_path(state_parts):
next_path = None
if len(state_parts) == 2:
        # The '=' padding will have been stripped off the encoded path, so add
        # it back; the decoder only complains when there is too little
        # padding, so appending four '=' characters is always enough.
encoded_path = state_parts[1] + '===='
try:
next_path = base64.urlsafe_b64decode(
force_bytes(encoded_path)).decode('utf-8')
except TypeError:
log.info('Error decoding next_path {}'.format(
encoded_path))
pass
if not is_safe_url(next_path):
next_path = None
return next_path
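# Illustration (added; an assumption based on the parsing above): the OAuth
# 'state' value is expected to look like '<random-state>:<encoded-next-path>',
# where the second part is something like
# base64.urlsafe_b64encode(force_bytes(next_path)) with its trailing '='
# padding stripped, which is why padding is re-added before decoding here.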
def with_user(format, config=None):
def outer(fn):
@functools.wraps(fn)
@write
def inner(self, request):
if config is None:
if hasattr(self, 'get_fxa_config'):
fxa_config = self.get_fxa_config(request)
else:
fxa_config = (
settings.FXA_CONFIG[settings.DEFAULT_FXA_CONFIG_NAME])
else:
fxa_config = config
if request.method == 'GET':
data = request.query_params
else:
data = request.data
state_parts = data.get('state', '').split(':', 1)
state = state_parts[0]
next_path = parse_next_path(state_parts)
if not data.get('code'):
log.info('No code provided.')
return render_error(
request, ERROR_NO_CODE, next_path=next_path, format=format)
elif (not request.session.get('fxa_state') or
request.session['fxa_state'] != state):
log.info(
'State mismatch. URL: {url} Session: {session}'.format(
url=data.get('state'),
session=request.session.get('fxa_state'),
))
return render_error(
request, ERROR_STATE_MISMATCH, next_path=next_path,
format=format)
elif request.user.is_authenticated():
response = render_error(
request, ERROR_AUTHENTICATED, next_path=next_path,
format=format)
# If the api token cookie is missing but we're still
# authenticated using the session, add it back.
if API_TOKEN_COOKIE not in request.COOKIES:
log.info('User %s was already authenticated but did not '
'have an API token cookie, adding one.',
request.user.pk)
response = add_api_token_to_response(
response, request.user)
return response
try:
identity = verify.fxa_identify(data['code'], config=fxa_config)
except verify.IdentificationError:
log.info('Profile not found. Code: {}'.format(data['code']))
return render_error(
request, ERROR_NO_PROFILE, next_path=next_path,
format=format)
else:
return fn(
self, request, user=find_user(identity), identity=identity,
next_path=next_path)
return inner
return outer
def generate_api_token(user):
"""Generate a new API token for a given user."""
data = {
'auth_hash': user.get_session_auth_hash(),
'user_id': user.pk,
}
return signing.dumps(data, salt=WebTokenAuthentication.salt)
def add_api_token_to_response(response, user):
"""Generate API token and add it to the response (both as a `token` key in
the response if it was json and by setting a cookie named API_TOKEN_COOKIE.
"""
token = generate_api_token(user)
if hasattr(response, 'data'):
response.data['token'] = token
# Also include the API token in a session cookie, so that it is
# available for universal frontend apps.
response.set_cookie(
API_TOKEN_COOKIE,
token,
max_age=settings.SESSION_COOKIE_AGE,
secure=settings.SESSION_COOKIE_SECURE,
httponly=settings.SESSION_COOKIE_HTTPONLY)
return response
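# Note (added): the token set above is a signed payload holding the user id
# and session auth hash, salted with WebTokenAuthentication.salt, so it can
# presumably be validated by that authentication class on later API requests.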
def remove_api_token_cookie(response):
"""Delete the api token cookie."""
response.delete_cookie(API_TOKEN_COOKIE)
class FxAConfigMixin(object):
def get_config_name(self, request):
return request.GET.get('config', self.DEFAULT_FXA_CONFIG_NAME)
def get_allowed_configs(self):
return getattr(
self, 'ALLOWED_FXA_CONFIGS', [self.DEFAULT_FXA_CONFIG_NAME])
def get_fxa_config(self, request):
config_name = self.get_config_name(request)
if config_name in self.get_allowed_configs():
return settings.FXA_CONFIG[config_name]
log.info('Using default FxA config instead of {}'.format(config_name))
return settings.FXA_CONFIG[self.DEFAULT_FXA_CONFIG_NAME]
class LoginStartBaseView(FxAConfigMixin, APIView):
def get(self, request):
request.session.setdefault('fxa_state', generate_fxa_state())
return HttpResponseRedirect(
fxa_login_url(
config=self.get_fxa_config(request),
state=request.session['fxa_state'],
next_path=request.GET.get('to'),
action=request.GET.get('action', 'signin')))
class LoginStartView(LoginStartBaseView):
DEFAULT_FXA_CONFIG_NAME = settings.DEFAULT_FXA_CONFIG_NAME
ALLOWED_FXA_CONFIGS = settings.ALLOWED_FXA_CONFIGS
class AuthenticateView(FxAConfigMixin, APIView):
DEFAULT_FXA_CONFIG_NAME = settings.DEFAULT_FXA_CONFIG_NAME
ALLOWED_FXA_CONFIGS = settings.ALLOWED_FXA_CONFIGS
authentication_classes = (SessionAuthentication,)
@with_user(format='html')
def get(self, request, user, identity, next_path):
if user is None:
user = register_user(request, identity)
fxa_config = self.get_fxa_config(request)
if fxa_config.get('skip_register_redirect'):
response = safe_redirect(next_path, 'register')
else:
response = safe_redirect(reverse('users.edit'), 'register')
else:
login_user(request, user, identity)
response = safe_redirect(next_path, 'login')
add_api_token_to_response(response, user)
return response
def logout_user(request, response):
logout(request)
remove_api_token_cookie(response)
class SessionView(APIView):
permission_classes = [IsAuthenticated]
def delete(self, request, *args, **kwargs):
response = Response({'ok': True})
logout_user(request, response)
return response
class AllowSelf(BasePermission):
def has_permission(self, request, view):
return True
def has_object_permission(self, request, view, obj):
return request.user.is_authenticated() and obj == request.user
class AccountViewSet(RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin,
GenericViewSet):
permission_classes = [
ByHttpMethod({
'get': AllowAny,
'head': AllowAny,
'options': AllowAny, # Needed for CORS.
            # To edit a profile it has to be yours, or you have to be an admin.
'patch': AnyOf(AllowSelf, GroupPermission(
amo.permissions.USERS_EDIT)),
'delete': AnyOf(AllowSelf, GroupPermission(
amo.permissions.USERS_EDIT)),
}),
]
def get_queryset(self):
return UserProfile.objects.all()
def get_object(self):
if hasattr(self, 'instance'):
return self.instance
identifier = self.kwargs.get('pk')
self.lookup_field = self.get_lookup_field(identifier)
self.kwargs[self.lookup_field] = identifier
self.instance = super(AccountViewSet, self).get_object()
# action won't exist for other classes that are using this ViewSet.
can_view_instance = (
not getattr(self, 'action', None) or
self.self_view or
self.admin_viewing or
self.instance.is_public)
if can_view_instance:
return self.instance
else:
raise Http404
def get_lookup_field(self, identifier):
lookup_field = 'pk'
if identifier and not identifier.isdigit():
# If the identifier contains anything other than a digit, it's
# the username.
lookup_field = 'username'
return lookup_field
@property
def self_view(self):
return (
self.request.user.is_authenticated() and
self.get_object() == self.request.user)
@property
def admin_viewing(self):
return acl.action_allowed_user(
self.request.user, amo.permissions.USERS_EDIT)
def get_serializer_class(self):
if self.self_view or self.admin_viewing:
return UserProfileSerializer
else:
return PublicUserProfileSerializer
def perform_destroy(self, instance):
if instance.is_developer:
raise serializers.ValidationError(ugettext(
u'Developers of add-ons or themes cannot delete their '
u'account. You must delete all add-ons and themes linked to '
u'this account, or transfer them to other users.'))
return super(AccountViewSet, self).perform_destroy(instance)
@detail_route(
methods=['delete'], permission_classes=[
AnyOf(AllowSelf, GroupPermission(amo.permissions.USERS_EDIT))])
def picture(self, request, pk=None):
user = self.get_object()
user.update(picture_type='')
log.debug(u'User (%s) deleted photo' % user)
tasks.delete_photo.delay(user.picture_path)
return self.retrieve(request)
class ProfileView(APIView):
authentication_classes = [JWTKeyAuthentication, WebTokenAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request):
account_viewset = AccountViewSet(
request=request,
permission_classes=self.permission_classes,
kwargs={'pk': unicode(self.request.user.pk)})
account_viewset.format_kwarg = self.format_kwarg
return account_viewset.retrieve(request)
class AccountSuperCreate(APIView):
authentication_classes = [JWTKeyAuthentication]
permission_classes = [
IsAuthenticated,
GroupPermission(amo.permissions.ACCOUNTS_SUPER_CREATE)]
@waffle_switch('super-create-accounts')
def post(self, request):
serializer = AccountSuperCreateSerializer(data=request.data)
if not serializer.is_valid():
return Response({'errors': serializer.errors},
status=422)
data = serializer.data
group = serializer.validated_data.get('group', None)
user_token = os.urandom(4).encode('hex')
username = data.get('username', 'super-created-{}'.format(user_token))
fxa_id = data.get('fxa_id', None)
email = data.get('email', '{}@addons.mozilla.org'.format(username))
user = UserProfile.objects.create(
username=username,
email=email,
fxa_id=fxa_id,
display_name='Super Created {}'.format(user_token),
notes='auto-generated from API')
user.save()
if group:
GroupUser.objects.create(user=user, group=group)
login(request, user)
request.session.save()
log.info(u'API user {api_user} created and logged in a user from '
u'the super-create API: user_id: {user.pk}; '
u'user_name: {user.username}; fxa_id: {user.fxa_id}; '
u'group: {group}'
.format(user=user, api_user=request.user, group=group))
cookie = {
'name': settings.SESSION_COOKIE_NAME,
'value': request.session.session_key,
}
cookie['encoded'] = '{name}={value}'.format(**cookie)
return Response({
'user_id': user.pk,
'username': user.username,
'email': user.email,
'display_name': user.display_name,
'groups': list((g.pk, g.name, g.rules) for g in user.groups.all()),
'fxa_id': user.fxa_id,
'session_cookie': cookie,
}, status=201)
class AccountNotificationViewSet(ListModelMixin, GenericViewSet):
"""Returns account notifications.
If not already set by the user, defaults will be returned.
"""
permission_classes = [IsAuthenticated]
# We're pushing the primary permission checking to AccountViewSet for ease.
account_permission_classes = [
AnyOf(AllowSelf, GroupPermission(amo.permissions.USERS_EDIT))]
serializer_class = UserNotificationSerializer
paginator = None
def get_account_viewset(self):
if not hasattr(self, 'account_viewset'):
self.account_viewset = AccountViewSet(
request=self.request,
permission_classes=self.account_permission_classes,
kwargs={'pk': self.kwargs['user_pk']})
return self.account_viewset
def _get_default_object(self, notification):
return UserNotification(
user=self.get_account_viewset().get_object(),
notification_id=notification.id,
enabled=notification.default_checked)
def get_queryset(self):
queryset = UserNotification.objects.filter(
user=self.get_account_viewset().get_object())
# Put it into a dict so we can easily check for existence.
set_notifications = {
user_nfn.notification.short: user_nfn for user_nfn in queryset}
out = []
for notification in NOTIFICATIONS:
out.append(set_notifications.get(
notification.short, # It's been set by the user.
self._get_default_object(notification))) # Otherwise, default.
return out
def create(self, request, *args, **kwargs):
# Loop through possible notifications.
queryset = self.get_queryset()
for notification in queryset:
# Careful with ifs. Enabled will be None|True|False.
enabled = request.data.get(notification.notification.short)
if enabled is not None:
serializer = self.get_serializer(
notification, partial=True, data={'enabled': enabled})
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(self.get_serializer(queryset, many=True).data)
|
|
# Written by Ross Cohen
# see LICENSE.txt for license information
from bencode import bdecode, bencode
from crypt import crypt
from errno import EINTR
import os
try:
import select
except ImportError:
import selectpoll as select
import sha
import socket
import SRP
from cStringIO import StringIO
import struct
AUTH_UNUSED = 0
AUTH_SOCK = 1
AUTH_CONNECTION = 2
class Agent:
def __init__(self):
self.auth_path = None
self.auth_file = None
self.poll_obj = select.poll()
self.hstatus = {}
self.identities = {}
self.allow_shutdown = 0
return
def listen_sock(self, auth_path, auth_file):
if self.auth_file is not None:
print 'Auth socket already set'
return None
self.auth_path = auth_path
self.auth_file = auth_file
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(auth_file)
sock.listen(8)
self._new_socket(AUTH_SOCK, sock)
return sock
def listen(self):
if self.auth_file is None:
print 'No socket to listen on'
return 1
self.shutdown_flag = 0
while not self.shutdown_flag:
try:
sock_list = self.poll_obj.poll()
except select.error, reason:
if reason[0] != EINTR:
raise
else:
self._after_poll(sock_list)
self.cleanup()
return 0
def cleanup(self):
for fd, status in self.hstatus.items():
del self.hstatus[fd]
self.poll_obj.unregister(fd)
status['sock'].close()
os.unlink(self.auth_file)
if self.auth_path is not None:
auth_path = self.auth_path
self.auth_path = None
os.rmdir(auth_path)
return
def _process_message(self, sock):
status = self.hstatus[sock.fileno()]
input = status['input']
input.seek(status['in_offset'])
data = input.read(4)
if len(data) != 4:
return
msg_len, = struct.unpack('<i', data)
if msg_len <= 0 or msg_len > 1024:
            self._write_error(sock, 'Bad message length')
self._close_socket(sock)
return
data = input.read(msg_len)
if len(data) < msg_len:
return
status['in_offset'] += 4 + msg_len
try:
msg = bdecode(data)
except ValueError:
self._write_error(sock, 'Bad message')
self._close_socket(sock)
return
type = msg['type']
if type == 'CDV_AGENT_ADD_PASSWORD':
id = sha.new('password' + msg['password']).digest()
self.identities[id] = msg['password']
self._write_answer(sock, bencode({'id': id}))
elif type == 'CDV_AGENT_ADD_SECRET':
id = sha.new('public hash check' + msg['secret']).digest()
self.identities[id] = msg['secret']
self._write_answer(sock, bencode({'id': id}))
elif type == 'CDV_AGENT_ADD_ENCRYPTED_SECRET':
if not self.identities.has_key(msg['id']):
self._write_error(sock, 'No such identity')
return
secret = crypt(msg['secret'], self.identities[msg['id']])[0]
id = sha.new('public hash check' + secret).digest()
if id != msg['secret_id']:
self._write_error(sock, 'id does not match')
return
self.identities[id] = secret
self._write_answer(sock, bencode({}))
elif type == 'CDV_AGENT_QUERY_IDENTITY':
known = 0
if self.identities.has_key(msg['id']):
known = 1
self._write_answer(sock, bencode({'known': known}))
elif type == 'CDV_AGENT_SRP_PRIVATE_KEY':
x = SRP.private_key(msg['user'], msg['s'], self.identities[msg['id']])
self._write_answer(sock, bencode({'x': x}))
elif type == 'CDV_AGENT_SESSION_KEY':
if not self.identities.has_key(msg['id']):
self._write_error(sock, 'No such identity')
return
if len(msg['salt1']) < 20 or len(msg['salt2']) < 20:
self._write_error(sock, 'Bad salts')
return
if msg['salt1'] == msg['salt2']:
self._write_error(sock, 'Bad salts')
return
base = 'session key' + self.identities[msg['id']] + \
msg['salt1'] + msg['salt2']
key = sha.new(base).digest()
answer = {'key': key}
self._write_answer(sock, bencode(answer))
elif type == 'CDV_AGENT_SHUTDOWN' and self.allow_shutdown:
self.shutdown_flag = 1
else:
self._write_error(sock, 'Unknown command: ' + type)
return
def _after_poll(self, sock_list):
for fd, event in sock_list:
status = self.hstatus[fd]
sock = status['sock']
if status['type'] == AUTH_UNUSED:
return
elif status['type'] == AUTH_SOCK:
tries = 3
while tries:
try:
nsock = sock.accept()
except socket.error, reason:
if reason[0] != EINTR:
raise
tries -= 1
else:
break
if tries == 0:
raise
# XXX: python doesn't support SO_PEERCRED
#sock.getsockopt(socket.SOL_SOCKET, socket.SO_PEERCRED)
self._new_socket(AUTH_CONNECTION, nsock[0])
elif status['type'] == AUTH_CONNECTION:
if event & select.POLLHUP:
self._close_socket(sock)
return
if event & select.POLLIN:
data = sock.recv(1024)
if len(data) == 0:
self._close_socket(sock)
return
status['input'].seek(0, 2)
status['input'].write(data)
self._process_message(sock)
if event & select.POLLOUT:
self._flush_socket(sock)
return
def _new_socket(self, type, sock):
sock.setblocking(0)
self.hstatus[sock.fileno()] = {'type': type,
'input': StringIO(),
'output': StringIO(),
'in_offset': 0,
'out_offset': 0,
'sock': sock}
flags = select.POLLIN
if type != AUTH_SOCK:
flags |= select.POLLHUP
self.poll_obj.register(sock, flags)
return
def _close_socket(self, sock):
fileno = sock.fileno()
self.poll_obj.unregister(fileno)
del self.hstatus[fileno]
sock.close()
return
def _write_answer(self, sock, data):
data = struct.pack('<i', len(data)) + data
status = self.hstatus[sock.fileno()]
status['output'].seek(0, 2)
written = 0
if status['out_offset'] == status['output'].tell():
written = sock.send(data)
if written != len(data):
status['output'].write(data[written:])
self.poll_obj.register(sock.fileno(),
select.POLLIN|select.POLLOUT|select.POLLHUP)
return
def _write_error(self, sock, msg):
self._write_answer(sock, bencode({'error': msg}))
return
def _flush_socket(self, sock):
status = self.hstatus[sock.fileno()]
status['output'].seek(status['out_offset'])
data = status['output'].read(1024)
while len(data):
written = sock.send(data)
status['out_offset'] += written
if written < len(data):
break
data = status['output'].read(1024)
if len(data) == 0:
self.poll_obj.register(sock.fileno(), select.POLLIN|select.POLLHUP)
return
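# Wire-format sketch (added; not part of the original module): clients connect
# to the unix socket created by listen_sock() and exchange length-prefixed
# bencoded dictionaries, framed roughly as
#
#     payload = bencode({'type': 'CDV_AGENT_QUERY_IDENTITY', 'id': some_id})
#     sock.send(struct.pack('<i', len(payload)) + payload)
#
# Replies use the same framing, with errors reported as {'error': message}.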
|
|
# Time-stamp: <2008-03-19 15:01:53 Tao Liu>
"""Module to read the motif scan data file which is in binary format.
Copyright (c) 2007 Tao Liu <taoliu@jimmy.harvard.edu>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file COPYING included with
the distribution).
@status: experimental
@version: $Revision$
@author: Tao Liu
@contact: taoliu@jimmy.harvard.edu
"""
# ------------------------------------
# python modules
# ------------------------------------
import re
import Cistrome
from Cistrome.TabIO import FWTrackI,RangeI
import sys
from struct import unpack as upk
# ------------------------------------
# constants
# ------------------------------------
__version__ = "MR $Revision$"
__author__ = "Tao Liu <taoliu@jimmy.harvard.edu>"
__doc__ = "Calculate Relationship of Motifs"
LOG = False
GENOME_SIZE = {"mm8":2644077689L,
"hg18":3080419480L}
# ------------------------------------
# Misc functions
# ------------------------------------
def mlen (mfhd) :
"""Return the motif length from motif matrix data file.
mfhd : the file object for motif matrix file
"""
mfhd.seek(0)
return len(mfhd.readlines())-1
def mconsensus (mfhd):
"""Return the motif consensus for a motif matrix data file.
mfhd : the file object for motif matrix file
"""
mfhd.seek(0)
consensus_seq=""
headline = mfhd.readline().rstrip()
consensus_field_num = headline.split("\t").index("Consensus")
for l in mfhd.readlines():
l = l.rstrip()
consensus_seq+=l.split("\t")[consensus_field_num]
return consensus_seq
def read_motif_total_num (motif_fhd,species):
"""Only read the header of binary file, return the total number of
motif scan hits regardless of cutoff.
"""
if species == "hg18":
chromosomes_fp = { # store start and number of file-pos for every chromosome in bin file
"chr1":[0,0],"chr2":[0,0],"chr3":[0,0],
"chr4":[0,0],"chr5":[0,0],"chr6":[0,0],
"chr7":[0,0],"chr8":[0,0],"chr9":[0,0],
"chr10":[0,0],"chr11":[0,0],"chr12":[0,0],
"chr13":[0,0],"chr14":[0,0],"chr15":[0,0],
"chr16":[0,0],"chr17":[0,0],"chr18":[0,0],
"chr19":[0,0],"chr20":[0,0],"chr21":[0,0],
"chr22":[0,0],"chrX":[0,0],"chrY":[0,0]
}
elif species == "mm8":
chromosomes_fp = { # store start and number of file-pos for every chromosome in bin file
"chr1":[0,0],"chr2":[0,0],"chr3":[0,0],
"chr4":[0,0],"chr5":[0,0],"chr6":[0,0],
"chr7":[0,0],"chr8":[0,0],"chr9":[0,0],
"chr10":[0,0],"chr11":[0,0],"chr12":[0,0],
"chr13":[0,0],"chr14":[0,0],"chr15":[0,0],
"chr16":[0,0],"chr17":[0,0],"chr18":[0,0],
"chr19":[0,0],"chrX":[0,0],"chrY":[0,0]
}
else:
raise Exception("Only hg18/mm8 supported!")
    # The header stores one 128-byte record per chromosome in a fixed order
    # (chr1..chrN, then chrX, chrY); Python 2 dicts are unordered, so build
    # that order explicitly instead of relying on .keys().
    chromosomes = sorted(
        chromosomes_fp.keys(),
        key=lambda c: (0, int(c[3:])) if c[3:].isdigit() else (1, c[3:]))
motif_fhd.seek(0)
# unpack the start pos
for chromosome in chromosomes:
chromosomes_fp[chromosome][0] = upk("<i",motif_fhd.read(4))[0]
motif_fhd.seek(124,1)
motif_fhd.seek(0,2)
# calculate number of hits
total_motif_hits = 0
for i in range(len(chromosomes)-1):
mh = (chromosomes_fp[chromosomes[i+1]][0]-chromosomes_fp[chromosomes[i]][0])/8
chromosomes_fp[chromosomes[i]][1] = mh
total_motif_hits += mh
# last one
mh = (motif_fhd.tell()-chromosomes_fp[chromosomes[-1]][0])/8
chromosomes_fp[chromosomes[-1]][1]=mh
total_motif_hits += mh
return total_motif_hits
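# Binary layout (added; inferred from the readers in this module): the file
# begins with one 128-byte header record per chromosome, whose first 4 bytes
# are a little-endian int giving the offset of that chromosome's hits; the
# hits themselves are 8-byte records: a little-endian int position followed
# by a little-endian float score, with a negative score marking the minus
# strand.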
def read_motif (motif_fhd,species,cutoff=0):
"""Read motif scan result, and return a TabIO.FWTrackI object
containing the motif locations.
motif_fhd : a file handler for binary motif scan result
species : must be "mm8" for mouse or "hg18" for human
cutoff : cutoff for the motif scan score
"""
motif_range_list = FWTrackI(fw=0)
if species == "hg18":
chromosomes_fp = { # store start and number of file-pos for every chromosome in bin file
"chr1":[0,0],"chr2":[0,0],"chr3":[0,0],
"chr4":[0,0],"chr5":[0,0],"chr6":[0,0],
"chr7":[0,0],"chr8":[0,0],"chr9":[0,0],
"chr10":[0,0],"chr11":[0,0],"chr12":[0,0],
"chr13":[0,0],"chr14":[0,0],"chr15":[0,0],
"chr16":[0,0],"chr17":[0,0],"chr18":[0,0],
"chr19":[0,0],"chr20":[0,0],"chr21":[0,0],
"chr22":[0,0],"chrX":[0,0],"chrY":[0,0]
}
elif species == "mm8":
chromosomes_fp = { # store start and number of file-pos for every chromosome in bin file
"chr1":[0,0],"chr2":[0,0],"chr3":[0,0],
"chr4":[0,0],"chr5":[0,0],"chr6":[0,0],
"chr7":[0,0],"chr8":[0,0],"chr9":[0,0],
"chr10":[0,0],"chr11":[0,0],"chr12":[0,0],
"chr13":[0,0],"chr14":[0,0],"chr15":[0,0],
"chr16":[0,0],"chr17":[0,0],"chr18":[0,0],
"chr19":[0,0],"chrX":[0,0],"chrY":[0,0]
}
else:
raise Exception("Only hg18/mm8 supported!")
    # The header stores one 128-byte record per chromosome in a fixed order
    # (chr1..chrN, then chrX, chrY); Python 2 dicts are unordered, so build
    # that order explicitly instead of relying on .keys().
    chromosomes = sorted(
        chromosomes_fp.keys(),
        key=lambda c: (0, int(c[3:])) if c[3:].isdigit() else (1, c[3:]))
motif_fhd.seek(0)
# unpack the start pos
for chromosome in chromosomes:
chromosomes_fp[chromosome][0] = upk("<i",motif_fhd.read(4))[0]
motif_fhd.seek(124,1)
motif_fhd.seek(0,2)
# calculate number of hits
total_motif_hits = 0
for i in range(len(chromosomes)-1):
mh = (chromosomes_fp[chromosomes[i+1]][0]-chromosomes_fp[chromosomes[i]][0])/8
chromosomes_fp[chromosomes[i]][1] = mh
total_motif_hits += mh
# last one
mh = (motif_fhd.tell()-chromosomes_fp[chromosomes[-1]][0])/8
chromosomes_fp[chromosomes[-1]][1]=mh
total_motif_hits += mh
# read and write
read_motif_hits = 0
portion = 0
for chromosome in chromosomes:
motif_fhd.seek(chromosomes_fp[chromosome][0],0)
for i in range(chromosomes_fp[chromosome][1]):
read_motif_hits += 1
portion = float(read_motif_hits)/total_motif_hits
if LOG:
sys.stdout.write("\r%.1f%% %s" % (portion*100,"#"*int(portion*50)))
sys.stdout.flush()
loc = upk("<i",motif_fhd.read(4))[0]
score = upk("<f",motif_fhd.read(4))[0]
motif_fhd.read(4)
if score < 0:
strand = -1
score = score*-1
else:
strand = 1
#ofhd.write("%s\t%d\t%d\t%s_%s_%d\t%.2f\t%s\n" % (chromosome,loc-1,loc+motif_len-1,motif,chromosome,i,score,strand))
if score > cutoff:
#print score,cutoff
motif_range_list.add_range(chromosome,RangeI(start=loc-1,end=loc,strand=strand))
#print loc-1
#sys.stdout.write("\n")
motif_range_list.merge_overlap()
return motif_range_list
def read_motif2 (motif_fhd,species,cutoff=0):
"""Read motif scan result, and return a TabIO.FWTrackI object
containing the motif locations.
* If the motif scan data file is not big, use this function to
load the whole file into memory. It may be faster than
read_motif().
motif_fhd : a file handler for binary motif scan result
species : must be "mm8" for mouse or "hg18" for human
cutoff : cutoff for the motif scan score
"""
motif_range_list = FWTrackI(fw=0)
if species == "hg18":
chromosomes_fp = { # store start and number of file-pos for every chromosome in bin file
"chr1":[0,0],"chr2":[0,0],"chr3":[0,0],
"chr4":[0,0],"chr5":[0,0],"chr6":[0,0],
"chr7":[0,0],"chr8":[0,0],"chr9":[0,0],
"chr10":[0,0],"chr11":[0,0],"chr12":[0,0],
"chr13":[0,0],"chr14":[0,0],"chr15":[0,0],
"chr16":[0,0],"chr17":[0,0],"chr18":[0,0],
"chr19":[0,0],"chr20":[0,0],"chr21":[0,0],
"chr22":[0,0],"chrX":[0,0],"chrY":[0,0]
}
chromosomes = ["chr1","chr2","chr3","chr4","chr5","chr6",
"chr7","chr8","chr9","chr10","chr11","chr12",
"chr13","chr14","chr15","chr16","chr17","chr18",
"chr19","chr20","chr21","chr22","chrX","chrY"]
elif species == "mm8":
chromosomes_fp = { # store start and number of file-pos for every chromosome in bin file
"chr1":[0,0],"chr2":[0,0],"chr3":[0,0],
"chr4":[0,0],"chr5":[0,0],"chr6":[0,0],
"chr7":[0,0],"chr8":[0,0],"chr9":[0,0],
"chr10":[0,0],"chr11":[0,0],"chr12":[0,0],
"chr13":[0,0],"chr14":[0,0],"chr15":[0,0],
"chr16":[0,0],"chr17":[0,0],"chr18":[0,0],
"chr19":[0,0],"chrX":[0,0],"chrY":[0,0]
}
chromosomes = ["chr1","chr2","chr3","chr4","chr5","chr6",
"chr7","chr8","chr9","chr10","chr11","chr12",
"chr13","chr14","chr15","chr16","chr17","chr18",
"chr19","chrX","chrY"]
else:
raise Exception("Only hg18/mm8 supported!")
motif_fhd.seek(0)
data = motif_fhd.read()
# unpack the start pos
p = 0
for chromosome in chromosomes:
chromosomes_fp[chromosome][0] = upk("<i",data[p:p+4])[0]
p += 128
# calculate number of hits
total_motif_hits = 0
for i in range(len(chromosomes)-1):
mh = (chromosomes_fp[chromosomes[i+1]][0]-chromosomes_fp[chromosomes[i]][0])/8
chromosomes_fp[chromosomes[i]][1] = mh
total_motif_hits += mh
# last one
mh = (len(data)-chromosomes_fp[chromosomes[-1]][0])/8
chromosomes_fp[chromosomes[-1]][1]=mh
total_motif_hits += mh
# read and write
read_motif_hits = 0
portion = 0
p = 0
n=0
for chromosome in chromosomes:
p = chromosomes_fp[chromosome][0]
for i in range(chromosomes_fp[chromosome][1]):
read_motif_hits += 1
portion = float(read_motif_hits)/total_motif_hits
if LOG:
sys.stdout.write("\r %.1f%% %s" % (portion*100,"#"*int(portion*50)))
sys.stdout.flush()
loc = upk("<i",data[p:p+4])[0]
score = upk("<f",data[p+4:p+8])[0]
p += 8
if score < 0:
strand = -1
score = score*-1
else:
strand = 1
#ofhd.write("%s\t%d\t%d\t%s_%s_%d\t%.2f\t%s\n" % (chromosome,loc-1,loc+motif_len-1,motif,chromosome,i,score,strand))
if score > cutoff:
#print score,cutoff
n+=1
motif_range_list.add_range(chromosome,RangeI(start=loc-1,end=loc,strand=strand))
#print loc-1
if LOG : sys.stdout.write("\n")
data = None
motif_range_list.merge_overlap()
#print n
return motif_range_list
def motif_count (track, motif_track):
"""Count how many motif discovered in a given track.
"""
return track.include(motif_track)
|
|
# -*- coding: utf-8 -*-
### get it from: http://www.crummy.com/software/BeautifulSoup/
import re
import sys
import os
import pycurl
import time
import datetime
import simplejson as json
ADC_COLOR = {
'CA': '#FF1F1F',
'CERN': '#AE3C51',
'DE': '#000000',
'ES': '#EDBF00',
'FR': '#0055A5',
'IT': '#009246',
'ND': '#6298FF',
'NL': '#D97529',
'TW': '#89000F',
'UK': '#356C20',
'US': '#00006B',
'RU': '#66008D'
}
CHARTS = [
["P1_Logfile", "Category A,B,C Analy - Logfile"],
["P2_Logfile", "Top 10 sites of Category A - Logfile"],
["P3_Logfile", "Cloud Analy of Category B - Logfile"],
["P4_Logfile", "Top 20 sites of Category C - Logfile"],
["P5_Logfile", "Cloud Analy of Category C - Logfile"],
["P6_Logfile", "Top 20 sites of Category E - Logfile"],
["P7_Logfile", "Cloud Analy of Category E - Logfile"]
]
# get cloud name
fjson = open('panda_queues.json', 'r')
data = fjson.read()
dic = json.loads(data)
fjson.close()
def get_document(logfile):
fjson = open(logfile, 'r')
data = fjson.read()
contents = json.loads(data)
fjson.close()
return contents
def is_this_category(string, category_pattern):
if re.search(category_pattern, str(string)) is None:
return False
else:
return True
def get_dic_dic_siteid():
global dic
dic_dic = {}
for queueinfo in dic:
sitename = queueinfo['agis_ssb_site_name']
pandasiteid = queueinfo['panda_siteID']
cloud = queueinfo['cloud']
dic_dic[pandasiteid] = {'sitename':sitename, 'cloud':cloud, 'pandasiteid': pandasiteid}
return dic_dic
dic_dic_siteid = get_dic_dic_siteid()
def get_dic_dic_ATLASsitename():
global dic
dic_dic = {}
for queueinfo in dic:
sitename = queueinfo['agis_ssb_site_name']
pandasiteid = queueinfo['panda_siteID']
cloud = queueinfo['cloud']
dic_dic[sitename] = {'sitename':sitename, 'cloud':cloud, 'pandasiteid': pandasiteid}
return dic_dic
dic_dic_ATLASsitename = get_dic_dic_ATLASsitename()
def merge_dic_dics():
global dic_dic_siteid, dic_dic_ATLASsitename
dic_dic_merged = dic_dic_siteid
dic_dic_merged.update(dic_dic_ATLASsitename)
return dic_dic_merged
dic_dic_merged = merge_dic_dics()
def get_cloud_name(site):
global dic_dic_merged
cloud = site
try:
cloud = dic_dic_merged[site]['cloud']
#print u'ln153', u'site', site, 'cloud', cloud
except KeyError:
cloud = site
return cloud
def get_sitecloud_name(dic, siteID):
global dic_dic_merged
cloud = siteID
site_name = siteID
try:
cloud = dic_dic_merged[siteID]['cloud']
except KeyError:
cloud = siteID
try:
site_name = dic_dic_merged[siteID]['sitename']
except KeyError:
site_name = siteID
return (site_name, cloud)
def is_in_buf(records, logDate, category, site, dnUser):
found = False
idx = 0
for record in records:
if record[1] == logDate and record[2] == category and record[3] == site and record[5] == dnUser:
found = True
break
idx += 1
if not found:
return None
else:
return idx
def parse_document(document):
P_category = {'A':0, 'B':0, 'C':0, 'E':0}
P_site = {'A':{}, 'B':{}, 'C':{}, 'E':{}}
P_cloud = {'A':{}, 'B':{}, 'C':{}, 'E':{}}
processed_rows = 0
from_date = None
to_date = None
for row in document:
cell_message = document[row]['message']
if from_date is None:
from_date = document[row]['timestamp']
if to_date is None:
to_date = document[row]['timestamp']
if document[row]['timestamp'] < from_date:
from_date = document[row]['timestamp']
if document[row]['timestamp'] > to_date:
to_date = document[row]['timestamp']
message_category = "no.category"
message_dn = ""
message_jobset = "no.jobset"
message_jobdef = "no.jobdef"
message_action = ""
message_site = "no.site"
message_reason = "no.reason"
message_weight = "no.weight"
# print 'Debug:',message_date,message_time,row_counter,cell_message
processed_rows += 1
tmp_message = str(cell_message.replace(' ', ' ')).split(' : ')
message_dn = tmp_message[0].split('=')[1].replace("\\\'", "").strip().replace(' ', '_')
tmp_job = tmp_message[1].split(' ')
if len(tmp_job) > 1:
message_jobset = tmp_job[0].split('=')[1].strip()
message_jobdef = tmp_job[1].split('=')[1].strip()
else:
if is_this_category(tmp_job[0], 'jobset'):
message_jobset = tmp_job[0].split('=')[1].strip()
if is_this_category(tmp_job[0], 'jobdef'):
message_jobdef = tmp_job[0].split('=')[1].strip()
## skip
if is_this_category(cell_message, ' action=skip '):
# continue # try to speed up
message_category = "D"
message_skip = tmp_message[2].split(' ')
message_action = message_skip[0].split('=')[1].strip()
message_site = message_skip[1].split('=')[1].strip()
message_reason = message_skip[2].split('=')[1].strip()
if re.search('=', message_skip[4]):
message_weight = message_skip[4].split('=')[1].strip()
else:
message_reason = '_'.join(message_skip[3:]).strip('_')
# exclude : add at 2011-10-26
elif is_this_category(cell_message, ' action=exclude '):
message_category = "E"
message_skip = tmp_message[2].split(' ')
message_action = message_skip[0].split('=')[1].strip()
message_site = message_skip[1].split('=')[1].strip()
message_reason = message_skip[2].split('=')[1].strip()
if re.search('=', message_skip[4]):
message_weight = message_skip[4].split('=')[1].strip()
else:
message_reason = '_'.join(message_skip[3:]).strip('_')
## choose
elif is_this_category(cell_message, ' action=choose '):
message_category = "C"
message_choose = tmp_message[2].split(' ')
message_action = message_choose[0].split('=')[1].strip()
message_site = message_choose[1].split('=')[1].strip()
message_reason = message_choose[2].split('=')[1].strip()
if re.search('=', message_choose[5]):
message_weight = message_choose[5].split('=')[1].strip()
else:
message_reason = '_'.join(message_choose[3:]).strip('_')
## action=use: add at 2011-10-26
elif is_this_category(cell_message, ' action=use '):
#message_category = "C"
message_choose = tmp_message[2].split(' ')
message_action = message_choose[0].split('=')[1].strip()
message_site = message_choose[1].split('=')[1].strip()
# message_reason = message_choose[2].split('=')[1].strip()
message_reason = '_'.join(message_choose[3:]).strip('_')
if is_this_category(message_reason, 'site'):
message_category = "A"
if is_this_category(message_reason, 'cloud'):
message_category = "B"
## use site or cloud
elif is_this_category(cell_message, ' use '):
message_use = tmp_message[2].split(' ')
message_action = message_use[0].strip()
message_site = message_use[1].strip()
message_reason = '_'.join(message_use[3:]).strip('_')
if is_this_category(message_reason, 'site'):
message_category = "A"
if is_this_category(message_reason, 'cloud'):
message_category = "B"
        ## append to the records it belongs to
if message_category in ['A', 'B', 'C', 'E']:
site_name, cloud = get_sitecloud_name(dic, message_site)
P_category[message_category] += 1
if site_name not in P_site[message_category]:
P_site[message_category][site_name] = 1
else:
P_site[message_category][site_name] += 1
if cloud not in P_cloud[message_category]:
P_cloud[message_category][cloud] = 1
else:
P_cloud[message_category][cloud] += 1
# print "INFOR: category=%s user=%s action=%s site=%s jobset=%s jobdef=%s reason=%s"%(message_category,message_dn,message_action,message_site,message_jobset,message_jobdef,message_reason)
return (processed_rows, from_date, to_date, P_category, P_site, P_cloud)
def sort_by_value(dic):
lists = dic.items()
backitems = [[v[1], v[0]] for v in lists]
backitems.sort(reverse=True)
return [ backitems[i][1] for i in range(0, len(backitems))]
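# Illustrative sketch, not part of the original script: sort_by_value() returns the
# dict keys ordered by their counts, descending; the "Top N sites" charts below are
# built by slicing this ordering. The counts here are hypothetical.
def _example_sort_by_value():
    counts = {'SITE_A': 12, 'SITE_B': 40, 'SITE_C': 7}
    return sort_by_value(counts)  # ['SITE_B', 'SITE_A', 'SITE_C']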
def write_document(from_date, to_date, P_category, P_site, P_cloud, doc_file="logfile.html"):
data = open('template/CHART_logfile.html').read()
data = data.replace('#FROM_DATE#', from_date)
data = data.replace('#TO_DATE#', to_date)
data = data.replace('#TITLE_TEXT1#', CHARTS[0][1])
data = data.replace('#TITLE_TEXT2#', CHARTS[1][1])
data = data.replace('#TITLE_TEXT3#', CHARTS[2][1])
data = data.replace('#TITLE_TEXT4#', CHARTS[3][1])
data = data.replace('#TITLE_TEXT5#', CHARTS[4][1])
data = data.replace('#TITLE_TEXT6#', CHARTS[5][1])
data = data.replace('#TITLE_TEXT7#', CHARTS[6][1])
comm = ""
series_data1 = ""
for c in P_category:
series_data1 = "%s %s ['%s', %d]" % (series_data1, comm, c, P_category[c])
comm = ","
data = data.replace('#SERIES_DATA1#', series_data1)
comm = ""
series_data2 = ""
sorted = sort_by_value(P_site['A'])
cnt = 0
for k in sorted:
cloud = get_cloud_name(k)
color = ADC_COLOR[cloud]
series_data2 = "%s %s {name: '%s (%s)', y: %d, color: '%s'}" % (series_data2, comm, k, cloud, P_site['A'][k], color)
comm = ","
cnt += 1
if cnt >= 10:
break
data = data.replace('#SERIES_DATA2#', series_data2)
comm = ""
series_data3 = ""
sorted = sort_by_value(P_cloud['B'])
for k in sorted:
color = ADC_COLOR[k]
series_data3 = "%s %s {name: '%s', y: %d, color: '%s'}" % (series_data3, comm, k, P_cloud['B'][k], color)
comm = ","
data = data.replace('#SERIES_DATA3#', series_data3)
comm = ""
series_data4 = ""
sorted = sort_by_value(P_site['C'])
cnt = 0
for k in sorted:
cloud = get_cloud_name(k)
color = ADC_COLOR[cloud]
series_data4 = "%s %s {name: '%s (%s)', y: %d, color: '%s'}" % (series_data4, comm, k, cloud, P_site['C'][k], color)
comm = ","
cnt += 1
if cnt >= 20:
break
data = data.replace('#SERIES_DATA4#', series_data4)
comm = ""
series_data5 = ""
sorted = sort_by_value(P_cloud['C'])
for k in sorted:
color = ADC_COLOR[k]
series_data5 = "%s %s {name: '%s', y: %d, color: '%s'}" % (series_data5, comm, k, P_cloud['C'][k], color)
comm = ","
data = data.replace('#SERIES_DATA5#', series_data5)
comm = ""
series_data6 = ""
sorted = sort_by_value(P_site['E'])
cnt = 0
for k in sorted:
cloud = get_cloud_name(k)
color = ADC_COLOR[cloud]
series_data6 = "%s %s {name: '%s (%s)', y: %d, color: '%s'}" % (series_data6, comm, k, cloud, P_site['E'][k], color)
comm = ","
cnt += 1
if cnt >= 20:
break
data = data.replace('#SERIES_DATA6#', series_data6)
comm = ""
series_data7 = ""
sorted = sort_by_value(P_cloud['E'])
for k in sorted:
color = ADC_COLOR[k]
series_data7 = "%s %s {name: '%s', y: %d, color: '%s'}" % (series_data7, comm, k, P_cloud['E'][k], color)
comm = ","
data = data.replace('#SERIES_DATA7#', series_data7)
of = open(doc_file, 'w')
print >> of, data
of.close()
return True
def run(logfile):
#t1 = time.time()
document = get_document(logfile)
t2 = time.time()
processed_rows, from_date, to_date, P_category, P_site, P_cloud = parse_document(document)
t3 = time.time()
#time_get = t2-t1
time_parse = t3 - t2
write_document(from_date, to_date, P_category, P_site, P_cloud)
#time_db = t4-t3
print u'INFOR: Rows: %d Done(A/B/C/E: %d/%d/%d/%d). ParsingTime: %d' % (processed_rows, P_category['A'], P_category['B'], P_category['C'], P_category['E'], time_parse)
if __name__ == "__main__":
if len(sys.argv) < 2:
print "ERROR: too few input parameters"
print "USAGE: python parse_logfile.py <input logfile>"
sys.exit(1) # before Python 2.5, after: exit(1)
#else:
# OUTPUT_FILENAME_PREFIX = sys.argv[1]
run(sys.argv[1])
|
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run a group of subprocesses and then finish."""
from __future__ import print_function
import logging
import multiprocessing
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import errno
# cpu cost measurement
measure_cpu_costs = False
_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
# Maximum number of bytes of a job's stdout that will be stored in the result.
# Only the last N bytes of stdout are kept if the actual output is longer.
_MAX_RESULT_SIZE = 64 * 1024
# NOTE: If you change this, please make sure to test reviewing the
# github PR with http://reviewable.io, which is known to add UTF-8
# characters to the PR description, which leak into the environment here
# and cause failures.
def strip_non_ascii_chars(s):
return ''.join(c for c in s if ord(c) < 128)
def sanitized_environment(env):
sanitized = {}
for key, value in env.items():
sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
return sanitized
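# Illustrative sketch, not part of the original module: sanitized_environment()
# strips non-ASCII characters from both keys and values so subprocess.Popen never
# sees environment entries it cannot encode. The variables here are hypothetical.
def _example_sanitized_environment():
    env = {'PR_DESCRIPTION': u'r\xe9sum\xe9 text', 'PATH': '/usr/bin'}
    return sanitized_environment(env)  # {'PR_DESCRIPTION': 'rsum text', 'PATH': '/usr/bin'}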
def platform_string():
if platform.system() == 'Windows':
return 'windows'
elif platform.system()[:7] == 'MSYS_NT':
return 'windows'
elif platform.system() == 'Darwin':
return 'mac'
elif platform.system() == 'Linux':
return 'linux'
else:
return 'posix'
# Set up a signal handler so that signal.pause registers 'something'
# when a child finishes.
# Not using futures and threading to avoid a dependency on subprocess32.
if platform_string() == 'windows':
pass
else:
def alarm_handler(unused_signum, unused_frame):
pass
signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
signal.signal(signal.SIGALRM, alarm_handler)
_SUCCESS = object()
_FAILURE = object()
_RUNNING = object()
_KILLED = object()
_COLORS = {
'red': [31, 0],
'green': [32, 0],
'yellow': [33, 0],
'lightgray': [37, 0],
'gray': [30, 1],
'purple': [35, 0],
'cyan': [36, 0]
}
_BEGINNING_OF_LINE = '\x1b[0G'
_CLEAR_LINE = '\x1b[2K'
_TAG_COLOR = {
'FAILED': 'red',
'FLAKE': 'purple',
'TIMEOUT_FLAKE': 'purple',
'WARNING': 'yellow',
'TIMEOUT': 'red',
'PASSED': 'green',
'START': 'gray',
'WAITING': 'yellow',
'SUCCESS': 'green',
'IDLE': 'gray',
'SKIPPED': 'cyan'
}
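# Illustrative sketch, not part of the original module: message() renders each tag
# with the ANSI escape built from _TAG_COLOR/_COLORS, e.g. 'PASSED' -> green
# (attribute 0, color code 32).
def _example_colored_tag(tag='PASSED'):
    color_code, attribute = _COLORS[_TAG_COLOR[tag]]
    return '\x1b[%d;%dm%s\x1b[0m' % (attribute, color_code, tag)  # '\x1b[0;32mPASSED\x1b[0m'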
_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=_FORMAT)
def eintr_be_gone(fn):
"""Run fn until it doesn't stop because of EINTR"""
while True:
try:
return fn()
        except IOError as e:
if e.errno != errno.EINTR:
raise
def message(tag, msg, explanatory_text=None, do_newline=False):
if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
return
message.old_tag = tag
message.old_msg = msg
while True:
try:
if platform_string() == 'windows' or not sys.stdout.isatty():
if explanatory_text:
logging.info(explanatory_text)
logging.info('%s: %s', tag, msg)
else:
sys.stdout.write(
'%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' %
(_BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' % explanatory_text
if explanatory_text is not None else '',
_COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
tag, msg, '\n'
if do_newline or explanatory_text is not None else ''))
sys.stdout.flush()
return
        except IOError as e:
if e.errno != errno.EINTR:
raise
message.old_tag = ''
message.old_msg = ''
def which(filename):
if '/' in filename:
return filename
for path in os.environ['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, filename)):
return os.path.join(path, filename)
raise Exception('%s not found' % filename)
class JobSpec(object):
"""Specifies what to run for a job."""
def __init__(self,
cmdline,
shortname=None,
environ=None,
cwd=None,
shell=False,
timeout_seconds=5 * 60,
flake_retries=0,
timeout_retries=0,
kill_handler=None,
cpu_cost=1.0,
verbose_success=False):
"""
Arguments:
cmdline: a list of arguments to pass as the command line
environ: a dictionary of environment variables to set in the child process
kill_handler: a handler that will be called whenever job.kill() is invoked
cpu_cost: number of cores per second this job needs
"""
if environ is None:
environ = {}
self.cmdline = cmdline
self.environ = environ
self.shortname = cmdline[0] if shortname is None else shortname
self.cwd = cwd
self.shell = shell
self.timeout_seconds = timeout_seconds
self.flake_retries = flake_retries
self.timeout_retries = timeout_retries
self.kill_handler = kill_handler
self.cpu_cost = cpu_cost
self.verbose_success = verbose_success
def identity(self):
return '%r %r' % (self.cmdline, self.environ)
def __hash__(self):
return hash(self.identity())
def __cmp__(self, other):
return self.identity() == other.identity()
def __repr__(self):
return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname,
self.cmdline)
def __str__(self):
return '%s: %s %s' % (self.shortname, ' '.join(
'%s=%s' % kv for kv in self.environ.items()),
' '.join(self.cmdline))
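# Illustrative sketch, not part of the original module: a JobSpec only describes a
# command; nothing runs until a Job wraps it. The command line, shortname and
# environment below are hypothetical.
def _example_jobspec():
    spec = JobSpec(cmdline=['sleep', '1'],
                   shortname='example.sleep',
                   environ={'EXAMPLE_VAR': '1'},
                   timeout_seconds=30,
                   flake_retries=1)
    return str(spec)  # 'example.sleep: EXAMPLE_VAR=1 sleep 1'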
class JobResult(object):
def __init__(self):
self.state = 'UNKNOWN'
self.returncode = -1
self.elapsed_time = 0
self.num_failures = 0
self.retries = 0
self.message = ''
self.cpu_estimated = 1
self.cpu_measured = 1
def read_from_start(f):
f.seek(0)
return f.read()
class Job(object):
"""Manages one job."""
def __init__(self,
spec,
newline_on_success,
travis,
add_env,
quiet_success=False):
self._spec = spec
self._newline_on_success = newline_on_success
self._travis = travis
self._add_env = add_env.copy()
self._retries = 0
self._timeout_retries = 0
self._suppress_failure_message = False
self._quiet_success = quiet_success
if not self._quiet_success:
message('START', spec.shortname, do_newline=self._travis)
self.result = JobResult()
self.start()
def GetSpec(self):
return self._spec
def start(self):
self._tempfile = tempfile.TemporaryFile()
env = dict(os.environ)
env.update(self._spec.environ)
env.update(self._add_env)
env = sanitized_environment(env)
self._start = time.time()
cmdline = self._spec.cmdline
# The Unix time command is finicky when used with MSBuild, so we don't use it
# with jobs that run MSBuild.
global measure_cpu_costs
if measure_cpu_costs and not 'vsprojects\\build' in cmdline[0]:
cmdline = ['time', '-p'] + cmdline
else:
measure_cpu_costs = False
try_start = lambda: subprocess.Popen(args=cmdline,
stderr=subprocess.STDOUT,
stdout=self._tempfile,
cwd=self._spec.cwd,
shell=self._spec.shell,
env=env)
delay = 0.3
for i in range(0, 4):
try:
self._process = try_start()
break
except OSError:
message('WARNING',
'Failed to start %s, retrying in %f seconds' %
(self._spec.shortname, delay))
time.sleep(delay)
delay *= 2
else:
self._process = try_start()
self._state = _RUNNING
def state(self):
"""Poll current state of the job. Prints messages at completion."""
def stdout(self=self):
stdout = read_from_start(self._tempfile)
self.result.message = stdout[-_MAX_RESULT_SIZE:]
return stdout
if self._state == _RUNNING and self._process.poll() is not None:
elapsed = time.time() - self._start
self.result.elapsed_time = elapsed
if self._process.returncode != 0:
if self._retries < self._spec.flake_retries:
message(
'FLAKE',
'%s [ret=%d, pid=%d]' %
(self._spec.shortname, self._process.returncode,
self._process.pid),
stdout(),
do_newline=True)
self._retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
# NOTE: job is restarted regardless of jobset's max_time setting
self.start()
else:
self._state = _FAILURE
if not self._suppress_failure_message:
message(
'FAILED',
'%s [ret=%d, pid=%d, time=%.1fsec]' %
(self._spec.shortname, self._process.returncode,
self._process.pid, elapsed),
stdout(),
do_newline=True)
self.result.state = 'FAILED'
self.result.num_failures += 1
self.result.returncode = self._process.returncode
else:
self._state = _SUCCESS
measurement = ''
if measure_cpu_costs:
m = re.search(
r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)',
stdout())
real = float(m.group(1))
user = float(m.group(2))
                    sys_time = float(m.group(3))
if real > 0.5:
                        cores = (user + sys_time) / real
self.result.cpu_measured = float('%.01f' % cores)
self.result.cpu_estimated = float(
'%.01f' % self._spec.cpu_cost)
measurement = '; cpu_cost=%.01f; estimated=%.01f' % (
self.result.cpu_measured, self.result.cpu_estimated)
if not self._quiet_success:
message(
'PASSED',
'%s [time=%.1fsec, retries=%d:%d%s]' %
(self._spec.shortname, elapsed, self._retries,
self._timeout_retries, measurement),
stdout() if self._spec.verbose_success else None,
do_newline=self._newline_on_success or self._travis)
self.result.state = 'PASSED'
elif (self._state == _RUNNING and
self._spec.timeout_seconds is not None and
time.time() - self._start > self._spec.timeout_seconds):
elapsed = time.time() - self._start
self.result.elapsed_time = elapsed
if self._timeout_retries < self._spec.timeout_retries:
message(
'TIMEOUT_FLAKE',
'%s [pid=%d]' % (self._spec.shortname, self._process.pid),
stdout(),
do_newline=True)
self._timeout_retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
if self._spec.kill_handler:
self._spec.kill_handler(self)
self._process.terminate()
# NOTE: job is restarted regardless of jobset's max_time setting
self.start()
else:
message(
'TIMEOUT',
'%s [pid=%d, time=%.1fsec]' % (self._spec.shortname,
self._process.pid, elapsed),
stdout(),
do_newline=True)
self.kill()
self.result.state = 'TIMEOUT'
self.result.num_failures += 1
return self._state
def kill(self):
if self._state == _RUNNING:
self._state = _KILLED
if self._spec.kill_handler:
self._spec.kill_handler(self)
self._process.terminate()
def suppress_failure_message(self):
self._suppress_failure_message = True
class Jobset(object):
"""Manages one run of jobs."""
def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic,
newline_on_success, travis, stop_on_failure, add_env,
quiet_success, max_time):
self._running = set()
self._check_cancelled = check_cancelled
self._cancelled = False
self._failures = 0
self._completed = 0
self._maxjobs = maxjobs
self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
self._newline_on_success = newline_on_success
self._travis = travis
self._stop_on_failure = stop_on_failure
self._add_env = add_env
self._quiet_success = quiet_success
self._max_time = max_time
self.resultset = {}
self._remaining = None
self._start_time = time.time()
def set_remaining(self, remaining):
self._remaining = remaining
def get_num_failures(self):
return self._failures
def cpu_cost(self):
c = 0
for job in self._running:
c += job._spec.cpu_cost
return c
def start(self, spec):
"""Start a job. Return True on success, False on failure."""
while True:
if self._max_time > 0 and time.time(
) - self._start_time > self._max_time:
skipped_job_result = JobResult()
skipped_job_result.state = 'SKIPPED'
message('SKIPPED', spec.shortname, do_newline=True)
self.resultset[spec.shortname] = [skipped_job_result]
return True
if self.cancelled(): return False
current_cpu_cost = self.cpu_cost()
if current_cpu_cost == 0: break
if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
if len(self._running) < self._maxjobs_cpu_agnostic:
break
self.reap(spec.shortname, spec.cpu_cost)
if self.cancelled(): return False
job = Job(spec, self._newline_on_success, self._travis, self._add_env,
self._quiet_success)
self._running.add(job)
if job.GetSpec().shortname not in self.resultset:
self.resultset[job.GetSpec().shortname] = []
return True
def reap(self, waiting_for=None, waiting_for_cost=None):
"""Collect the dead jobs."""
while self._running:
dead = set()
for job in self._running:
st = eintr_be_gone(lambda: job.state())
if st == _RUNNING: continue
if st == _FAILURE or st == _KILLED:
self._failures += 1
if self._stop_on_failure:
self._cancelled = True
for job in self._running:
job.kill()
dead.add(job)
break
for job in dead:
self._completed += 1
if not self._quiet_success or job.result.state != 'PASSED':
self.resultset[job.GetSpec().shortname].append(job.result)
self._running.remove(job)
if dead: return
if not self._travis and platform_string() != 'windows':
rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
if self._remaining is not None and self._completed > 0:
now = time.time()
sofar = now - self._start_time
remaining = sofar / self._completed * (
self._remaining + len(self._running))
rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
if waiting_for is not None:
wstr = ' next: %s @ %.2f cpu' % (waiting_for,
waiting_for_cost)
else:
wstr = ''
message(
'WAITING',
'%s%d jobs running, %d complete, %d failed (load %.2f)%s' %
(rstr, len(self._running), self._completed, self._failures,
self.cpu_cost(), wstr))
if platform_string() == 'windows':
time.sleep(0.1)
else:
signal.alarm(10)
signal.pause()
def cancelled(self):
"""Poll for cancellation."""
if self._cancelled: return True
if not self._check_cancelled(): return False
for job in self._running:
job.kill()
self._cancelled = True
return True
def finish(self):
while self._running:
if self.cancelled(): pass # poll cancellation
self.reap()
if platform_string() != 'windows':
signal.alarm(0)
return not self.cancelled() and self._failures == 0
def _never_cancelled():
return False
def tag_remaining(xs):
staging = []
for x in xs:
staging.append(x)
if len(staging) > 5000:
yield (staging.pop(0), None)
n = len(staging)
for i, x in enumerate(staging):
yield (x, n - i - 1)
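# Illustrative sketch, not part of the original module: tag_remaining() pairs each
# job with the number of jobs left after it, except that anything past a 5000-item
# buffer is yielded early with a count of None.
def _example_tag_remaining():
    return list(tag_remaining(['a', 'b', 'c']))  # [('a', 2), ('b', 1), ('c', 0)]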
def run(cmdlines,
check_cancelled=_never_cancelled,
maxjobs=None,
maxjobs_cpu_agnostic=None,
newline_on_success=False,
travis=False,
infinite_runs=False,
stop_on_failure=False,
add_env={},
skip_jobs=False,
quiet_success=False,
max_time=-1):
if skip_jobs:
resultset = {}
skipped_job_result = JobResult()
skipped_job_result.state = 'SKIPPED'
for job in cmdlines:
message('SKIPPED', job.shortname, do_newline=True)
resultset[job.shortname] = [skipped_job_result]
return 0, resultset
js = Jobset(check_cancelled, maxjobs if maxjobs is not None else
_DEFAULT_MAX_JOBS, maxjobs_cpu_agnostic
if maxjobs_cpu_agnostic is not None else _DEFAULT_MAX_JOBS,
newline_on_success, travis, stop_on_failure, add_env,
quiet_success, max_time)
for cmdline, remaining in tag_remaining(cmdlines):
if not js.start(cmdline):
break
if remaining is not None:
js.set_remaining(remaining)
js.finish()
return js.get_num_failures(), js.resultset
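# Illustrative sketch, not part of the original module: run() is the usual entry
# point; it takes JobSpec objects and returns the failure count plus a resultset
# keyed by shortname. The commands below are hypothetical and assume a POSIX
# system where 'true' and 'false' are on PATH.
def _example_run_jobs():
    specs = [JobSpec(['true'], shortname='example.true'),
             JobSpec(['false'], shortname='example.false')]
    num_failures, resultset = run(specs, maxjobs=2, newline_on_success=True)
    return num_failures, resultset  # num_failures == 1: 'false' exits non-zero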
|
|
from tests.base import BaseTestCase
from mod_auth.models import Role
from mod_regression.models import RegressionTest, Category, InputType, OutputType
from mod_sample.models import Sample
from flask import g
class TestControllers(BaseTestCase):
def test_root(self):
response = self.app.test_client().get('/regression/')
self.assertEqual(response.status_code, 200)
self.assert_template_used('regression/index.html')
def test_specific_regression_test_loads(self):
response = self.app.test_client().get('/regression/test/1/view')
self.assertEqual(response.status_code, 200)
self.assert_template_used('regression/test_view.html')
regression_test = RegressionTest.query.filter(RegressionTest.id == 1).first()
self.assertIn(regression_test.command, str(response.data))
def test_regression_test_status_toggle(self):
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
regression_test = RegressionTest.query.filter(RegressionTest.id == 1).first()
response = c.get('/regression/test/1/toggle')
self.assertEqual(response.status_code, 200)
self.assertEqual('success', response.json['status'])
if regression_test.active == 1:
self.assertEqual('False', response.json['active'])
else:
self.assertEqual('True', response.json['active'])
    def test_regression_test_deletion_without_login(self):
response = self.app.test_client().get('/regression/test/9432/delete')
self.assertEqual(response.status_code, 302)
self.assertIn(b'/account/login?next=regression.test_delete', response.data)
def test_delete_if_will_throw_404(self):
"""
Check if it will throw an error 404
:return:
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response_regression = c.get('/regression/test/9432/delete')
self.assertEqual(response_regression.status_code, 404)
def test_delete(self):
"""
Check it will delete the test
:return:
"""
# Create Valid Entry
from mod_regression.models import InputType, OutputType
test = RegressionTest(1, '-autoprogram -out=ttxt -latin1 -2', InputType.file, OutputType.file, 3, 10)
g.db.add(test)
g.db.commit()
# Create Account to Delete Test
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
# Delete Test
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response_regression = c.get('/regression/test/1/delete')
self.assertEqual(response_regression.status_code, 200)
response = c.post(
'/regression/test/1/delete', data=dict(
hidden='yes',
submit=True
)
)
self.assertEqual(response.status_code, 302) # 302 is for Redirection
def test_add_category(self):
"""
Check it will add a category
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/category_add', data=dict(category_name="Lost", category_description="And found", submit=True))
self.assertNotEqual(Category.query.filter(Category.name=="Lost").first(),None)
def test_add_category_empty(self):
"""
Check it won't add a category with an empty name
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/category_add', data=dict(category_name="", category_description="And Lost", submit=True))
self.assertEqual(Category.query.filter(Category.name=="").first(),None)
self.assertEqual(Category.query.filter(Category.description=="And Lost").first(),None)
def test_edit_category(self):
"""
Check it will edit a category
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
new_category = Category(name="C-137", description="Wubba lubba dub dub")
g.db.add(new_category)
g.db.commit()
response = c.post(
'/regression/category/1/edit', data=dict(category_name="Sheldon", category_description="That's my spot", submit=True))
self.assertNotEqual(Category.query.filter(Category.name=="Sheldon").first(),None)
def test_edit_category_empty(self):
"""
Check it won't edit a category with an empty name
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
new_category = Category(name="C-137", description="Wubba lubba dub dub")
g.db.add(new_category)
g.db.commit()
response = c.post(
'/regression/category/1/edit', data=dict(category_name="", category_description="GG", submit=True))
self.assertEqual(Category.query.filter(Category.name=="").first(),None)
self.assertEqual(Category.query.filter(Category.description=="GG").first(),None)
self.assertNotEqual(Category.query.filter(Category.name=="C-137").first(),None)
def test_edit_wrong_category(self):
"""
Check it will throw 404 if trying to edit a category which doesn't exist
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
new_category = Category(name="C-137", description="Wubba lubba dub dub")
g.db.add(new_category)
g.db.commit()
response_regression = c.post('regression/category/1729/edit',data=dict(category_name="Sheldon", category_description="That's my spot", submit=True))
self.assertEqual(response_regression.status_code, 404)
def test_add_test(self):
"""
Check it will add a regression test
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/test/new', data=dict(
sample_id = 1,
command = "-autoprogram -out=ttxt -latin1 -2",
input_type = "file",
output_type = "file",
category_id = 1,
expected_rc = 25,
submit = True,
))
self.assertNotEqual(RegressionTest.query.filter(RegressionTest.id==3).first(),None)
def test_add_test_empty_erc(self):
"""
Check it will not add a regression test with empty Expected Runtime Code
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/test/new', data=dict(
sample_id = 1,
command = "-autoprogram -out=ttxt -latin1 -2",
input_type = InputType.file,
output_type = OutputType.file,
category_id = 1,
submit = True,
))
self.assertEqual(RegressionTest.query.filter(RegressionTest.id==3).first(),None)
def test_category_deletion_without_login(self):
response = self.app.test_client().get('/regression/category/9432/delete')
self.assertEqual(response.status_code, 302)
self.assertIn(b'/account/login?next=regression.category_delete', response.data)
def test_category_delete_if_will_throw_404(self):
"""
Check if it will throw an error 404
:return:
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response_regression = c.get('/regression/category/9432/delete')
self.assertEqual(response_regression.status_code, 404)
def test_category_delete(self):
"""
Check it will delete the Category
:return:
"""
# Create Account to Delete Category
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
# Delete Category
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response_regression = c.get('/regression/category/1/delete')
self.assertEqual(response_regression.status_code, 200)
response = c.post(
'/regression/category/1/delete', data=dict(
hidden='yes',
submit=True
)
)
self.assertEqual(response.status_code, 302) # 302 Is for Redirection,
def test_edit_test(self):
"""
Check it will edit a regression test
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/test/2/edit', data=dict(
sample_id = 1,
command = "-demogorgans",
input_type = "file",
output_type = "file",
category_id = 2,
expected_rc = 25,
submit = True,
))
self.assertNotEqual(RegressionTest.query.filter(RegressionTest.command == "-demogorgans").first(),None)
category = Category.query.filter(Category.id == 1).first()
for i in category.regression_tests:
self.assertNotEqual(i.id,2)
category = Category.query.filter(Category.id == 2).first()
for i in category.regression_tests:
if i.id == 2:
break
else:
                self.fail("regression test 2 should have been moved to category 2")
def test_edit_test_empty_erc(self):
"""
Check it will not edit a regression test with empty Expected Runtime Code
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/test/1/edit', data=dict(
sample_id = 1,
command = "-demogorgans",
input_type = "file",
output_type = "file",
category_id = 2,
submit = True,
))
self.assertEqual(RegressionTest.query.filter(RegressionTest.command == "-demogorgans").first(),None)
category = Category.query.filter(Category.id == 1).first()
for i in category.regression_tests:
if i.id == 1:
break
else:
                self.fail("regression test 1 should still be in category 1")
category = Category.query.filter(Category.id == 2).first()
for i in category.regression_tests:
self.assertNotEqual(i.id,1)
def test_edit_wrong_test(self):
"""
Check it will throw 404 if trying to edit a regression test which doesn't exist
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response_regression = c.post(
'/regression/test/42/edit', data=dict(
sample_id = 1,
command = "-demogorgans",
input_type = "file",
output_type = "file",
expected_rc = 25,
category_id = 2,
submit = True,
))
self.assertEqual(response_regression.status_code, 404)
def test_edit_test_same_category(self):
"""
        Check it won't create problems when editing a regression test without changing its category
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/test/2/edit', data=dict(
sample_id = 1,
command = "-demogorgans",
input_type = "file",
output_type = "file",
category_id = 1,
expected_rc = 25,
submit = True,
))
self.assertNotEqual(RegressionTest.query.filter(RegressionTest.command == "-demogorgans").first(),None)
category = Category.query.filter(Category.id == 1).first()
for i in category.regression_tests:
if i.id == 2:
break
else:
                self.fail("regression test 2 should still be in category 1")
def test_if_test_regression_view_throws_a_not_found_error(self):
"""
        Check that a 404 error is thrown if the test doesn't exist
"""
response = self.app.test_client().get('regression/test/1337/view')
self.assertEqual(response.status_code, 404)
def test_if_test_toggle_view_throws_a_not_found_error(self):
"""
        Check that toggling a nonexistent test throws a 404 error
"""
self.create_user_with_role(
self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response_login = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.get('regression/test/1337/toggle')
self.assertEqual(response.status_code, 404)
def test_sample_view(self):
"""
Test if it'll return a valid sample
"""
response = self.app.test_client().get('/regression/sample/1')
sample = Sample.query.filter(Sample.id == 1).first()
self.assertEqual(response.status_code, 200)
self.assert_context('sample', sample)
def test_sample_view_nonexistent(self):
"""
        Test that a nonexistent sample returns a 404
"""
response = self.app.test_client().get('/regression/sample/13423423')
self.assertEqual(response.status_code, 404)
|
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import base64
import cgi
import httplib2
import logging
import os
import pickle
import time
from google.appengine.api import app_identity
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
from gcal.oauth2client import GOOGLE_AUTH_URI
from gcal.oauth2client import GOOGLE_REVOKE_URI
from gcal.oauth2client import GOOGLE_TOKEN_URI
from gcal.oauth2client import clientsecrets
from gcal.oauth2client import util
from gcal.oauth2client import xsrfutil
from gcal.oauth2client.anyjson import simplejson
from gcal.oauth2client.client import AccessTokenRefreshError
from gcal.oauth2client.client import AssertionCredentials
from gcal.oauth2client.client import Credentials
from gcal.oauth2client.client import Flow
from gcal.oauth2client.client import OAuth2WebServerFlow
from gcal.oauth2client.client import Storage
# TODO(dhermes): Resolve import issue.
# This is a temporary fix for a Google internal issue.
try:
from google.appengine.ext import ndb
except ImportError:
ndb = None
logger = logging.getLogger(__name__)
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'
XSRF_MEMCACHE_ID = 'xsrf_secret_key'
def _safe_html(s):
"""Escape text to make it safe to display.
Args:
s: string, The text to escape.
Returns:
The escaped text as a string.
"""
  return cgi.escape(s, quote=1).replace("'", '&#39;')
class InvalidClientSecretsError(Exception):
"""The client_secrets.json file is malformed or missing required fields."""
class InvalidXsrfTokenError(Exception):
"""The XSRF token is invalid or expired."""
class SiteXsrfSecretKey(db.Model):
"""Storage for the sites XSRF secret key.
There will only be one instance stored of this model, the one used for the
site.
"""
secret = db.StringProperty()
if ndb is not None:
class SiteXsrfSecretKeyNDB(ndb.Model):
"""NDB Model for storage for the sites XSRF secret key.
Since this model uses the same kind as SiteXsrfSecretKey, it can be used
interchangeably. This simply provides an NDB model for interacting with the
same data the DB model interacts with.
There should only be one instance stored of this model, the one used for the
site.
"""
secret = ndb.StringProperty()
@classmethod
def _get_kind(cls):
"""Return the kind name for this class."""
return 'SiteXsrfSecretKey'
def _generate_new_xsrf_secret_key():
"""Returns a random XSRF secret key.
"""
return os.urandom(16).encode("hex")
def xsrf_secret_key():
"""Return the secret key for use for XSRF protection.
If the Site entity does not have a secret key, this method will also create
one and persist it.
Returns:
The secret key.
"""
secret = memcache.get(XSRF_MEMCACHE_ID, namespace=OAUTH2CLIENT_NAMESPACE)
if not secret:
# Load the one and only instance of SiteXsrfSecretKey.
model = SiteXsrfSecretKey.get_or_insert(key_name='site')
if not model.secret:
model.secret = _generate_new_xsrf_secret_key()
model.put()
secret = model.secret
memcache.add(XSRF_MEMCACHE_ID, secret, namespace=OAUTH2CLIENT_NAMESPACE)
return str(secret)
class AppAssertionCredentials(AssertionCredentials):
"""Credentials object for App Engine Assertion Grants
This object will allow an App Engine application to identify itself to Google
and other OAuth 2.0 servers that can verify assertions. It can be used for the
purpose of accessing data stored under an account assigned to the App Engine
application itself.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
"""
@util.positional(2)
def __init__(self, scope, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
scope: string or iterable of strings, scope(s) of the credentials being
requested.
"""
self.scope = util.scopes_to_string(scope)
# Assertion type is no longer used, but still in the parent class signature.
super(AppAssertionCredentials, self).__init__(None)
@classmethod
def from_json(cls, json):
data = simplejson.loads(json)
return AppAssertionCredentials(data['scope'])
def _refresh(self, http_request):
"""Refreshes the access_token.
Since the underlying App Engine app_identity implementation does its own
    caching, we can skip all the storage hoops and just do a refresh using the
API.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
try:
scopes = self.scope.split()
(token, _) = app_identity.get_access_token(scopes)
except app_identity.Error, e:
raise AccessTokenRefreshError(str(e))
self.access_token = token
class FlowProperty(db.Property):
"""App Engine datastore Property for Flow.
Utility property that allows easy storage and retrieval of an
oauth2client.Flow"""
# Tell what the user type is.
data_type = Flow
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
flow = super(FlowProperty,
self).get_value_for_datastore(model_instance)
return db.Blob(pickle.dumps(flow))
# For reading from datastore.
def make_value_from_datastore(self, value):
if value is None:
return None
return pickle.loads(value)
def validate(self, value):
if value is not None and not isinstance(value, Flow):
raise db.BadValueError('Property %s must be convertible '
'to a FlowThreeLegged instance (%s)' %
(self.name, value))
return super(FlowProperty, self).validate(value)
def empty(self, value):
return not value
if ndb is not None:
class FlowNDBProperty(ndb.PickleProperty):
"""App Engine NDB datastore Property for Flow.
Serves the same purpose as the DB FlowProperty, but for NDB models. Since
PickleProperty inherits from BlobProperty, the underlying representation of
the data in the datastore will be the same as in the DB case.
Utility property that allows easy storage and retrieval of an
oauth2client.Flow
"""
def _validate(self, value):
"""Validates a value as a proper Flow object.
Args:
value: A value to be set on the property.
Raises:
TypeError if the value is not an instance of Flow.
"""
logger.info('validate: Got type %s', type(value))
if value is not None and not isinstance(value, Flow):
raise TypeError('Property %s must be convertible to a flow '
'instance; received: %s.' % (self._name, value))
class CredentialsProperty(db.Property):
"""App Engine datastore Property for Credentials.
Utility property that allows easy storage and retrieval of
  oauth2client.Credentials
"""
# Tell what the user type is.
data_type = Credentials
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
logger.info("get: Got type " + str(type(model_instance)))
cred = super(CredentialsProperty,
self).get_value_for_datastore(model_instance)
if cred is None:
cred = ''
else:
cred = cred.to_json()
return db.Blob(cred)
# For reading from datastore.
def make_value_from_datastore(self, value):
logger.info("make: Got type " + str(type(value)))
if value is None:
return None
if len(value) == 0:
return None
try:
credentials = Credentials.new_from_json(value)
except ValueError:
credentials = None
return credentials
def validate(self, value):
value = super(CredentialsProperty, self).validate(value)
logger.info("validate: Got type " + str(type(value)))
if value is not None and not isinstance(value, Credentials):
raise db.BadValueError('Property %s must be convertible '
'to a Credentials instance (%s)' %
(self.name, value))
#if value is not None and not isinstance(value, Credentials):
# return None
return value
if ndb is not None:
# TODO(dhermes): Turn this into a JsonProperty and overhaul the Credentials
# and subclass mechanics to use new_from_dict, to_dict,
# from_dict, etc.
class CredentialsNDBProperty(ndb.BlobProperty):
"""App Engine NDB datastore Property for Credentials.
Serves the same purpose as the DB CredentialsProperty, but for NDB models.
Since CredentialsProperty stores data as a blob and this inherits from
BlobProperty, the data in the datastore will be the same as in the DB case.
Utility property that allows easy storage and retrieval of Credentials and
subclasses.
"""
def _validate(self, value):
"""Validates a value as a proper credentials object.
Args:
value: A value to be set on the property.
Raises:
TypeError if the value is not an instance of Credentials.
"""
logger.info('validate: Got type %s', type(value))
if value is not None and not isinstance(value, Credentials):
raise TypeError('Property %s must be convertible to a credentials '
'instance; received: %s.' % (self._name, value))
def _to_base_type(self, value):
"""Converts our validated value to a JSON serialized string.
Args:
value: A value to be set in the datastore.
Returns:
A JSON serialized version of the credential, else '' if value is None.
"""
if value is None:
return ''
else:
return value.to_json()
def _from_base_type(self, value):
"""Converts our stored JSON string back to the desired type.
Args:
value: A value from the datastore to be converted to the desired type.
Returns:
A deserialized Credentials (or subclass) object, else None if the
value can't be parsed.
"""
if not value:
return None
try:
# Uses the from_json method of the implied class of value
credentials = Credentials.new_from_json(value)
except ValueError:
credentials = None
return credentials
class StorageByKeyName(Storage):
"""Store and retrieve a credential to and from the App Engine datastore.
This Storage helper presumes the Credentials have been stored as a
CredentialsProperty or CredentialsNDBProperty on a datastore model class, and
that entities are stored by key_name.
"""
@util.positional(4)
def __init__(self, model, key_name, property_name, cache=None):
"""Constructor for Storage.
Args:
model: db.Model or ndb.Model, model class
key_name: string, key name for the entity that has the credentials
property_name: string, name of the property that is a CredentialsProperty
or CredentialsNDBProperty.
cache: memcache, a write-through cache to put in front of the datastore.
If the model you are using is an NDB model, using a cache will be
redundant since the model uses an instance cache and memcache for you.
"""
self._model = model
self._key_name = key_name
self._property_name = property_name
self._cache = cache
def _is_ndb(self):
"""Determine whether the model of the instance is an NDB model.
Returns:
Boolean indicating whether or not the model is an NDB or DB model.
"""
    # issubclass will fail if one of the arguments is not a class; we only need
    # to worry about new-style classes since ndb and db models are new-style
if isinstance(self._model, type):
if ndb is not None and issubclass(self._model, ndb.Model):
return True
elif issubclass(self._model, db.Model):
return False
raise TypeError('Model class not an NDB or DB model: %s.' % (self._model,))
def _get_entity(self):
"""Retrieve entity from datastore.
Uses a different model method for db or ndb models.
Returns:
Instance of the model corresponding to the current storage object
and stored using the key name of the storage object.
"""
if self._is_ndb():
return self._model.get_by_id(self._key_name)
else:
return self._model.get_by_key_name(self._key_name)
def _delete_entity(self):
"""Delete entity from datastore.
Attempts to delete using the key_name stored on the object, whether or not
the given key is in the datastore.
"""
if self._is_ndb():
ndb.Key(self._model, self._key_name).delete()
else:
entity_key = db.Key.from_path(self._model.kind(), self._key_name)
db.delete(entity_key)
def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
if self._cache:
json = self._cache.get(self._key_name)
if json:
return Credentials.new_from_json(json)
credentials = None
entity = self._get_entity()
if entity is not None:
credentials = getattr(entity, self._property_name)
if credentials and hasattr(credentials, 'set_store'):
credentials.set_store(self)
if self._cache:
self._cache.set(self._key_name, credentials.to_json())
return credentials
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
entity = self._model.get_or_insert(self._key_name)
setattr(entity, self._property_name, credentials)
entity.put()
if self._cache:
self._cache.set(self._key_name, credentials.to_json())
def locked_delete(self):
"""Delete Credential from datastore."""
if self._cache:
self._cache.delete(self._key_name)
self._delete_entity()
class CredentialsModel(db.Model):
"""Storage for OAuth 2.0 Credentials
Storage of the model is keyed by the user.user_id().
"""
credentials = CredentialsProperty()
if ndb is not None:
class CredentialsNDBModel(ndb.Model):
"""NDB Model for storage of OAuth 2.0 Credentials
Since this model uses the same kind as CredentialsModel and has a property
which can serialize and deserialize Credentials correctly, it can be used
interchangeably with a CredentialsModel to access, insert and delete the
same entities. This simply provides an NDB model for interacting with the
same data the DB model interacts with.
Storage of the model is keyed by the user.user_id().
"""
credentials = CredentialsNDBProperty()
@classmethod
def _get_kind(cls):
"""Return the kind name for this class."""
return 'CredentialsModel'
def _build_state_value(request_handler, user):
"""Composes the value for the 'state' parameter.
Packs the current request URI and an XSRF token into an opaque string that
can be passed to the authentication server via the 'state' parameter.
Args:
request_handler: webapp.RequestHandler, The request.
user: google.appengine.api.users.User, The current user.
Returns:
The state value as a string.
"""
uri = request_handler.request.url
token = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(),
action_id=str(uri))
return uri + ':' + token
def _parse_state_value(state, user):
"""Parse the value of the 'state' parameter.
Parses the value and validates the XSRF token in the state parameter.
Args:
state: string, The value of the state parameter.
user: google.appengine.api.users.User, The current user.
Raises:
InvalidXsrfTokenError: if the XSRF token is invalid.
Returns:
The redirect URI.
"""
uri, token = state.rsplit(':', 1)
if not xsrfutil.validate_token(xsrf_secret_key(), token, user.user_id(),
action_id=uri):
raise InvalidXsrfTokenError()
return uri
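# Illustrative sketch, not part of the original module: the 'state' parameter is
# just '<request uri>:<xsrf token>', which is why _parse_state_value() can recover
# the redirect URI with rsplit(':', 1) before validating the token. The values
# below are hypothetical; the real functions also need an App Engine user and the
# datastore-backed XSRF secret.
def _example_state_format():
  state = 'https://example.appspot.com/page:abc123token'
  uri, token = state.rsplit(':', 1)
  return uri, token  # ('https://example.appspot.com/page', 'abc123token')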
class OAuth2Decorator(object):
"""Utility for making OAuth 2.0 easier.
Instantiate and then use with oauth_required or oauth_aware
as decorators on webapp.RequestHandler methods.
Example:
decorator = OAuth2Decorator(
client_id='837...ent.com',
client_secret='Qh...wwI',
scope='https://www.googleapis.com/auth/plus')
class MainHandler(webapp.RequestHandler):
@decorator.oauth_required
def get(self):
http = decorator.http()
# http is authorized with the user's Credentials and can be used
# in API calls
"""
@util.positional(4)
def __init__(self, client_id, client_secret, scope,
auth_uri=GOOGLE_AUTH_URI,
token_uri=GOOGLE_TOKEN_URI,
revoke_uri=GOOGLE_REVOKE_URI,
user_agent=None,
message=None,
callback_path='/oauth2callback',
token_response_param=None,
**kwargs):
"""Constructor for OAuth2Decorator
Args:
client_id: string, client identifier.
client_secret: string client secret.
scope: string or iterable of strings, scope(s) of the credentials being
requested.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
user_agent: string, User agent of your application, default to None.
message: Message to display if there are problems with the OAuth 2.0
configuration. The message may contain HTML and will be presented on the
web interface for any method that uses the decorator.
callback_path: string, The absolute path to use as the callback URI. Note
that this must match up with the URI given when registering the
application in the APIs Console.
token_response_param: string. If provided, the full JSON response
to the access token request will be encoded and included in this query
parameter in the callback URI. This is useful with providers (e.g.
wordpress.com) that include extra fields that the client may want.
      **kwargs: dict, Keyword arguments are passed along as kwargs to the
OAuth2WebServerFlow constructor.
"""
self.flow = None
self.credentials = None
self._client_id = client_id
self._client_secret = client_secret
self._scope = util.scopes_to_string(scope)
self._auth_uri = auth_uri
self._token_uri = token_uri
self._revoke_uri = revoke_uri
self._user_agent = user_agent
self._kwargs = kwargs
self._message = message
self._in_error = False
self._callback_path = callback_path
self._token_response_param = token_response_param
def _display_error_message(self, request_handler):
request_handler.response.out.write('<html><body>')
request_handler.response.out.write(_safe_html(self._message))
request_handler.response.out.write('</body></html>')
def oauth_required(self, method):
"""Decorator that starts the OAuth 2.0 dance.
Starts the OAuth dance for the logged in user if they haven't already
granted access for this application.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
"""
def check_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
# Don't use @login_decorator as this could be used in a POST request.
if not user:
request_handler.redirect(users.create_login_url(
request_handler.request.uri))
return
self._create_flow(request_handler)
# Store the request URI in 'state' so we can use it later
self.flow.params['state'] = _build_state_value(request_handler, user)
self.credentials = StorageByKeyName(
CredentialsModel, user.user_id(), 'credentials').get()
if not self.has_credentials():
return request_handler.redirect(self.authorize_url())
try:
return method(request_handler, *args, **kwargs)
except AccessTokenRefreshError:
return request_handler.redirect(self.authorize_url())
return check_oauth
def _create_flow(self, request_handler):
"""Create the Flow object.
The Flow is calculated lazily since we don't know where this app is
running until it receives a request, at which point redirect_uri can be
calculated and then the Flow object can be constructed.
Args:
request_handler: webapp.RequestHandler, the request handler.
"""
if self.flow is None:
redirect_uri = request_handler.request.relative_url(
self._callback_path) # Usually /oauth2callback
self.flow = OAuth2WebServerFlow(self._client_id, self._client_secret,
self._scope, redirect_uri=redirect_uri,
user_agent=self._user_agent,
auth_uri=self._auth_uri,
token_uri=self._token_uri,
revoke_uri=self._revoke_uri,
**self._kwargs)
def oauth_aware(self, method):
"""Decorator that sets up for OAuth 2.0 dance, but doesn't do it.
Does all the setup for the OAuth dance, but doesn't initiate it.
This decorator is useful if you want to create a page that knows
whether or not the user has granted access to this application.
From within a method decorated with @oauth_aware the has_credentials()
and authorize_url() methods can be called.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
"""
def setup_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
# Don't use @login_decorator as this could be used in a POST request.
if not user:
request_handler.redirect(users.create_login_url(
request_handler.request.uri))
return
self._create_flow(request_handler)
self.flow.params['state'] = _build_state_value(request_handler, user)
self.credentials = StorageByKeyName(
CredentialsModel, user.user_id(), 'credentials').get()
return method(request_handler, *args, **kwargs)
return setup_oauth
def has_credentials(self):
"""True if for the logged in user there are valid access Credentials.
    Must only be called from within a webapp.RequestHandler subclassed method
that had been decorated with either @oauth_required or @oauth_aware.
"""
return self.credentials is not None and not self.credentials.invalid
def authorize_url(self):
"""Returns the URL to start the OAuth dance.
    Must only be called from within a webapp.RequestHandler subclassed method
that had been decorated with either @oauth_required or @oauth_aware.
"""
url = self.flow.step1_get_authorize_url()
return str(url)
def http(self):
"""Returns an authorized http instance.
Must only be called from within an @oauth_required decorated method, or
from within an @oauth_aware decorated method where has_credentials()
returns True.
"""
return self.credentials.authorize(httplib2.Http())
@property
def callback_path(self):
"""The absolute path where the callback will occur.
Note this is the absolute path, not the absolute URI, that will be
calculated by the decorator at runtime. See callback_handler() for how this
should be used.
Returns:
The callback path as a string.
"""
return self._callback_path
def callback_handler(self):
"""RequestHandler for the OAuth 2.0 redirect callback.
Usage:
app = webapp.WSGIApplication([
('/index', MyIndexHandler),
...,
(decorator.callback_path, decorator.callback_handler())
])
Returns:
A webapp.RequestHandler that handles the redirect back from the
server during the OAuth 2.0 dance.
"""
decorator = self
class OAuth2Handler(webapp.RequestHandler):
"""Handler for the redirect_uri of the OAuth 2.0 dance."""
@login_required
def get(self):
error = self.request.get('error')
if error:
errormsg = self.request.get('error_description', error)
self.response.out.write(
'The authorization request failed: %s' % _safe_html(errormsg))
else:
user = users.get_current_user()
decorator._create_flow(self)
credentials = decorator.flow.step2_exchange(self.request.params)
StorageByKeyName(
CredentialsModel, user.user_id(), 'credentials').put(credentials)
redirect_uri = _parse_state_value(str(self.request.get('state')),
user)
if decorator._token_response_param and credentials.token_response:
resp_json = simplejson.dumps(credentials.token_response)
redirect_uri = util._add_query_parameter(
redirect_uri, decorator._token_response_param, resp_json)
self.redirect(redirect_uri)
return OAuth2Handler
def callback_application(self):
"""WSGI application for handling the OAuth 2.0 redirect callback.
If you need finer-grained control, use `callback_handler`, which returns just
the webapp.RequestHandler.
Returns:
A webapp.WSGIApplication that handles the redirect back from the
server during the OAuth 2.0 dance.
"""
return webapp.WSGIApplication([
(self.callback_path, self.callback_handler())
])
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
"""An OAuth2Decorator that builds from a clientsecrets file.
Uses a clientsecrets file as the source for all the information when
constructing an OAuth2Decorator.
Example:
decorator = OAuth2DecoratorFromClientSecrets(
os.path.join(os.path.dirname(__file__), 'client_secrets.json'),
scope='https://www.googleapis.com/auth/plus')
class MainHandler(webapp.RequestHandler):
@decorator.oauth_required
def get(self):
http = decorator.http()
# http is authorized with the user's Credentials and can be used
# in API calls
"""
@util.positional(3)
def __init__(self, filename, scope, message=None, cache=None):
"""Constructor
Args:
filename: string, File name of client secrets.
scope: string or iterable of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may contain HTML
and will be presented on the web interface for any method that uses the
decorator.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
"""
client_type, client_info = clientsecrets.loadfile(filename, cache=cache)
if client_type not in [
clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
raise InvalidClientSecretsError(
'OAuth2Decorator doesn\'t support this OAuth 2.0 flow.')
constructor_kwargs = {
'auth_uri': client_info['auth_uri'],
'token_uri': client_info['token_uri'],
'message': message,
}
revoke_uri = client_info.get('revoke_uri')
if revoke_uri is not None:
constructor_kwargs['revoke_uri'] = revoke_uri
super(OAuth2DecoratorFromClientSecrets, self).__init__(
client_info['client_id'], client_info['client_secret'],
scope, **constructor_kwargs)
if message is not None:
self._message = message
else:
self._message = 'Please configure your application for OAuth 2.0.'
@util.positional(2)
def oauth2decorator_from_clientsecrets(filename, scope,
message=None, cache=None):
"""Creates an OAuth2Decorator populated from a clientsecrets file.
Args:
filename: string, File name of client secrets.
scope: string or list of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may contain HTML and
will be presented on the web interface for any method that uses the
decorator.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns: An OAuth2Decorator
"""
return OAuth2DecoratorFromClientSecrets(filename, scope,
message=message, cache=cache)
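# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of wiring the decorator into a webapp application, following
# the pattern described in the docstrings above. The 'client_secrets.json'
# filename, the Plus scope, and MainHandler are illustrative assumptions; `os`
# and `webapp` are assumed to be imported at the top of this module.
def _example_build_wsgi_app():
    """Build a WSGI app guarded by a clientsecrets-based decorator (sketch only)."""
    decorator = oauth2decorator_from_clientsecrets(
        os.path.join(os.path.dirname(__file__), 'client_secrets.json'),
        scope='https://www.googleapis.com/auth/plus')
    class MainHandler(webapp.RequestHandler):
        @decorator.oauth_aware
        def get(self):
            if decorator.has_credentials():
                self.response.out.write('Access already granted.')
            else:
                self.response.out.write(
                    '<a href="%s">Grant access</a>' % decorator.authorize_url())
    return webapp.WSGIApplication([
        ('/', MainHandler),
        (decorator.callback_path, decorator.callback_handler()),
    ])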
|
|
from __future__ import absolute_import
import collections
import logging
import os
import re
import warnings
from pip._vendor import pkg_resources, six
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.pkg_resources import RequirementParseError
from pip._internal.exceptions import InstallationError
from pip._internal.req import InstallRequirement
from pip._internal.req.req_file import COMMENT_RE
from pip._internal.utils.deprecation import RemovedInPip11Warning
from pip._internal.utils.misc import (
dist_is_editable, get_installed_distributions,
)
logger = logging.getLogger(__name__)
def freeze(
requirement=None,
find_links=None, local_only=None, user_only=None, skip_regex=None,
isolated=False,
wheel_cache=None,
exclude_editable=False,
skip=()):
find_links = find_links or []
skip_match = None
if skip_regex:
skip_match = re.compile(skip_regex).search
dependency_links = []
for dist in pkg_resources.working_set:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt')
)
for link in find_links:
if '#egg=' in link:
dependency_links.append(link)
for link in find_links:
yield '-f %s' % link
installations = {}
for dist in get_installed_distributions(local_only=local_only,
skip=(),
user_only=user_only):
try:
req = FrozenRequirement.from_dist(
dist,
dependency_links
)
except RequirementParseError:
logger.warning(
"Could not parse requirement: %s",
dist.project_name
)
continue
if exclude_editable and req.editable:
continue
installations[req.name] = req
if requirement:
# the options that don't get turned into an InstallRequirement
# should only be emitted once, even if the same option is in multiple
# requirements files, so we need to keep track of what has been emitted
# so that we don't emit it again if it's seen again
emitted_options = set()
# keep track of which files a requirement is in so that we can
# give an accurate warning if a requirement appears multiple times.
req_files = collections.defaultdict(list)
for req_file_path in requirement:
with open(req_file_path) as req_file:
for line in req_file:
if (not line.strip() or
line.strip().startswith('#') or
(skip_match and skip_match(line)) or
line.startswith((
'-r', '--requirement',
'-Z', '--always-unzip',
'-f', '--find-links',
'-i', '--index-url',
'--pre',
'--trusted-host',
'--process-dependency-links',
'--extra-index-url'))):
line = line.rstrip()
if line not in emitted_options:
emitted_options.add(line)
yield line
continue
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
line_req = InstallRequirement.from_editable(
line,
isolated=isolated,
wheel_cache=wheel_cache,
)
else:
line_req = InstallRequirement.from_line(
COMMENT_RE.sub('', line).strip(),
isolated=isolated,
wheel_cache=wheel_cache,
)
if not line_req.name:
logger.info(
"Skipping line in requirement file [%s] because "
"it's not clear what it would install: %s",
req_file_path, line.strip(),
)
logger.info(
" (add #egg=PackageName to the URL to avoid"
" this warning)"
)
elif line_req.name not in installations:
# either it's not installed, or it is installed
# but has been processed already
if not req_files[line_req.name]:
logger.warning(
"Requirement file [%s] contains %s, but that "
"package is not installed",
req_file_path,
COMMENT_RE.sub('', line).strip(),
)
else:
req_files[line_req.name].append(req_file_path)
else:
yield str(installations[line_req.name]).rstrip()
del installations[line_req.name]
req_files[line_req.name].append(req_file_path)
# Warn about requirements that were included multiple times (in a
# single requirements file or in different requirements files).
for name, files in six.iteritems(req_files):
if len(files) > 1:
logger.warning("Requirement %s included multiple times [%s]",
name, ', '.join(sorted(set(files))))
yield(
'## The following requirements were added by '
'pip freeze:'
)
for installation in sorted(
installations.values(), key=lambda x: x.name.lower()):
if canonicalize_name(installation.name) not in skip:
yield str(installation).rstrip()
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
@classmethod
def from_dist(cls, dist, dependency_links):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
from pip._internal.vcs import vcs, get_src_requirement
if dist_is_editable(dist) and vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location)
except InstallationError as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
req = None
if req is None:
logger.warning(
'Could not determine repository location of %s', location
)
comments.append(
'## !! Could not determine repository location'
)
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
(specs, dist)
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_backend = vcs.get_backend('svn')
if svn_backend:
svn_location = svn_backend().get_location(
dist,
dependency_links,
)
if not svn_location:
logger.warning(
'Warning: cannot find svn location for %s', req,
)
comments.append(
'## FIXME: could not find svn URL in dependency_links '
'for this package:'
)
else:
warnings.warn(
"SVN editable detection based on dependency links "
"will be dropped in the future.",
RemovedInPip11Warning,
)
comments.append(
'# Installing as editable to satisfy requirement %s:' %
req
)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = '%s@%s#egg=%s' % (
svn_location,
rev,
cls.egg_name(dist)
)
return cls(dist.project_name, req, editable, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
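# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of consuming the freeze() generator defined above. It uses
# only keyword arguments that appear in freeze()'s signature; printing each
# yielded line roughly mirrors what `pip freeze` itself emits.
def _example_print_frozen_requirements():
    """Print one requirement line per installed distribution (sketch only)."""
    for line in freeze(local_only=True, exclude_editable=False):
        print(line)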
|
|
"""Support for FFmpeg."""
from __future__ import annotations
import asyncio
import re
from haffmpeg.tools import IMAGE_JPEG, FFVersion, ImageFrame
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONTENT_TYPE_MULTIPART,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.loader import bind_hass
DOMAIN = "ffmpeg"
SERVICE_START = "start"
SERVICE_STOP = "stop"
SERVICE_RESTART = "restart"
SIGNAL_FFMPEG_START = "ffmpeg.start"
SIGNAL_FFMPEG_STOP = "ffmpeg.stop"
SIGNAL_FFMPEG_RESTART = "ffmpeg.restart"
DATA_FFMPEG = "ffmpeg"
CONF_INITIAL_STATE = "initial_state"
CONF_INPUT = "input"
CONF_FFMPEG_BIN = "ffmpeg_bin"
CONF_EXTRA_ARGUMENTS = "extra_arguments"
CONF_OUTPUT = "output"
DEFAULT_BINARY = "ffmpeg"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Optional(CONF_FFMPEG_BIN, default=DEFAULT_BINARY): cv.string}
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_FFMPEG_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
async def async_setup(hass, config):
"""Set up the FFmpeg component."""
conf = config.get(DOMAIN, {})
manager = FFmpegManager(hass, conf.get(CONF_FFMPEG_BIN, DEFAULT_BINARY))
await manager.async_get_version()
# Register service
async def async_service_handle(service):
"""Handle service ffmpeg process."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
if service.service == SERVICE_START:
async_dispatcher_send(hass, SIGNAL_FFMPEG_START, entity_ids)
elif service.service == SERVICE_STOP:
async_dispatcher_send(hass, SIGNAL_FFMPEG_STOP, entity_ids)
else:
async_dispatcher_send(hass, SIGNAL_FFMPEG_RESTART, entity_ids)
hass.services.async_register(
DOMAIN, SERVICE_START, async_service_handle, schema=SERVICE_FFMPEG_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_STOP, async_service_handle, schema=SERVICE_FFMPEG_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_RESTART, async_service_handle, schema=SERVICE_FFMPEG_SCHEMA
)
hass.data[DATA_FFMPEG] = manager
return True
@bind_hass
def get_ffmpeg_manager(hass: HomeAssistant) -> FFmpegManager:
"""Return the FFmpegManager."""
if DATA_FFMPEG not in hass.data:
raise ValueError("ffmpeg component not initialized")
return hass.data[DATA_FFMPEG]
@bind_hass
async def async_get_image(
hass: HomeAssistant,
input_source: str,
output_format: str = IMAGE_JPEG,
extra_cmd: str | None = None,
width: int | None = None,
height: int | None = None,
) -> bytes | None:
"""Get an image from a frame of an RTSP stream."""
manager = hass.data[DATA_FFMPEG]
ffmpeg = ImageFrame(manager.binary)
if width and height and (extra_cmd is None or "-s" not in extra_cmd):
size_cmd = f"-s {width}x{height}"
if extra_cmd is None:
extra_cmd = size_cmd
else:
extra_cmd += " " + size_cmd
image = await asyncio.shield(
ffmpeg.get_image(input_source, output_format=output_format, extra_cmd=extra_cmd)
)
return image
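# --- Illustrative usage sketch (not part of the original component) ---
# A hedged example of grabbing a single frame via async_get_image(); the RTSP
# URL and the 640x480 size are made-up values. Defining the helper executes
# nothing at import time.
async def _example_snapshot(hass: HomeAssistant) -> bytes | None:
    """Fetch one JPEG frame from a hypothetical RTSP source (sketch only)."""
    return await async_get_image(
        hass,
        "rtsp://example.local/stream",
        output_format=IMAGE_JPEG,
        width=640,
        height=480,
    )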
class FFmpegManager:
"""Helper for ha-ffmpeg."""
def __init__(self, hass, ffmpeg_bin):
"""Initialize helper."""
self.hass = hass
self._cache = {}
self._bin = ffmpeg_bin
self._version = None
self._major_version = None
@property
def binary(self):
"""Return ffmpeg binary from config."""
return self._bin
async def async_get_version(self):
"""Return ffmpeg version."""
ffversion = FFVersion(self._bin)
self._version = await ffversion.get_version()
self._major_version = None
if self._version is not None:
result = re.search(r"(\d+)\.", self._version)
if result is not None:
self._major_version = int(result.group(1))
return self._version, self._major_version
@property
def ffmpeg_stream_content_type(self):
"""Return HTTP content type for ffmpeg stream."""
if self._major_version is not None and self._major_version > 3:
return CONTENT_TYPE_MULTIPART.format("ffmpeg")
return CONTENT_TYPE_MULTIPART.format("ffserver")
class FFmpegBase(Entity):
"""Interface object for FFmpeg."""
def __init__(self, initial_state=True):
"""Initialize ffmpeg base object."""
self.ffmpeg = None
self.initial_state = initial_state
async def async_added_to_hass(self):
"""Register dispatcher & events.
This method is a coroutine.
"""
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_FFMPEG_START, self._async_start_ffmpeg
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_FFMPEG_STOP, self._async_stop_ffmpeg
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_FFMPEG_RESTART, self._async_restart_ffmpeg
)
)
# register start/stop
self._async_register_events()
@property
def available(self):
"""Return True if entity is available."""
return self.ffmpeg.is_running
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
async def _async_start_ffmpeg(self, entity_ids):
"""Start a FFmpeg process.
This method is a coroutine.
"""
raise NotImplementedError()
async def _async_stop_ffmpeg(self, entity_ids):
"""Stop a FFmpeg process.
This method is a coroutine.
"""
if entity_ids is None or self.entity_id in entity_ids:
await self.ffmpeg.close()
async def _async_restart_ffmpeg(self, entity_ids):
"""Stop a FFmpeg process.
This method is a coroutine.
"""
if entity_ids is None or self.entity_id in entity_ids:
await self._async_stop_ffmpeg(None)
await self._async_start_ffmpeg(None)
@callback
def _async_register_events(self):
"""Register a FFmpeg process/device."""
async def async_shutdown_handle(event):
"""Stop FFmpeg process."""
await self._async_stop_ffmpeg(None)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_shutdown_handle)
# start on startup
if not self.initial_state:
return
async def async_start_handle(event):
"""Start FFmpeg process."""
await self._async_start_ffmpeg(None)
self.async_write_ha_state()
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, async_start_handle)
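# --- Illustrative subclass sketch (not part of the original component) ---
# A hedged, minimal FFmpegBase subclass showing where a real integration would
# start its haffmpeg worker; _ExampleFFmpegEntity and its _started flag are
# made up, and no actual ffmpeg process is launched here.
class _ExampleFFmpegEntity(FFmpegBase):
    """Track start requests instead of spawning a real ffmpeg process."""
    def __init__(self, initial_state=True):
        """Initialize the sketch entity."""
        super().__init__(initial_state)
        self._started = False
    async def _async_start_ffmpeg(self, entity_ids):
        """Mark this entity as started when it is addressed (sketch only)."""
        if entity_ids is None or self.entity_id in entity_ids:
            # A real integration would create a haffmpeg worker here and
            # assign it to self.ffmpeg before opening the stream.
            self._started = True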
|
|
import asyncio
import binascii
import cgi
import collections
import datetime
import enum
import http.cookies
import io
import json
import math
import time
import warnings
from email.utils import parsedate
from types import MappingProxyType
from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
from yarl import URL
from . import hdrs, multipart
from .helpers import HeadersMixin, reify, sentinel
from .protocol import WebResponse as ResponseImpl
from .protocol import HttpVersion10, HttpVersion11
from .streams import EOF_MARKER
__all__ = (
'ContentCoding', 'Request', 'StreamResponse', 'Response',
'json_response'
)
FileField = collections.namedtuple('Field', 'name filename file content_type')
class ContentCoding(enum.Enum):
# The content codings that we have support for.
#
# Additional registered codings are listed at:
# https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding
deflate = 'deflate'
gzip = 'gzip'
identity = 'identity'
############################################################
# HTTP Request
############################################################
class Request(collections.MutableMapping, HeadersMixin):
POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT,
hdrs.METH_TRACE, hdrs.METH_DELETE}
def __init__(self, message, payload, transport, reader, writer,
time_service, *,
secure_proxy_ssl_header=None):
self._app = None
self._message = message
self._transport = transport
self._reader = reader
self._writer = writer
self._post = None
self._post_files_cache = None
# matchdict, route_name, handler
# or information about traversal lookup
self._match_info = None # initialized after route resolving
self._payload = payload
self._read_bytes = None
self._has_body = not payload.at_eof()
self._secure_proxy_ssl_header = secure_proxy_ssl_header
self._time_service = time_service
self._state = {}
self._cache = {}
# MutableMapping API
def __getitem__(self, key):
return self._state[key]
def __setitem__(self, key, value):
self._state[key] = value
def __delitem__(self, key):
del self._state[key]
def __len__(self):
return len(self._state)
def __iter__(self):
return iter(self._state)
########
@reify
def scheme(self):
"""A string representing the scheme of the request.
'http' or 'https'.
"""
warnings.warn("path_qs property is deprecated, "
"use .url.scheme instead",
DeprecationWarning)
return self.url.scheme
@reify
def _scheme(self):
if self._transport.get_extra_info('sslcontext'):
return 'https'
secure_proxy_ssl_header = self._secure_proxy_ssl_header
if secure_proxy_ssl_header is not None:
header, value = secure_proxy_ssl_header
if self.headers.get(header) == value:
return 'https'
return 'http'
@reify
def method(self):
"""Read only property for getting HTTP method.
The value is upper-cased str like 'GET', 'POST', 'PUT' etc.
"""
return self._message.method
@reify
def version(self):
"""Read only property for getting HTTP version of request.
Returns aiohttp.protocol.HttpVersion instance.
"""
return self._message.version
@reify
def host(self):
"""Read only property for getting *HOST* header of request.
Returns str or None if HTTP request has no HOST header.
"""
warnings.warn("host property is deprecated, "
"use .url.host instead",
DeprecationWarning)
return self._message.headers.get(hdrs.HOST)
@reify
def rel_url(self):
return URL(self._message.path)
@reify
def path_qs(self):
"""The URL including PATH_INFO and the query string.
E.g, /app/blog?id=10
"""
warnings.warn("path_qs property is deprecated, "
"use str(request.rel_url) instead",
DeprecationWarning)
return str(self.rel_url)
@reify
def url(self):
return URL('{}://{}{}'.format(self._scheme,
self._message.headers.get(hdrs.HOST),
str(self.rel_url)))
@reify
def raw_path(self):
""" The URL including raw *PATH INFO* without the host or scheme.
Warning: the path is unquoted and may contain invalid URL characters
E.g., ``/my%2Fpath%7Cwith%21some%25strange%24characters``
"""
warnings.warn("raw_path property is deprecated, "
"use .rel_url.raw_path instead",
DeprecationWarning)
return self.rel_url.raw_path
@reify
def path(self):
"""The URL including *PATH INFO* without the host or scheme.
E.g., ``/app/blog``
"""
warnings.warn("path property is deprecated, use .rel_url.path instead",
DeprecationWarning)
return self.rel_url.path
@reify
def query_string(self):
"""The query string in the URL.
E.g., id=10
"""
warnings.warn("query_string property is deprecated, "
"use .rel_url.query_string instead",
DeprecationWarning)
return self.rel_url.query_string
@reify
def GET(self):
"""A multidict with all the variables in the query string.
Lazy property.
"""
warnings.warn("GET property is deprecated, use .rel_url.query instead",
DeprecationWarning)
return self.rel_url.query
@reify
def POST(self):
"""A multidict with all the variables in the POST parameters.
The post() method has to be called before using this attribute.
"""
warnings.warn("POST property is deprecated, use .post() instead",
DeprecationWarning)
if self._post is None:
raise RuntimeError("POST is not available before post()")
return self._post
@reify
def headers(self):
"""A case-insensitive multidict proxy with all headers."""
return CIMultiDictProxy(self._message.headers)
@reify
def raw_headers(self):
"""A sequence of pars for all headers."""
return tuple(self._message.raw_headers)
@reify
def if_modified_since(self, _IF_MODIFIED_SINCE=hdrs.IF_MODIFIED_SINCE):
"""The value of If-Modified-Since HTTP header, or None.
This header is represented as a `datetime` object.
"""
httpdate = self.headers.get(_IF_MODIFIED_SINCE)
if httpdate is not None:
timetuple = parsedate(httpdate)
if timetuple is not None:
return datetime.datetime(*timetuple[:6],
tzinfo=datetime.timezone.utc)
return None
@reify
def keep_alive(self):
"""Is keepalive enabled by client?"""
if self.version < HttpVersion10:
return False
else:
return not self._message.should_close
@property
def match_info(self):
"""Result of route resolving."""
return self._match_info
@reify
def app(self):
"""Application instance."""
return self._match_info.apps[-1]
@property
def transport(self):
"""Transport used for request processing."""
return self._transport
@reify
def cookies(self):
"""Return request cookies.
A read-only dictionary-like object.
"""
raw = self.headers.get(hdrs.COOKIE, '')
parsed = http.cookies.SimpleCookie(raw)
return MappingProxyType(
{key: val.value for key, val in parsed.items()})
@property
def content(self):
"""Return raw payload stream."""
return self._payload
@property
def has_body(self):
"""Return True if request has HTTP BODY, False otherwise."""
return self._has_body
@asyncio.coroutine
def release(self):
"""Release request.
Eat unread part of HTTP BODY if present.
"""
chunk = yield from self._payload.readany()
while chunk is not EOF_MARKER or chunk:
chunk = yield from self._payload.readany()
@asyncio.coroutine
def read(self):
"""Read request body if present.
Returns bytes object with full request content.
"""
if self._read_bytes is None:
body = bytearray()
while True:
chunk = yield from self._payload.readany()
body.extend(chunk)
if chunk is EOF_MARKER:
break
self._read_bytes = bytes(body)
return self._read_bytes
@asyncio.coroutine
def text(self):
"""Return BODY as text using encoding from .charset."""
bytes_body = yield from self.read()
encoding = self.charset or 'utf-8'
return bytes_body.decode(encoding)
@asyncio.coroutine
def json(self, *, loads=json.loads, loader=None):
"""Return BODY as JSON."""
if loader is not None:
warnings.warn(
"Using loader argument is deprecated, use loads instead",
DeprecationWarning)
loads = loader
body = yield from self.text()
return loads(body)
@asyncio.coroutine
def multipart(self, *, reader=multipart.MultipartReader):
"""Return async iterator to process BODY as multipart."""
return reader(self.headers, self.content)
@asyncio.coroutine
def post(self):
"""Return POST parameters."""
if self._post is not None:
return self._post
if self.method not in self.POST_METHODS:
self._post = MultiDictProxy(MultiDict())
return self._post
content_type = self.content_type
if (content_type not in ('',
'application/x-www-form-urlencoded',
'multipart/form-data')):
self._post = MultiDictProxy(MultiDict())
return self._post
if self.content_type.startswith('multipart/'):
warnings.warn('To process multipart requests use .multipart'
' coroutine instead.', DeprecationWarning)
body = yield from self.read()
content_charset = self.charset or 'utf-8'
environ = {'REQUEST_METHOD': self.method,
'CONTENT_LENGTH': str(len(body)),
'QUERY_STRING': '',
'CONTENT_TYPE': self.headers.get(hdrs.CONTENT_TYPE)}
fs = cgi.FieldStorage(fp=io.BytesIO(body),
environ=environ,
keep_blank_values=True,
encoding=content_charset)
supported_transfer_encoding = {
'base64': binascii.a2b_base64,
'quoted-printable': binascii.a2b_qp
}
out = MultiDict()
_count = 1
for field in fs.list or ():
transfer_encoding = field.headers.get(
hdrs.CONTENT_TRANSFER_ENCODING, None)
if field.filename:
ff = FileField(field.name,
field.filename,
field.file, # N.B. file closed error
field.type)
if self._post_files_cache is None:
self._post_files_cache = {}
self._post_files_cache[field.name+str(_count)] = field
_count += 1
out.add(field.name, ff)
else:
value = field.value
if transfer_encoding in supported_transfer_encoding:
# binascii accepts bytes
value = value.encode('utf-8')
value = supported_transfer_encoding[
transfer_encoding](value)
out.add(field.name, value)
self._post = MultiDictProxy(out)
return self._post
def __repr__(self):
ascii_encodable_path = self.path.encode('ascii', 'backslashreplace') \
.decode('ascii')
return "<{} {} {} >".format(self.__class__.__name__,
self.method, ascii_encodable_path)
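# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how a handler coroutine would read the parsed POST body
# via Request.post(); the 'name' field is a made-up assumption and this only
# illustrates the API, not a complete handler.
@asyncio.coroutine
def _example_read_form(request):
    """Return one field from the parsed POST body (sketch only)."""
    data = yield from request.post()
    return data.get('name')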
############################################################
# HTTP Response classes
############################################################
class StreamResponse(HeadersMixin):
def __init__(self, *, status=200, reason=None, headers=None):
self._body = None
self._keep_alive = None
self._chunked = False
self._chunk_size = None
self._compression = False
self._compression_force = False
self._headers = CIMultiDict()
self._cookies = http.cookies.SimpleCookie()
self.set_status(status, reason)
self._req = None
self._resp_impl = None
self._eof_sent = False
if headers is not None:
self._headers.extend(headers)
if hdrs.CONTENT_TYPE not in self._headers:
self._headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
def _copy_cookies(self):
for cookie in self._cookies.values():
value = cookie.output(header='')[1:]
self.headers.add(hdrs.SET_COOKIE, value)
@property
def prepared(self):
return self._resp_impl is not None
@property
def started(self):
warnings.warn('use Response.prepared instead', DeprecationWarning)
return self.prepared
@property
def status(self):
return self._status
@property
def chunked(self):
return self._chunked
@property
def compression(self):
return self._compression
@property
def reason(self):
return self._reason
def set_status(self, status, reason=None):
self._status = int(status)
if reason is None:
reason = ResponseImpl.calc_reason(status)
self._reason = reason
@property
def keep_alive(self):
return self._keep_alive
def force_close(self):
self._keep_alive = False
def enable_chunked_encoding(self, chunk_size=None):
"""Enables automatic chunked transfer encoding."""
self._chunked = True
self._chunk_size = chunk_size
def enable_compression(self, force=None):
"""Enables response compression encoding."""
# Backwards compatibility for when force was a bool <0.17.
if type(force) == bool:
force = ContentCoding.deflate if force else ContentCoding.identity
elif force is not None:
assert isinstance(force, ContentCoding), ("force should be one of "
"None, bool or "
"ContentCoding")
self._compression = True
self._compression_force = force
@property
def headers(self):
return self._headers
@property
def cookies(self):
return self._cookies
def set_cookie(self, name, value, *, expires=None,
domain=None, max_age=None, path='/',
secure=None, httponly=None, version=None):
"""Set or update response cookie.
Sets a new cookie or updates an existing one with a new value.
Only params which are not None are updated.
"""
old = self._cookies.get(name)
if old is not None and old.coded_value == '':
# deleted cookie
self._cookies.pop(name, None)
self._cookies[name] = value
c = self._cookies[name]
if expires is not None:
c['expires'] = expires
elif c.get('expires') == 'Thu, 01 Jan 1970 00:00:00 GMT':
del c['expires']
if domain is not None:
c['domain'] = domain
if max_age is not None:
c['max-age'] = max_age
elif 'max-age' in c:
del c['max-age']
c['path'] = path
if secure is not None:
c['secure'] = secure
if httponly is not None:
c['httponly'] = httponly
if version is not None:
c['version'] = version
def del_cookie(self, name, *, domain=None, path='/'):
"""Delete cookie.
Creates new empty expired cookie.
"""
# TODO: do we need domain/path here?
self._cookies.pop(name, None)
self.set_cookie(name, '', max_age=0,
expires="Thu, 01 Jan 1970 00:00:00 GMT",
domain=domain, path=path)
@property
def content_length(self):
# Just a placeholder for adding setter
return super().content_length
@content_length.setter
def content_length(self, value):
if value is not None:
value = int(value)
# TODO: raise error if chunked enabled
self.headers[hdrs.CONTENT_LENGTH] = str(value)
else:
self.headers.pop(hdrs.CONTENT_LENGTH, None)
@property
def content_type(self):
# Just a placeholder for adding setter
return super().content_type
@content_type.setter
def content_type(self, value):
self.content_type # read header values if needed
self._content_type = str(value)
self._generate_content_type_header()
@property
def charset(self):
# Just a placeholder for adding setter
return super().charset
@charset.setter
def charset(self, value):
ctype = self.content_type # read header values if needed
if ctype == 'application/octet-stream':
raise RuntimeError("Setting charset for application/octet-stream "
"doesn't make sense, setup content_type first")
if value is None:
self._content_dict.pop('charset', None)
else:
self._content_dict['charset'] = str(value).lower()
self._generate_content_type_header()
@property
def last_modified(self, _LAST_MODIFIED=hdrs.LAST_MODIFIED):
"""The value of Last-Modified HTTP header, or None.
This header is represented as a `datetime` object.
"""
httpdate = self.headers.get(_LAST_MODIFIED)
if httpdate is not None:
timetuple = parsedate(httpdate)
if timetuple is not None:
return datetime.datetime(*timetuple[:6],
tzinfo=datetime.timezone.utc)
return None
@last_modified.setter
def last_modified(self, value):
if value is None:
self.headers.pop(hdrs.LAST_MODIFIED, None)
elif isinstance(value, (int, float)):
self.headers[hdrs.LAST_MODIFIED] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value)))
elif isinstance(value, datetime.datetime):
self.headers[hdrs.LAST_MODIFIED] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple())
elif isinstance(value, str):
self.headers[hdrs.LAST_MODIFIED] = value
@property
def tcp_nodelay(self):
resp_impl = self._resp_impl
if resp_impl is None:
raise RuntimeError("Cannot get tcp_nodelay for "
"not prepared response")
return resp_impl.transport.tcp_nodelay
def set_tcp_nodelay(self, value):
resp_impl = self._resp_impl
if resp_impl is None:
raise RuntimeError("Cannot set tcp_nodelay for "
"not prepared response")
resp_impl.transport.set_tcp_nodelay(value)
@property
def tcp_cork(self):
resp_impl = self._resp_impl
if resp_impl is None:
raise RuntimeError("Cannot get tcp_cork for "
"not prepared response")
return resp_impl.transport.tcp_cork
def set_tcp_cork(self, value):
resp_impl = self._resp_impl
if resp_impl is None:
raise RuntimeError("Cannot set tcp_cork for "
"not prepared response")
resp_impl.transport.set_tcp_cork(value)
def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):
params = '; '.join("%s=%s" % i for i in self._content_dict.items())
if params:
ctype = self._content_type + '; ' + params
else:
ctype = self._content_type
self.headers[CONTENT_TYPE] = ctype
def _start_pre_check(self, request):
if self._resp_impl is not None:
if self._req is not request:
raise RuntimeError(
"Response has been started with different request.")
else:
return self._resp_impl
else:
return None
def _do_start_compression(self, coding):
if coding != ContentCoding.identity:
self.headers[hdrs.CONTENT_ENCODING] = coding.value
self._resp_impl.add_compression_filter(coding.value)
self.content_length = None
def _start_compression(self, request):
if self._compression_force:
self._do_start_compression(self._compression_force)
else:
accept_encoding = request.headers.get(
hdrs.ACCEPT_ENCODING, '').lower()
for coding in ContentCoding:
if coding.value in accept_encoding:
self._do_start_compression(coding)
return
def start(self, request):
warnings.warn('use .prepare(request) instead', DeprecationWarning)
resp_impl = self._start_pre_check(request)
if resp_impl is not None:
return resp_impl
return self._start(request)
@asyncio.coroutine
def prepare(self, request):
resp_impl = self._start_pre_check(request)
if resp_impl is not None:
return resp_impl
for app in request.match_info.apps:
yield from app.on_response_prepare.send(request, self)
return self._start(request)
def _start(self, request):
self._req = request
keep_alive = self._keep_alive
if keep_alive is None:
keep_alive = request.keep_alive
self._keep_alive = keep_alive
version = request.version
resp_impl = self._resp_impl = ResponseImpl(
request._writer,
self._status,
version,
not keep_alive,
self._reason)
self._copy_cookies()
headers = self.headers
if self._compression:
self._start_compression(request)
if self._chunked:
if request.version != HttpVersion11:
raise RuntimeError("Using chunked encoding is forbidden "
"for HTTP/{0.major}.{0.minor}".format(
request.version))
resp_impl.enable_chunked_encoding()
if self._chunk_size:
resp_impl.add_chunking_filter(self._chunk_size)
headers[hdrs.TRANSFER_ENCODING] = 'chunked'
else:
resp_impl.length = self.content_length
if hdrs.DATE not in headers:
headers[hdrs.DATE] = request._time_service.strtime()
headers.setdefault(hdrs.SERVER, resp_impl.SERVER_SOFTWARE)
if hdrs.CONNECTION not in headers:
if keep_alive:
if version == HttpVersion10:
headers[hdrs.CONNECTION] = 'keep-alive'
else:
if version == HttpVersion11:
headers[hdrs.CONNECTION] = 'close'
resp_impl.headers = headers
self._send_headers(resp_impl)
return resp_impl
def _send_headers(self, resp_impl):
# Dirty hack required for
# https://github.com/KeepSafe/aiohttp/issues/1093
# File sender may override it
resp_impl.send_headers()
def write(self, data):
assert isinstance(data, (bytes, bytearray, memoryview)), \
"data argument must be byte-ish (%r)" % type(data)
if self._eof_sent:
raise RuntimeError("Cannot call write() after write_eof()")
if self._resp_impl is None:
raise RuntimeError("Cannot call write() before start()")
if data:
return self._resp_impl.write(data)
else:
return ()
@asyncio.coroutine
def drain(self):
if self._resp_impl is None:
raise RuntimeError("Response has not been started")
yield from self._resp_impl.transport.drain()
@asyncio.coroutine
def write_eof(self):
if self._eof_sent:
return
if self._resp_impl is None:
raise RuntimeError("Response has not been started")
yield from self._resp_impl.write_eof()
self._eof_sent = True
def __repr__(self):
if self.started:
info = "{} {} ".format(self._req.method, self._req.path)
else:
info = "not started"
return "<{} {} {}>".format(self.__class__.__name__,
self.reason, info)
class Response(StreamResponse):
def __init__(self, *, body=None, status=200,
reason=None, text=None, headers=None, content_type=None,
charset=None):
if body is not None and text is not None:
raise ValueError("body and text are not allowed together")
if headers is None:
headers = CIMultiDict()
elif not isinstance(headers, (CIMultiDict, CIMultiDictProxy)):
headers = CIMultiDict(headers)
if content_type is not None and ";" in content_type:
raise ValueError("charset must not be in content_type "
"argument")
if text is not None:
if hdrs.CONTENT_TYPE in headers:
if content_type or charset:
raise ValueError("passing both Content-Type header and "
"content_type or charset params "
"is forbidden")
else:
# fast path for filling headers
if not isinstance(text, str):
raise TypeError("text argument must be str (%r)" %
type(text))
if content_type is None:
content_type = 'text/plain'
if charset is None:
charset = 'utf-8'
headers[hdrs.CONTENT_TYPE] = (
content_type + '; charset=' + charset)
body = text.encode(charset)
text = None
else:
if hdrs.CONTENT_TYPE in headers:
if content_type is not None or charset is not None:
raise ValueError("passing both Content-Type header and "
"content_type or charset params "
"is forbidden")
else:
if content_type is not None:
if charset is not None:
content_type += '; charset=' + charset
headers[hdrs.CONTENT_TYPE] = content_type
super().__init__(status=status, reason=reason, headers=headers)
if text is not None:
self.text = text
else:
self.body = body
@property
def body(self):
return self._body
@body.setter
def body(self, body):
if body is not None and not isinstance(body, bytes):
raise TypeError("body argument must be bytes (%r)" % type(body))
self._body = body
if body is not None:
self.content_length = len(body)
else:
self.content_length = 0
@property
def text(self):
if self._body is None:
return None
return self._body.decode(self.charset or 'utf-8')
@text.setter
def text(self, text):
if text is not None and not isinstance(text, str):
raise TypeError("text argument must be str (%r)" % type(text))
if self.content_type == 'application/octet-stream':
self.content_type = 'text/plain'
if self.charset is None:
self.charset = 'utf-8'
self.body = text.encode(self.charset)
@asyncio.coroutine
def write_eof(self):
body = self._body
if (body is not None and
self._req.method != hdrs.METH_HEAD and
self._status not in [204, 304]):
self.write(body)
yield from super().write_eof()
def json_response(data=sentinel, *, text=None, body=None, status=200,
reason=None, headers=None, content_type='application/json',
dumps=json.dumps):
if data is not sentinel:
if text or body:
raise ValueError(
"only one of data, text, or body should be specified"
)
else:
text = dumps(data)
return Response(text=text, body=body, status=status, reason=reason,
headers=headers, content_type=content_type)
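# --- Illustrative handler sketches (not part of the original module) ---
# Hedged examples of building responses with the classes above, written in the
# old-style @asyncio.coroutine form used throughout this module. The handler
# names and payloads are made up.
@asyncio.coroutine
def _example_text_handler(request):
    """Return a plain-text Response echoing the request method (sketch only)."""
    return Response(text='hello, {}'.format(request.method))
@asyncio.coroutine
def _example_json_handler(request):
    """Return a JSON response built with json_response() (sketch only)."""
    return json_response({'path': str(request.rel_url)})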
|
|
"""
Implementation of JSONEncoder
"""
import re
try:
from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from simplejson._speedups import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = str
def encode_basestring(s):
"""
Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string."""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
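# --- Illustrative sketch (not part of the original module) ---
# A hedged example contrasting the two escapers above on a small unicode
# sample: encode_basestring() leaves non-ASCII characters as-is, while the
# ASCII variant escapes them as \uXXXX sequences. The helper is only defined,
# never called at import time.
def _example_escaping():
    """Return both JSON string encodings of one unicode sample (sketch only)."""
    sample = u'caf\xe9 says "hi"\n'
    return encode_basestring(sample), encode_basestring_ascii(sample)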
class JSONEncoder(object):
"""
Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method that returns a serializable object for ``o`` if
possible; otherwise it should call the superclass implementation
(to raise ``TypeError``).
"""
__all__ = ['__init__', 'default', 'encode', 'iterencode']
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""
Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be a unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""
Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""
Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return ''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR,
_inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif _skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
__all__ = ['JSONEncoder']
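# --- Illustrative subclass sketch (not part of the original module) ---
# A hedged example of extending JSONEncoder through default(), following the
# pattern shown in its docstring; _ExampleSetEncoder is a made-up name.
class _ExampleSetEncoder(JSONEncoder):
    """Encode sets and frozensets as JSON arrays by converting them to lists."""
    def default(self, o):
        if isinstance(o, (set, frozenset)):
            return list(o)
        return JSONEncoder.default(self, o)
# Example usage (not executed here):
#   _ExampleSetEncoder(sort_keys=True).encode({'tags': set(['a', 'b'])})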
|
|
"""
Created on May 16, 2016
@author: Jafar Taghiyar (jtaghiyar@bccrc.ca)
"""
from __future__ import unicode_literals
#============================
# Django imports
#----------------------------
from django.core.urlresolvers import reverse
from django.db import models
#============================
# App imports
#----------------------------
from .helpers import *
#============================
# 3rd-party app imports
#----------------------------
from taggit.managers import TaggableManager
from taggit.models import Tag
from simple_history.models import HistoricalRecords
from simple_history import register
## register taggit for tracking its history
register(Tag)
#============================
# helpers
#----------------------------
# library_states = [
# "not_ready",
# "submission_ready",
# "submitted",
# "sequenced",
# "resubmitted",
# ]
class SequencingManager(models.Manager):
def with_data(self):
return [obj for obj in self.get_queryset()
if obj.has_sequencing_detail() and
obj.sequencingdetail.path_to_archive]
#============================
# Sample models
#----------------------------
class Sample(models.Model, FieldValue):
"""
Base class of different sample types.
"""
class Meta:
ordering = ('sample_id',)
## track history
history = HistoricalRecords(
table_name='history_sample'
)
## choices
sample_type_choices = (
('P','Patient'),
('C','Cell Line'),
('X','Xenograft'),
('O','Other'),
)
## required fields
sample_id = create_chrfield("Sample ID", blank=False)
## other fields
taxonomy_id = create_chrfield(
"Taxonomy ID",
default="9606"
)
sample_type = create_chrfield(
"Sample type",
choices=sample_type_choices
)
anonymous_patient_id = create_chrfield("Anonymous patient ID")
cell_line_id = create_chrfield("Cell line ID")
xenograft_id = create_chrfield("Xenograft ID")
xenograft_recipient_taxonomy_id = create_chrfield(
"Xenograft recipient taxonomy ID",
default="10090"
)
strain = create_chrfield("Strain")
xenograft_biopsy_date = models.DateField(
"Xenograft biopsy date",
null=True,
blank=True,
)
def has_additional_sample_information(self):
return hasattr(self,
'additionalsampleinformation'
)
def get_absolute_url(self):
return reverse("core:sample_detail", kwargs={"pk": self.pk})
## this is no longer needed since each sample can now have only one type.
# def get_sample_types(self):
# """get all the sample types under the same sample_id."""
# samples = Sample.objects.filter(sample_id=self.sample_id)
# types = [getattr(s, "get_sample_type_display")()
# for s in samples
# ]
# return types
def __str__(self):
return self.sample_id
class AdditionalSampleInformation(models.Model, FieldValue):
"""
Additional sample information.
"""
## track history
# history = HistoricalRecords(
# table_name='history_additional_sample_information'
# )
## database relationships
sample = models.OneToOneField(
Sample,
verbose_name="Sample",
on_delete=models.CASCADE
)
## choices
# disease_condition_health_status_choices = (
# ('H','Healthy'),
# ('C','Cystic Fibrosis'),
# ('B','Breast Cancer'),
# )
# index_read_type_choices = (
# ('on_3rd_read','On 3rd (index-specific) read'),
# ('on_forward_read','On forward read'),
# ('on_reverse_read','On reverse read'),
# )
pathology_occurrence_choices = (
('PR','Primary'),
('RC','Recurrent or Relapse'),
('ME','Metastatic'),
('RM','Remission'),
('UN','Undetermined'),
('US','Unspecified'),
)
sex_choices = (
('M', 'Male'),
('F', 'Female'),
('X', 'Mixed'),
('U', 'Unknown')
)
tissue_type_choices = (
('N', 'Normal'),
('B', 'Benign'),
('PM', 'Pre-malignant'),
('M', 'Malignant'),
('NNP', 'Non-neoplastic Disease'),
('U', 'Undetermined'),
('HP', 'Hyperplasia'),
('MP', 'Metaplasia'),
('DP', 'Dysplasia'),
)
treatment_status_choices = (
('PR','Pre-treatment'),
('IN','In-treatment'),
('PO','Post-treatment'),
('NA','N/A'),
('UN','Unknown'),
)
## fields
disease_condition_health_status = create_chrfield(
"Disease condition/health status",
# choices=disease_condition_health_status_choices,
)
sex = create_chrfield(
"Sex",
choices=sex_choices,
)
patient_biopsy_date = models.DateField(
"Patient biopsy date",
null=True,
blank=True,
)
anatomic_site = create_chrfield("Anatomic site")
anatomic_sub_site = create_chrfield("Anatomic sub-site")
developmental_stage = create_chrfield("Developmental stage")
tissue_type = create_chrfield(
"Tissue type",
choices=tissue_type_choices
)
cell_type = create_chrfield("Cell type")
pathology_disease_name = create_chrfield("Pathology/disease name")
additional_pathology_info = create_chrfield(
"Additional pathology information"
)
grade = create_chrfield("Grade")
stage = create_chrfield("Stage")
tumour_content = create_chrfield("Tumor content (%)")
pathology_occurrence = create_chrfield(
"Pathology occurrence",
choices=pathology_occurrence_choices
)
treatment_status = create_chrfield(
"Treatment status",
choices=treatment_status_choices
)
family_information = create_chrfield("Family information")
def __str__(self):
res = '_'.join([
self.sample.sample_id,
'additional_information'
])
return res
#============================
# Library models
#----------------------------
class Library(models.Model, FieldValue, LibraryAssistant):
"""
Library contains several Cell objects.
"""
class Meta:
ordering = ('sample', 'pool_id')
fields_to_exclude = ['ID', 'Sample']
values_to_exclude = ['id', 'sample']
## track history
history = HistoricalRecords(
table_name='history_library'
)
## Taggit
projects = TaggableManager(
verbose_name="Project",
help_text="A comma-separated list of project names.",
blank=True
)
## database relationships
sample = models.ForeignKey(
Sample,
verbose_name="Sample",
on_delete=models.CASCADE
)
# sample_id = create_chrfield("Sample ID", blank=False)
pool_id = create_chrfield("Chip ID", blank=False)
jira_ticket = create_chrfield("Jira ticket", blank=False)
num_sublibraries = create_intfield("Number of sublibraries", default=0)
description = create_textfield("Description")
result = create_textfield("Result")
relates_to = models.ManyToManyField(
"self",
verbose_name="Relates to",
# null=True,
blank=True,
)
def get_absolute_url(self):
return reverse("core:library_detail", kwargs={"pk": self.pk})
def get_library_id(self):
return '_'.join([self.sample.sample_id, self.pool_id])
def has_sublibrary_info(self):
return self.sublibraryinformation_set.exists()
def __str__(self):
return 'LIB_' + self.get_library_id()
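# --- Illustrative sketch (not part of the original models) ---
# A hedged example of how Library.get_library_id() composes identifiers. The
# sample and pool values are made up, the instances are never saved, and no
# database access happens when the helper runs.
def _example_library_id():
    """Return the display ID of an unsaved Library instance (sketch only)."""
    sample = Sample(sample_id="SA001")
    library = Library(sample=sample, pool_id="A90123", jira_ticket="SC-123")
    return library.get_library_id()  # -> "SA001_A90123"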
class SublibraryInformation(models.Model, FieldValue):
"""
Sublibrary Information from the SmartChipApp output file.
It's technically a table of cell information.
"""
fields_to_exclude = ['ID', 'Library']
values_to_exclude = ['id', 'library']
## track history
# history = HistoricalRecords(
# table_name='history_sublibrary_information'
# )
## database relationships
library = models.ForeignKey(
Library,
verbose_name="Library",
on_delete=models.CASCADE
)
## fields
sample = create_chrfield("Sample")
row = create_intfield("Row")
column = create_intfield("Column")
img_col = create_intfield("Image Column")
file_ch1 = create_chrfield("File_Ch1")
file_ch2 = create_chrfield("File_Ch2")
fld_section = create_chrfield("Fld_Section")
fld_index = create_chrfield("Fld_Index")
num_live = create_intfield("Num_Live")
num_dead = create_intfield("Num_Dead")
num_other = create_intfield("Num_Other")
rev_live = create_intfield("Rev_Live")
rev_dead = create_intfield("Rev_Dead")
rev_other = create_intfield("Rev_Other")
spot_class = create_chrfield("Spot_Class")
index_i7 = create_chrfield("Index_I7")
primer_i7 = create_chrfield("Primer_I7")
index_i5 = create_chrfield("Index_I5")
primer_i5 = create_chrfield("Primer_I5")
pick_met = create_chrfield("Pick_Met")
spot_well = create_chrfield("Spot_Well")
num_drops = create_intfield("Num_Drops")
def get_sublibrary_id(self):
        # add a leading zero to one-digit row/column numbers
        row = str(self.row).zfill(2)
        col = str(self.column).zfill(2)
res = '_'.join(
[
self.library.sample.sample_id,
self.library.pool_id,
'R' + row,
'C' + col,
]
)
return res
def __str__(self):
return self.get_sublibrary_id()
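    # Illustrative note (IDs below are made up, not taken from the source): for a
    # library whose sample_id is "SA123" and pool_id is "A90123", a cell at row=5,
    # column=7 gets get_sublibrary_id() == "SA123_A90123_R05_C07".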
class LibrarySampleDetail(models.Model, FieldValue):
"""
Library sample details.
"""
fields_to_exclude = ['ID', 'Library']
values_to_exclude = ['id', 'library']
## track history
# history = HistoricalRecords(
# table_name='history_library_sample_detail'
# )
## database relationships
library = models.OneToOneField(
Library,
verbose_name="Library",
on_delete=models.CASCADE,
null=True,
blank=True
)
## choices
cell_state_choices = (
('C','Cells'),
('N','Nuclei'),
('M','Mixed'),
('U','Unknown'),
)
spotting_location_choices = (
('A','Aparicio Lab'),
('H','Hansen Lab'),
('G','GSC'),
)
## fields
cell_state = create_chrfield(
"Cell state",
choices=cell_state_choices
)
estimated_percent_viability = create_intfield(
"Estimated percent viability",
)
label_of_original_sample_vial = create_chrfield(
"Label of original sample vial"
)
original_storage_temperature = create_intfield(
"Original storage temperature (C)",
)
passage_of_cell_line = create_intfield("Passage")
sample_notes = create_textfield("Sample notes")
sample_preparation_method = create_textfield("Sample preparation method")
sample_preservation_method = create_chrfield("Sample preservation method")
sample_spot_date = models.DateField(
"Sample spot date",
null=True,
blank=True,
)
spotting_location = create_chrfield(
"Spotting location",
choices=spotting_location_choices
)
class LibraryConstructionInformation(models.Model, FieldValue):
"""
Library construction information.
"""
fields_to_exclude = ['ID', 'Library']
values_to_exclude = ['id', 'library']
## track history
# history = HistoricalRecords(
# table_name='history_library_construction_information'
# )
## database relationships
library = models.OneToOneField(
Library,
verbose_name="Library",
on_delete=models.CASCADE,
null=True,
blank=True
)
## choices
chip_format_choices = (
('W','Wafergen'),
('M','Microfluidic'),
('B','Bulk'),
('O','Other'),
)
spotting_location_choices = (
('A','Aparicio Lab'),
('H','Hansen Lab'),
('G','GSC'),
)
## fields
chip_format = create_chrfield(
"Chip format",
choices=chip_format_choices,
default="W"
)
library_construction_method = create_chrfield(
"Library construction method",
default="Nextera (Illumina)"
)
library_type = create_chrfield(
"Library type",
default="genome"
)
library_notes = create_textfield("Library notes")
library_prep_date = models.DateField(
"Library prep date",
null=True,
blank=True,
)
number_of_pcr_cycles = create_intfield(
"Number of PCR cycles",
default=11
)
protocol = create_textfield("Protocol")
spotting_location = create_chrfield(
"Spotting location",
choices=spotting_location_choices
)
class LibraryQuantificationAndStorage(models.Model, FieldValue):
"""
Library quantification and storage.
"""
fields_to_exclude = [
'ID',
'Library',
'Freezer',
'Rack',
'Shelf',
'Box',
'Position in box'
]
values_to_exclude = [
'id',
'library',
'freezer',
'rack',
'shelf',
'box',
'position_in_box'
]
## track history
# history = HistoricalRecords(
# table_name='history_library_quantification_and_storage'
# )
## database relationships
library = models.OneToOneField(
Library,
verbose_name="Library",
on_delete=models.CASCADE,
null=True,
blank=True
)
## choices
qc_check_choices = (
('P','Will sequence'),
('N','Will not sequence'),
)
## fields
average_size = create_intfield("Average size (bp)")
dna_concentration_nm = models.DecimalField(
"DNA concentration (nM)",
max_digits=6,
decimal_places=3,
null=True,
blank=True
)
dna_concentration_ngul = models.DecimalField(
"DNA concentration (ng/uL)",
max_digits=6,
decimal_places=3,
null=True,
blank=True
)
dna_volume = create_chrfield("DNA volume (uL)")
freezer = create_chrfield(
"Freezer",
# default="UL1",
)
rack = create_intfield("Rack")
shelf = create_intfield("Shelf")
box = create_intfield("Box")
position_in_box = create_intfield("Position in box")
library_tube_label = create_chrfield("Library tube label")
qc_check = create_chrfield(
"QC check",
choices=qc_check_choices
)
qc_notes = create_textfield("QC notes")
quantification_method = create_chrfield(
"Quantification method",
default="Bioanalyzer"
)
size_range = create_chrfield("Size range (bp)")
size_selection_method = create_chrfield(
"Size selection method",
default="AmpureXP"
)
storage_medium = create_chrfield(
"Storage medium",
default="TE 10:0.1"
)
agilent_bioanalyzer_xad = models.FileField(
"Agilent bioanalyzer xad file",
upload_to=upload_path,
max_length=200,
null=True,
blank=True
)
agilent_bioanalyzer_image = models.FileField(
"Agilent bioanalyzer image file",
upload_to=upload_path,
max_length=200,
null=True,
blank=True
)
def library_location(self):
loc = None
if self.freezer:
loc = '_'.join([
'CRC',
self.freezer,
str(self.rack) + ':' + str(self.shelf),
str(self.box) + ':' + str(self.position_in_box),
])
return loc
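    # Illustrative note (values below are made up, not taken from the source): with
    # freezer="F1", rack=2, shelf=3, box=4 and position_in_box=5,
    # library_location() returns "CRC_F1_2:3_4:5"; with no freezer set it returns None.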
#============================
# Sequencing models
#----------------------------
class Sequencing(models.Model, FieldValue):
"""
Sequencing information.
"""
fields_to_exclude = ['ID', 'Library', 'Chip ID']
values_to_exclude = ['id', 'library', 'pool_id']
## track history
history = HistoricalRecords(
table_name='history_sequencing'
)
## database relationships
library = models.ForeignKey(
Library,
verbose_name="Library",
on_delete=models.CASCADE,
)
## choices
sequencing_instrument_choices = (
('HX','HiSeqX'),
('H2500','HiSeq2500'),
('N550','NextSeq550'),
('MI','MiSeq'),
('O','other'),
)
sequencing_output_mode_choices = (
('L','Low'),
('M','Medium'),
('H','High'),
)
read_type_choices = (
('P', 'PET'),
('S', 'SET')
)
## fields
pool_id = create_chrfield("Chip ID")
adapter = create_chrfield(
"Adapter",
default="CTGTCTCTTATACACATCT"
)
format_for_data_submission = create_chrfield(
"Format for data dissemination",
default="fastq"
)
index_read_type = create_chrfield(
"Index read type",
default="on 2nd and 3rd index-specific read"
)
index_read1_length = create_intfield(
"Index read1 length",
default=6
)
index_read2_length = create_intfield(
"Index read2 length",
default=6
)
read_type = create_chrfield(
"Read type",
choices=read_type_choices,
default="P"
)
read1_length = create_intfield(
"Read1 length",
default=125,
)
read2_length = create_intfield(
"Read2 length",
default=125
)
sequencing_goal = create_chrfield("Sequencing goal (# lanes)")
sequencing_instrument = create_chrfield(
"Sequencing instrument",
choices=sequencing_instrument_choices,
default="H2500"
)
sequencing_output_mode = create_chrfield(
"Sequencing output mode",
choices=sequencing_output_mode_choices,
# default="Low"
)
short_description_of_submission = create_chrfield(
"Short description of submission",
max_length=150
)
submission_date = models.DateField(
"Submission date",
null=True,
blank=True,
)
relates_to = models.ManyToManyField(
"self",
verbose_name="Relates to",
# null=True,
blank=True,
)
objects = SequencingManager()
    def has_sequencing_detail(self):
        return hasattr(self, 'sequencingdetail')
def get_absolute_url(self):
return reverse("core:sequencing_detail", kwargs={"pk": self.pk})
def __str__(self):
s = 'SEQ_' + self.library.get_library_id()
if self.has_sequencing_detail():
s += '_' + self.sequencingdetail.flow_cell_id
return s
class SequencingDetail(models.Model, FieldValue):
"""
Sequencing details.
"""
fields_to_exclude = ['ID', 'Sequencing']
values_to_exclude = ['id', 'sequencing']
## track history
# history = HistoricalRecords(
# table_name='history_sequencing_detail'
# )
## database relationships
sequencing = models.OneToOneField(
Sequencing,
verbose_name="Sequencing",
on_delete=models.CASCADE,
null=True,
)
## fields
flow_cell_id = create_chrfield("Flow cell/Lane ID")
gsc_library_id = create_chrfield("GSC library ID")
# lane_id = create_chrfield("Lane ID")
path_to_archive = create_chrfield(
"Path to archive",
max_length=150
)
sequencer_id = create_chrfield("Sequencer ID")
sequencing_center = create_chrfield(
"Sequencing center",
default="BCCAGSC"
)
sequencer_notes = create_textfield("Sequencing notes")
|
|
#!/usr/bin/env python
import argparse
import datetime
import logging
import os
import statistics
import sys
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import partial
from multiprocessing.pool import ThreadPool
import math
from eviltransform import gcj2wgs_exact
from tqdm import tqdm
from .api.booking_api import BookingApi, BookingListApi, LIMIT_REQUESTS_PER_MINUTE
from .api.exceptions import GettingMinPriceError
SUPPORTED_LANGUAGES = ("en", "ru", "ar", "cs", "da", "nl", "fi", "fr", "de",
"hu", "id", "it", "ja", "ko", "pl", "pt", "ro", "es",
"sv", "th", "tr", "uk", "vi", "zh", "he", "sk", "el")
class BookingGen:
def __init__(self, api, country):
self.api = api
self.country_code = country["country"]
self.country_name = country["name"]
logging.info(f"Download[{self.country_code}]: {self.country_name}")
extras = ["hotel_info", "room_info"]
self.hotels = self._download_hotels(extras=extras)
self.translations = self._download_translations()
self.currency_medians = self._currency_medians_by_cities()
def generate_tsv_rows(self, sep="\t"):
self._fix_hotels()
return (self._create_tsv_hotel_line(hotel, sep) for hotel in self.hotels)
@staticmethod
def _get_hotel_min_price(hotel):
        prices = [float(x["room_info"]["min_price"]) for x in hotel["room_data"]]
flt = filter(lambda x: not math.isclose(x, 0.0), prices)
try:
return min(flt)
except ValueError:
raise GettingMinPriceError(f"Getting min price error: {prices}.")
@staticmethod
def _format_string(s):
s = s.strip()
for x in (("\t", " "), ("\n", " "), ("\r", "")):
s = s.replace(*x)
return s
def _download_hotels(self, **params):
return self.api.hotels(country_ids=self.country_code, **params)
def _download_translations(self):
extras = ["hotel_info", ]
translations = defaultdict(dict)
with ThreadPoolExecutor(max_workers=len(SUPPORTED_LANGUAGES)) as executor:
m = {executor.submit(self._download_hotels, extras=extras, language=lang): lang
for lang in SUPPORTED_LANGUAGES}
for future in as_completed(m):
lang = m[future]
hotels = future.result()
for hotel in hotels:
hotel_id = hotel["hotel_id"]
hotel_data = hotel["hotel_data"]
translations[hotel_id][lang] = {
"name": BookingGen._format_string(hotel_data["name"]),
"address": BookingGen._format_string(hotel_data["address"])
}
return translations
def _fix_hotels(self):
if self.country_code == "cn":
# Fix chinese coordinates.
# https://en.wikipedia.org/wiki/Restrictions_on_geographic_data_in_China
for hotel in self.hotels:
hotel_data = hotel["hotel_data"]
location = hotel_data["location"]
try:
location["latitude"], location["longitude"] = gcj2wgs_exact(
float(location["latitude"]), float(location["longitude"])
)
except ValueError:
logging.exception(f"Converting error {location}")
def _currency_medians_by_cities(self):
cities = defaultdict(lambda: defaultdict(list))
for hotel in self.hotels:
hotel_data = hotel["hotel_data"]
city_id = hotel_data["city_id"]
currency = hotel_data["currency"]
try:
price = BookingGen._get_hotel_min_price(hotel)
except GettingMinPriceError:
logging.exception("Getting min price error.")
continue
cities[city_id][currency].append(price)
for city in cities:
for currency in cities[city]:
cities[city][currency] = statistics.median(cities[city][currency])
return cities
def _get_rate(self, hotel):
# Price rate ranges, relative to the median price for a city
rates = (0.7, 1.3)
rate = 0
hotel_data = hotel["hotel_data"]
city_id = hotel_data["city_id"]
currency = hotel_data["currency"]
price = None
try:
price = BookingGen._get_hotel_min_price(hotel)
except GettingMinPriceError:
logging.exception("Getting min price error.")
return rate
avg = self.currency_medians[city_id][currency]
rate = 1
# Find a range that contains the price
while rate <= len(rates) and price > avg * rates[rate - 1]:
rate += 1
return rate
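    # Illustrative note on _get_rate (example numbers are made up): with
    # rates = (0.7, 1.3) and a city median of 100, price <= 70 yields rate 1,
    # 70 < price <= 130 yields rate 2, price > 130 yields rate 3, and rate 0
    # means the minimum price could not be determined.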
def _get_translations(self, hotel):
try:
tr = self.translations[hotel["hotel_id"]]
except KeyError:
return ""
hotel_data = hotel["hotel_data"]
name = hotel_data["name"]
address = hotel_data["address"]
tr_ = defaultdict(dict)
for k, v in tr.items():
n = v["name"] if v["name"] != name else ""
a = v["address"] if v["address"] != address else ""
if a or n:
tr_[k]["name"] = n
tr_[k]["address"] = a
tr_list = []
for tr_lang, tr_values in tr_.items():
tr_list.append(tr_lang)
tr_list.extend([tr_values[e] for e in ("name", "address")])
return "|".join(s.replace("|", ";") for s in tr_list)
def _create_tsv_hotel_line(self, hotel, sep="\t"):
hotel_data = hotel["hotel_data"]
location = hotel_data["location"]
row = (
hotel["hotel_id"],
f"{location['latitude']:.6f}",
f"{location['longitude']:.6f}",
hotel_data["name"],
hotel_data["address"],
hotel_data["class"],
self._get_rate(hotel),
hotel_data["ranking"],
hotel_data["review_score"],
hotel_data["url"],
hotel_data["hotel_type_id"],
self._get_translations(hotel)
)
return sep.join(BookingGen._format_string(str(x)) for x in row)
def download_hotels_by_country(api, country):
generator = BookingGen(api, country)
rows = list(generator.generate_tsv_rows())
logging.info(f"For {country['name']} {len(rows)} lines were generated.")
return rows
def download(country_code, user, password, path, threads_count,
progress_bar=tqdm(disable=True)):
api = BookingApi(user, password, "2.4")
list_api = BookingListApi(api)
countries = list_api.countries(languages="en")
if country_code is not None:
countries = list(filter(lambda x: x["country"] in country_code, countries))
logging.info(f"There is {len(countries)} countries.")
progress_bar.desc = "Countries"
progress_bar.total = len(countries)
with open(path, "w") as f:
with ThreadPool(threads_count) as pool:
for lines in pool.imap_unordered(partial(download_hotels_by_country, list_api),
countries):
f.writelines([f"{x}\n" for x in lines])
progress_bar.update()
logging.info(f"Hotels were saved to {path}.")
def process_options():
parser = argparse.ArgumentParser(description="Download and process booking hotels.")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("--logfile", default="",
help="Name and destination for log file")
parser.add_argument("--password", required=True, dest="password",
help="Booking.com account password")
parser.add_argument("--user", required=True, dest="user",
help="Booking.com account user name")
parser.add_argument("--threads_count", default=1, type=int,
help="The number of threads for processing countries.")
parser.add_argument("--output", required=True, dest="output",
help="Name and destination for output file")
parser.add_argument("--country_code", default=None, action="append",
help="Download hotels of this country.")
options = parser.parse_args()
return options
def main():
options = process_options()
logfile = ""
if options.logfile:
logfile = options.logfile
else:
now = datetime.datetime.now()
name = f"{now.strftime('%d_%m_%Y-%H_%M_%S')}_booking_hotels.log"
logfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), name)
print(f"Logs saved to {logfile}.", file=sys.stdout)
if options.threads_count > 1:
print(f"Limit requests per minute is {LIMIT_REQUESTS_PER_MINUTE}.", file=sys.stdout)
logging.basicConfig(level=logging.DEBUG, filename=logfile,
format="%(thread)d [%(asctime)s] %(levelname)s: %(message)s")
with tqdm(disable=not options.verbose) as progress_bar:
download(options.country_code, options.user, options.password,
options.output, options.threads_count, progress_bar)
if __name__ == "__main__":
main()
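# Hypothetical invocation sketch (the module path, credentials and file names are
# placeholders, not taken from the source):
#   python -m booking.download_hotels --user USER --password PASS \
#       --output hotels.tsv --country_code fr --threads_count 4 --verbose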
|
|
import os
import logging
import sys
import time
import json
import base64
import shutil
import etcd
import random
import subprocess
import boto.route53
class DnsCtl:
REGION = os.environ['EC2_REGION']
DOMAIN = os.environ['DOMAIN']
MAX_MEMBERS = 5 # max number of members allowed in the cluster
    RECORD_TTL = 600 # number of seconds before marking the DNS record as invalid
CLIENT_TTL = 100 # number of seconds before removing a hung member from DNS record
def __init__(self, ip):
self.conn = boto.route53.connect_to_region(self.REGION)
self.zone = self.conn.get_zone(self.DOMAIN + '.')
self.ip = ip
def get_record(self):
try:
dns_record = self.zone.find_records('_' + self.REGION + '.' + self.DOMAIN, 'TXT')
if dns_record:
dns_record = dns_record.resource_records[0].strip('"')
dns_record = json.loads(base64.b64decode(dns_record))
if not isinstance(dns_record.get('ttl'), int) or \
not isinstance(dns_record.get('members'), list) or \
abs(time.time() - dns_record['ttl']) > self.RECORD_TTL:
return None
return dns_record
except Exception as e:
logging.exception("Failed")
return None
def verify_record(self):
dns_record = self.get_record()
if dns_record:
for member in dns_record['members']:
ip, ttl, _ = member.split(':')
if ip == self.ip and int(ttl) >= self.CLIENT_TTL / 2:
return dns_record
return None
def write_record(self, dns_record):
txt = json.dumps(dns_record)
change_set = boto.route53.record.ResourceRecordSets(self.conn, self.zone.id)
change = change_set.add_change('UPSERT', '_' + self.REGION + '.' + self.DOMAIN, type="TXT", ttl=30)
change.add_value('"' + base64.b64encode(bytes(txt)) + '"')
change_set.commit()
return self.verify_record()
def set_record(self, dns_record, leader_ip):
try:
is_member = False
ttl_diff = int(abs(time.time() - dns_record['ttl'])) + 1
members = []
for member in dns_record['members']:
ip, ttl, is_leader = member.split(':')
ttl = int(ttl)
is_leader = bool(int(is_leader))
if leader_ip:
is_leader = leader_ip == ip
if ip == self.ip:
is_member = True
ttl = self.CLIENT_TTL
else:
ttl -= ttl_diff
if ttl > 0:
members += [ip + ':' + str(ttl) + ':' + str(int(is_leader))]
if not is_member and len(members) < self.MAX_MEMBERS:
is_member = True
members += [self.ip + ':' + str(self.CLIENT_TTL) + ':' + str(int(self.ip == leader_ip))]
if is_member:
dns_record = {'ttl': int(time.time()), 'members': members}
return self.write_record(dns_record)
return dns_record
except Exception as e:
logging.exception("Failed")
return None
@staticmethod
def init_record():
return {"ttl": int(time.time()), "members": []}
@staticmethod
def get_leader_ip(dns_record):
for member in dns_record['members']:
ip, _, is_leader = member.split(':')
if bool(int(is_leader)):
return ip
return None
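# Illustrative sketch of the TXT record payload handled above (example values are
# made up, not taken from the source): the base64-decoded record is JSON of the form
#   {"ttl": 1500000000, "members": ["10.0.0.1:100:1", "10.0.0.2:80:0"]}
# where each member string encodes "ip:remaining_ttl:is_leader".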
class EtcdCtl:
MIN_MEMBERS = 3
DATA_DIR = os.environ['ETCD_DATA_DIR']
CLIENT_PORT = int(os.environ['ETCD_CLIENT_PORT'])
SERVER_PORT = int(os.environ['ETCD_SERVER_PORT'])
def __init__(self, ip):
self.etcd_proc = None
self.ip = ip
self.current_cluster_state = 'stopped'
self.current_cluster_list = []
def get_record(self, leader_ip):
etcd_host = leader_ip
if self.etcd_proc:
etcd_host = '127.0.0.1'
if etcd_host:
try:
etcd_client = etcd.Client(host=etcd_host, port=self.CLIENT_PORT, read_timeout=5)
                members = [m.split('//')[-1].split(':')[0] for m in etcd_client.machines]
                leader = etcd_client.leader['clientURLs'][0].split('//')[-1].split(':')[0]
return {'leader': leader, 'members': members}
except Exception as e:
logging.exception("Failed")
pass
return {'leader': leader_ip, 'members': []}
@staticmethod
def merge_record(etcd_record, dns_record):
for member in dns_record['members']:
ip, _, is_leader = member.split(':')
if not ip in etcd_record['members']:
etcd_record['members'] += [ip]
if not etcd_record['leader'] and bool(int(is_leader)):
etcd_record['leader'] = ip
return etcd_record
def stop_proc(self):
if self.etcd_proc:
try:
self.etcd_proc.kill()
self.etcd_proc.wait()
except Exception as e:
logging.exception("Failed")
pass
self.etcd_proc = None
self.current_cluster_state = 'stopped'
    def build_initial_cluster(self, cluster_list):
        var = ""
        for member in cluster_list:
            var += member + '=http://' + member + ':' + str(self.SERVER_PORT) + ','
        return var.rstrip(',')
    def build_endpoint(self, cluster_list):
        var = ""
        for member in cluster_list:
            if member != self.ip:
                var += 'http://' + member + ':' + str(self.CLIENT_PORT) + ','
        return var.rstrip(',')
def start_proc(self, cluster_list, cluster_state):
os.environ['ETCD_INITIAL_CLUSTER'] = self.build_initial_cluster(cluster_list)
if cluster_state == 'member':
subprocess.call(['etcdctl',
'--endpoint',
self.build_endpoint(cluster_list),
'member',
'add',
self.ip,
os.environ['ETCD_INITIAL_ADVERTISE_PEER_URLS']])
shutil.rmtree(self.DATA_DIR + '/proxy', ignore_errors=True)
os.environ['ETCD_INITIAL_CLUSTER_STATE'] = 'existing'
os.unsetenv('ETCD_PROXY')
elif cluster_state == 'bootstrap':
shutil.rmtree(self.DATA_DIR + '/member', ignore_errors=True)
shutil.rmtree(self.DATA_DIR + '/proxy', ignore_errors=True)
os.environ['ETCD_INITIAL_CLUSTER_STATE'] = 'new'
os.unsetenv('ETCD_PROXY')
else:
shutil.rmtree(self.DATA_DIR + '/member', ignore_errors=True)
os.unsetenv('ETCD_INITIAL_CLUSTER_STATE')
os.environ['ETCD_PROXY'] = 'on'
logging.info("Starting etcd process in %s mode - %s" % (cluster_state, cluster_list))
self.etcd_proc = subprocess.Popen('/usr/local/bin/etcd')
self.current_cluster_list = cluster_list
self.current_cluster_state = cluster_state
def check_proc(self, etcd_record):
if not self.ip in etcd_record['members']:
cluster_state = 'proxy'
elif etcd_record['leader']:
cluster_state = 'member'
elif len(etcd_record['members']) >= self.MIN_MEMBERS:
cluster_state = 'bootstrap'
else:
logging.error("not enough members in ETCD cluster - %s" % etcd_record['members'])
return None
if sorted(etcd_record['members']) != self.current_cluster_list or \
cluster_state != self.current_cluster_state or \
self.etcd_proc is None or not self.etcd_proc.poll() is None:
self.stop_proc()
self.start_proc(sorted(etcd_record['members']), cluster_state)
def run(dns_ctl, etcd_ctl, interval):
dns_record = dns_ctl.get_record()
if not dns_record:
dns_record = dns_ctl.init_record()
logging.info('DNS Record - %s' % dns_record)
etcd_record = etcd_ctl.get_record(dns_ctl.get_leader_ip(dns_record))
dns_record = dns_ctl.set_record(dns_record, etcd_record['leader'])
if not dns_record:
# DNS update failed, try again
time.sleep(1)
else:
# DNS updated, start etcd process
etcd_ctl.check_proc(etcd_ctl.merge_record(etcd_record, dns_record))
time.sleep(interval)
if __name__ == "__main__":
CHECK_INTERVAL = 10 # number of seconds between checks
SELF_IP = os.environ['ETCD_NAME']
DNS_CTL = DnsCtl(SELF_IP)
ETCD_CTL = EtcdCtl(SELF_IP)
ETCD_MONITOR = subprocess.Popen(['/bin/etcd_monitor.sh'])
while True:
if ETCD_MONITOR.poll() is not None:
logging.critical("etcd monitor script died")
sys.exit(1)
run(DNS_CTL, ETCD_CTL, CHECK_INTERVAL)
sys.stdout.flush()
|
|
# Node Base #
import numpy as np
from abc import ABCMeta, abstractmethod
import ActionBase
# action wrapper for NodeBase.broadcast
class ActionBroadcast(ActionBase.ActionBase):
def __init__(self, node, env, data):
self.node = node
self.env = env
self.data = data
def execute(self):
self.node.broadcast(self.env, self.data)
# action wrapper for NodeBase.process
class ActionProcess(ActionBase.ActionBase):
def __init__(self, node, env, data):
self.node = node
self.env = env
self.data = data
def execute(self):
self.node.process(self.env, self.data)
class InvalidPacket(Exception):
pass
"""
Node Base
Base class for sensor node that allows receiving, processing and
broadcasting as well as methods for triangulation.
"""
class NodeBase(object):
__metaclass__ = ABCMeta
# static node ID
__ID = 0
# static node broadcast delay (in ticks)
BRC_DELAY_MIN = 0
BRC_DELAY_MAX = 5
# static node processing delay (in ticks)
PRC_DELAY_MIN = 0
PRC_DELAY_MAX = 5
def __init__(self, x, y, ss):
# set id of node
self.id = NodeBase.__ID
NodeBase.__ID += 1
# set position
self.pos = np.array([x, y]) * 1.0
# signal strength
self.ss = ss
# list of all landmarks
self.landmarks = list()
# distances to the landmarks
self.landmark_dists = dict()
# keep track of the number of broadcasts
self.broadcasts = 0
# Network #
def schedule_broadcast(self, env, data):
# create new broadcast action
action = ActionBroadcast(self, env, data)
# schedule broadcast
env.action_manager.queue_random(action, \
NodeBase.BRC_DELAY_MIN, NodeBase.BRC_DELAY_MAX)
        # increase the number of queued broadcasts
self.broadcasts += 1
def broadcast(self, env, data):
# decrease number of queued broadcasts
self.broadcasts -= 1
        # broadcast data from the node position with this node's signal strength
env.broadcast(self, data)
def schedule_process(self, env, data):
        # create new process action
action = ActionProcess(self, env, data)
# schedule process
env.action_manager.queue_random(action, \
NodeBase.PRC_DELAY_MIN, NodeBase.PRC_DELAY_MAX)
@abstractmethod
def process(self, env, data):
""" processing of received data """
pass
@abstractmethod
def is_valid(self, data):
""" pre-check if to accept data packet """
pass
def receive(self, env, data):
if not self.is_valid(data):
            raise InvalidPacket # caught by env
# schedule processing of received data
self.schedule_process(env, data)
# "Triangulation" #
def add_landmark(self, landmark, distance):
self.landmarks.append(landmark)
self.landmark_dists[landmark] = distance
# fi(u)
def f_u(self, u, landmark):
return distance(u, landmark.pos) - self.landmark_dists[landmark]
# f(u)
def f_u_vec(self, u):
f_vec = np.empty(len(self.landmarks))
for idx in range(0, len(self.landmarks)):
f_vec[idx] = self.f_u(u, self.landmarks[idx])
return f_vec
# J(u)
def jacobi(self, u):
J = np.empty([len(self.landmarks), 2])
for idx in range(0, len(self.landmarks)):
J[idx][0] = distance_dx(u, self.landmarks[idx].pos)
J[idx][1] = distance_dy(u, self.landmarks[idx].pos)
return J
    def triangulate(self, t):
        if len(self.landmarks) == 0:
            print("WARNING: cannot calculate position without landmarks")
            raise RuntimeError("no landmarks available!")
        if t == "GNA" or t == "gna":
            u = self.triangulate_gna()
        elif t == "GDM" or t == "gdm":
            u = self.triangulate_gdm()
        else:
            raise ValueError("unknown type: " + t)
        if np.isnan(u).any():
            raise RuntimeError("position estimation resulted in invalid position")
        return u
def increment_gna(self, u):
f = self.f_u_vec(u)
J = self.jacobi(u)
Jt = np.transpose(J)
JtJinv = -(np.linalg.inv(np.dot(Jt, J)))
JtJinvJt = np.dot(JtJinv, Jt)
return np.dot(JtJinvJt, f)
def triangulate_gna(self):
u = np.array([0, 0])
for it in range(0, 1000):
inc = self.increment_gna(u)
if np.linalg.norm(inc) < 0.05:
break
u = u + inc
return u
def increment_gdm(self, u):
f = self.f_u_vec(u)
Jt = np.transpose(self.jacobi(u))
# eta = 1.5 see paper
alpha = 1.5 / len(self.landmarks)
return -alpha * np.dot(Jt, f)
def triangulate_gdm(self):
u = np.array([0, 0])
for it in range(0, 1000):
inc = self.increment_gdm(u)
if np.linalg.norm(inc) < 0.05:
                break
u = u + inc
return u
def distance(p1, p2):
d = p1 - p2
return np.sqrt(np.dot(d, d))
def distance_dx(p1, p2):
d = p1[0] - p2[0]
return d / distance(p1, p2)
def distance_dy(p1, p2):
d = p1[1] - p2[1]
return d / distance(p1, p2)
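# Hypothetical, hedged usage sketch (not part of the original module): a minimal
# Gauss-Newton trilateration demo that mirrors increment_gna() using only the
# module-level distance helpers above, so no concrete NodeBase subclass is needed.
if __name__ == "__main__":
    landmarks = [np.array([0.0, 0.0]), np.array([10.0, 0.0]), np.array([0.0, 10.0])]
    true_pos = np.array([3.0, 4.0])
    dists = [distance(true_pos, lm) for lm in landmarks]
    u = np.array([1.0, 1.0])  # initial guess away from any landmark
    for _ in range(100):
        f = np.array([distance(u, lm) - d for lm, d in zip(landmarks, dists)])
        J = np.array([[distance_dx(u, lm), distance_dy(u, lm)] for lm in landmarks])
        inc = -np.dot(np.linalg.inv(np.dot(J.T, J)), np.dot(J.T, f))
        u = u + inc
        if np.linalg.norm(inc) < 1e-6:
            break
    print("estimated position: %s" % u)  # converges towards (3, 4)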
|
|
from __future__ import absolute_import
from django.contrib.auth.decorators import login_required
from django.contrib.messages.views import SuccessMessageMixin
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views import generic
from .test_forms import AuthorForm, ContactForm
from .models import Artist, Author, Book, Page, BookSigning
class CustomTemplateView(generic.TemplateView):
template_name = 'generic_views/about.html'
def get_context_data(self, **kwargs):
context = super(CustomTemplateView, self).get_context_data(**kwargs)
context.update({'key': 'value'})
return context
class ObjectDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
def get_object(self):
return {'foo': 'bar'}
class ArtistDetail(generic.DetailView):
queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
queryset = Author.objects.all()
class PageDetail(generic.DetailView):
queryset = Page.objects.all()
template_name_field = 'template'
class DictList(generic.ListView):
"""A ListView that doesn't use a model."""
queryset = [
{'first': 'John', 'last': 'Lennon'},
{'first': 'Yoko', 'last': 'Ono'}
]
template_name = 'generic_views/list.html'
class ArtistList(generic.ListView):
template_name = 'generic_views/list.html'
queryset = Artist.objects.all()
class AuthorList(generic.ListView):
queryset = Author.objects.all()
class CustomPaginator(Paginator):
def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
super(CustomPaginator, self).__init__(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class AuthorListCustomPaginator(AuthorList):
paginate_by = 5
def get_paginator(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
return super(AuthorListCustomPaginator, self).get_paginator(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class ContactView(generic.FormView):
form_class = ContactForm
success_url = reverse_lazy('authors_list')
template_name = 'generic_views/form.html'
class ArtistCreate(generic.CreateView):
model = Artist
fields = '__all__'
class NaiveAuthorCreate(generic.CreateView):
queryset = Author.objects.all()
fields = '__all__'
class AuthorCreate(generic.CreateView):
model = Author
success_url = '/list/authors/'
fields = '__all__'
class SpecializedAuthorCreate(generic.CreateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class AuthorCreateRestricted(AuthorCreate):
post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
model = Artist
fields = '__all__'
class NaiveAuthorUpdate(generic.UpdateView):
queryset = Author.objects.all()
fields = '__all__'
class AuthorUpdate(generic.UpdateView):
model = Author
success_url = '/list/authors/'
fields = '__all__'
class OneAuthorUpdate(generic.UpdateView):
success_url = '/list/authors/'
fields = '__all__'
def get_object(self):
return Author.objects.get(pk=1)
class SpecializedAuthorUpdate(generic.UpdateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class NaiveAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
template_name = 'generic_views/confirm_delete.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('authors_list')
class BookConfig(object):
queryset = Book.objects.all()
date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
pass
class BookDetail(BookConfig, generic.DateDetailView):
pass
class AuthorGetQuerySetFormView(generic.edit.ModelFormMixin):
fields = '__all__'
def get_queryset(self):
return Author.objects.all()
class BookDetailGetObjectCustomQueryset(BookDetail):
def get_object(self, queryset=None):
return super(BookDetailGetObjectCustomQueryset,self).get_object(
queryset=Book.objects.filter(pk=2))
class CustomMultipleObjectMixinView(generic.list.MultipleObjectMixin, generic.View):
queryset = [
{'name': 'John'},
{'name': 'Yoko'},
]
def get(self, request):
self.object_list = self.get_queryset()
class CustomContextView(generic.detail.SingleObjectMixin, generic.View):
model = Book
object = Book(name='dummy')
def get_object(self):
return Book(name="dummy")
def get_context_data(self, **kwargs):
context = {'custom_key': 'custom_value'}
context.update(kwargs)
return super(CustomContextView, self).get_context_data(**context)
def get_context_object_name(self, obj):
return "test_name"
class BookSigningConfig(object):
model = BookSigning
date_field = 'event_date'
# use the same templates as for books
def get_template_names(self):
return ['generic_views/book%s.html' % self.template_name_suffix]
class BookSigningArchive(BookSigningConfig, generic.ArchiveIndexView):
pass
class BookSigningYearArchive(BookSigningConfig, generic.YearArchiveView):
pass
class BookSigningMonthArchive(BookSigningConfig, generic.MonthArchiveView):
pass
class BookSigningWeekArchive(BookSigningConfig, generic.WeekArchiveView):
pass
class BookSigningDayArchive(BookSigningConfig, generic.DayArchiveView):
pass
class BookSigningTodayArchive(BookSigningConfig, generic.TodayArchiveView):
pass
class BookSigningDetail(BookSigningConfig, generic.DateDetailView):
context_object_name = 'book'
class NonModel(object):
id = "non_model_1"
_meta = None
class NonModelDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
model = NonModel
def get_object(self, queryset=None):
return NonModel()
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run a group of subprocesses and then finish."""
import hashlib
import multiprocessing
import os
import platform
import signal
import string
import subprocess
import sys
import tempfile
import time
import xml.etree.cElementTree as ET
_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
# setup a signal handler so that signal.pause registers 'something'
# when a child finishes
# not using futures and threading to avoid a dependency on subprocess32
if platform.system() == "Windows":
pass
else:
have_alarm = False
def alarm_handler(unused_signum, unused_frame):
global have_alarm
have_alarm = False
signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
signal.signal(signal.SIGALRM, alarm_handler)
_SUCCESS = object()
_FAILURE = object()
_RUNNING = object()
_KILLED = object()
_COLORS = {
'red': [ 31, 0 ],
'green': [ 32, 0 ],
'yellow': [ 33, 0 ],
'lightgray': [ 37, 0],
'gray': [ 30, 1 ],
}
_BEGINNING_OF_LINE = '\x1b[0G'
_CLEAR_LINE = '\x1b[2K'
_TAG_COLOR = {
'FAILED': 'red',
'WARNING': 'yellow',
'TIMEOUT': 'red',
'PASSED': 'green',
'START': 'gray',
'WAITING': 'yellow',
'SUCCESS': 'green',
'IDLE': 'gray',
}
def message(tag, msg, explanatory_text=None, do_newline=False):
if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
return
message.old_tag = tag
message.old_msg = msg
try:
if platform.system() == 'Windows' or not sys.stdout.isatty():
if explanatory_text:
print explanatory_text
print '%s: %s' % (tag, msg)
return
sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
_BEGINNING_OF_LINE,
_CLEAR_LINE,
'\n%s' % explanatory_text if explanatory_text is not None else '',
_COLORS[_TAG_COLOR[tag]][1],
_COLORS[_TAG_COLOR[tag]][0],
tag,
msg,
'\n' if do_newline or explanatory_text is not None else ''))
sys.stdout.flush()
except:
pass
message.old_tag = ""
message.old_msg = ""
def which(filename):
if '/' in filename:
return filename
for path in os.environ['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, filename)):
return os.path.join(path, filename)
raise Exception('%s not found' % filename)
class JobSpec(object):
"""Specifies what to run for a job."""
def __init__(self, cmdline, shortname=None, environ=None, hash_targets=None,
cwd=None, shell=False, timeout_seconds=5*60):
"""
Arguments:
cmdline: a list of arguments to pass as the command line
environ: a dictionary of environment variables to set in the child process
hash_targets: which files to include in the hash representing the jobs version
(or empty, indicating the job should not be hashed)
"""
if environ is None:
environ = {}
if hash_targets is None:
hash_targets = []
self.cmdline = cmdline
self.environ = environ
self.shortname = cmdline[0] if shortname is None else shortname
self.hash_targets = hash_targets or []
self.cwd = cwd
self.shell = shell
self.timeout_seconds = timeout_seconds
def identity(self):
return '%r %r %r' % (self.cmdline, self.environ, self.hash_targets)
def __hash__(self):
return hash(self.identity())
def __cmp__(self, other):
return self.identity() == other.identity()
class Job(object):
"""Manages one job."""
def __init__(self, spec, bin_hash, newline_on_success, travis, add_env, xml_report):
self._spec = spec
self._bin_hash = bin_hash
self._tempfile = tempfile.TemporaryFile()
env = os.environ.copy()
for k, v in spec.environ.iteritems():
env[k] = v
for k, v in add_env.iteritems():
env[k] = v
self._start = time.time()
message('START', spec.shortname, do_newline=travis)
self._process = subprocess.Popen(args=spec.cmdline,
stderr=subprocess.STDOUT,
stdout=self._tempfile,
cwd=spec.cwd,
shell=spec.shell,
env=env)
self._state = _RUNNING
self._newline_on_success = newline_on_success
self._travis = travis
self._xml_test = ET.SubElement(xml_report, 'testcase',
name=self._spec.shortname) if xml_report is not None else None
def state(self, update_cache):
"""Poll current state of the job. Prints messages at completion."""
if self._state == _RUNNING and self._process.poll() is not None:
elapsed = time.time() - self._start
self._tempfile.seek(0)
stdout = self._tempfile.read()
filtered_stdout = filter(lambda x: x in string.printable, stdout.decode(errors='ignore'))
# TODO: looks like jenkins master is slow because parsing the junit results XMLs is not
# implemented efficiently. This is an experiment to workaround the issue by making sure
# results.xml file is small enough.
filtered_stdout = filtered_stdout[-128:]
if self._xml_test is not None:
self._xml_test.set('time', str(elapsed))
ET.SubElement(self._xml_test, 'system-out').text = filtered_stdout
if self._process.returncode != 0:
self._state = _FAILURE
message('FAILED', '%s [ret=%d, pid=%d]' % (
self._spec.shortname, self._process.returncode, self._process.pid),
stdout, do_newline=True)
if self._xml_test is not None:
ET.SubElement(self._xml_test, 'failure', message='Failure').text
else:
self._state = _SUCCESS
message('PASSED', '%s [time=%.1fsec]' % (self._spec.shortname, elapsed),
do_newline=self._newline_on_success or self._travis)
if self._bin_hash:
update_cache.finished(self._spec.identity(), self._bin_hash)
elif self._state == _RUNNING and time.time() - self._start > self._spec.timeout_seconds:
self._tempfile.seek(0)
stdout = self._tempfile.read()
filtered_stdout = filter(lambda x: x in string.printable, stdout.decode(errors='ignore'))
message('TIMEOUT', self._spec.shortname, stdout, do_newline=True)
self.kill()
if self._xml_test is not None:
ET.SubElement(self._xml_test, 'system-out').text = filtered_stdout
ET.SubElement(self._xml_test, 'error', message='Timeout')
return self._state
def kill(self):
if self._state == _RUNNING:
self._state = _KILLED
self._process.terminate()
class Jobset(object):
"""Manages one run of jobs."""
def __init__(self, check_cancelled, maxjobs, newline_on_success, travis,
stop_on_failure, add_env, cache, xml_report):
self._running = set()
self._check_cancelled = check_cancelled
self._cancelled = False
self._failures = 0
self._completed = 0
self._maxjobs = maxjobs
self._newline_on_success = newline_on_success
self._travis = travis
self._cache = cache
self._stop_on_failure = stop_on_failure
self._hashes = {}
self._xml_report = xml_report
self._add_env = add_env
def start(self, spec):
"""Start a job. Return True on success, False on failure."""
while len(self._running) >= self._maxjobs:
if self.cancelled(): return False
self.reap()
if self.cancelled(): return False
if spec.hash_targets:
if spec.identity() in self._hashes:
bin_hash = self._hashes[spec.identity()]
else:
bin_hash = hashlib.sha1()
for fn in spec.hash_targets:
with open(which(fn)) as f:
bin_hash.update(f.read())
bin_hash = bin_hash.hexdigest()
self._hashes[spec.identity()] = bin_hash
should_run = self._cache.should_run(spec.identity(), bin_hash)
else:
bin_hash = None
should_run = True
if should_run:
self._running.add(Job(spec,
bin_hash,
self._newline_on_success,
self._travis,
self._add_env,
self._xml_report))
return True
def reap(self):
"""Collect the dead jobs."""
while self._running:
dead = set()
for job in self._running:
st = job.state(self._cache)
if st == _RUNNING: continue
if st == _FAILURE or st == _KILLED:
self._failures += 1
if self._stop_on_failure:
self._cancelled = True
for job in self._running:
job.kill()
dead.add(job)
break
for job in dead:
self._completed += 1
self._running.remove(job)
if dead: return
if (not self._travis):
message('WAITING', '%d jobs running, %d complete, %d failed' % (
len(self._running), self._completed, self._failures))
if platform.system() == 'Windows':
time.sleep(0.1)
else:
global have_alarm
if not have_alarm:
have_alarm = True
signal.alarm(10)
signal.pause()
def cancelled(self):
"""Poll for cancellation."""
if self._cancelled: return True
if not self._check_cancelled(): return False
for job in self._running:
job.kill()
self._cancelled = True
return True
def finish(self):
while self._running:
if self.cancelled(): pass # poll cancellation
self.reap()
return not self.cancelled() and self._failures == 0
def _never_cancelled():
return False
# cache class that caches nothing
class NoCache(object):
def should_run(self, cmdline, bin_hash):
return True
def finished(self, cmdline, bin_hash):
pass
def run(cmdlines,
check_cancelled=_never_cancelled,
maxjobs=None,
newline_on_success=False,
travis=False,
infinite_runs=False,
stop_on_failure=False,
cache=None,
xml_report=None,
add_env={}):
js = Jobset(check_cancelled,
maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
newline_on_success, travis, stop_on_failure, add_env,
cache if cache is not None else NoCache(),
xml_report)
for cmdline in cmdlines:
if not js.start(cmdline):
break
return js.finish()
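# Hypothetical usage sketch (command lines and shortnames are placeholders, not
# taken from the source): build JobSpec objects and hand them to run().
#   specs = [JobSpec(['echo', 'hello'], shortname='hello'),
#            JobSpec(['true'], shortname='noop')]
#   all_passed = run(specs, maxjobs=2, newline_on_success=True)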
|
|
# -*- encoding: cp1251 -*-
from __future__ import absolute_import, print_function
from pony.py23compat import izip, iteritems
import re, threading, os.path, copy, cPickle
from operator import attrgetter
from itertools import count, cycle
import datetime
from pony import auth
from pony.utils import decorator
from pony.converting import str2py, ValidationError, converters, str2date
from pony.templating import Html, StrHtml, htmljoin, htmltag, html
from pony.web import http
from pony.webutils import component
class FormNotProcessed(Exception): pass
class FormMeta(type):
def __new__(meta, name, bases, dict):
if 'Form' in globals():
for value in dict.values():
if isinstance(value, HtmlField): raise TypeError(
'You cannot place fields inside form class directly. Use __init__ method instead')
init = dict.get('__init__')
if init is not None: dict['__init__'] = _form_init_decorator(init)
return super(FormMeta, meta).__new__(meta, name, bases, dict)
@decorator
def _form_init_decorator(__init__, form, *args, **kwargs):
try: init_counter = form._init_counter
except AttributeError:
if form.__class__ is not Form: Form.__init__.original_func(form)
object.__setattr__(form, '_init_args', cPickle.dumps((args, kwargs), 2))
object.__setattr__(form, '_init_counter', 1)
else: object.__setattr__(form, '_init_counter', init_counter+1)
try: __init__(form, *args, **kwargs)
finally:
init_counter = form._init_counter
object.__setattr__(form, '_init_counter', init_counter-1)
http_303_incompatible_browsers = []
DEFAULT = object()
class Form(object):
__metaclass__ = FormMeta
def __setattr__(form, name, x):
prev = getattr(form, name, None)
if not isinstance(x, HtmlField):
if isinstance(prev, HtmlField): form.__delattr__(name)
object.__setattr__(form, name, x)
return
if name == 'submit': raise TypeError('Invalid form field name: submit')
if hasattr(form, name):
if not isinstance(prev, HtmlField):
raise TypeError('Invalid form field name: %s' % name)
try:
# try..except is necessary because __init__ can be called twice
if isinstance(prev, Hidden): form.hidden_fields.remove(prev)
elif isinstance(prev, Submit): form.submit_fields.remove(prev)
else: form.fields.remove(prev)
except ValueError: pass
if isinstance(x, Hidden): form.hidden_fields.append(x)
elif isinstance(x, Submit): form.submit_fields.append(x)
else: form.fields.append(x)
object.__setattr__(form, name, x)
x._init_(form, name, name.replace('_', ' ').capitalize())
def __delattr__(form, name):
x = getattr(form, name)
if isinstance(x, HtmlField):
try:
# try..except is necessary because __init__ can be called twice
if isinstance(x, Hidden): form.hidden_fields.remove(x)
elif isinstance(x, Submit): form.submit_fields.remove(x)
else: form.fields.remove(x)
except ValueError: pass
object.__delattr__(form, name)
def __init__(form, method='GET', secure=DEFAULT,
prevent_resubmit=True, buttons_align=None, **attrs):
# Note for subclassers: __init__ can be called twice!
object.__setattr__(form, '_pickle_entire_form', False)
object.__setattr__(form, '_cleared', False)
object.__setattr__(form, '_validated', False)
object.__setattr__(form, '_error_text', None)
object.__setattr__(form, '_request', http.request)
object.__setattr__(form, 'attrs', dict((name.lower(), str(value))
for name, value in iteritems(attrs)))
object.__setattr__(form, 'fields', [])
object.__setattr__(form, 'hidden_fields', [])
object.__setattr__(form, 'submit_fields', [])
object.__setattr__(form, '_secure', False)
form._set_method(method)
if secure is DEFAULT: secure = (method=='POST')
form._set_secure(secure)
object.__setattr__(form, 'prevent_resubmit', prevent_resubmit)
object.__setattr__(form, 'buttons_align', buttons_align)
if form.__class__ is not Form and 'name' not in attrs: form.attrs['name'] = form.__class__.__name__
name = form.attrs.get('name')
if name: form._f = Hidden(name)
def __getstate__(form):
state = form._init_args
if form._pickle_entire_form or state is None:
state = form.__dict__.copy()
for attr in ('_pickle_entire_form', '_init_args', '_init_counter',
'_cleared', '_validated', '_error_text', '_request', 'is_submitted'):
state.pop(attr, None)
return state
def __setstate__(form, state):
if isinstance(state, str):
args, kwargs = cPickle.loads(state)
form.__init__(*args, **kwargs)
elif isinstance(state, dict):
state['_pickle_entire_form'] = True
state['_init_args'] = None
state['_init_counter'] = 0
state['_cleared'] = state['_validated'] = False
state['_error_text'] = None
state['_request'] = http.request
form.__dict__.update(state)
form._update_status()
else: assert False # pragma: no cover
def _handle_request_(form):
request = http.request
if not form.is_valid:
request.form_processed = False
return
try: without_redirect = form.on_submit()
except FormNotProcessed: request.form_processed = False
else:
if request.form_processed is None: request.form_processed = True
if without_redirect: return
user_agent = request.environ.get('HTTP_USER_AGENT', '')
for browser in http_303_incompatible_browsers:
if browser in user_agent: raise http.Redirect('.', status='302 Found')
raise http.Redirect(request.full_url, status='303 See Other')
def _get_data(form):
result = {}
for f in form.hidden_fields:
if f.name not in ('_f', '_t'): result[f.name] = f.value
for f in form.fields: result[f.name] = f.value
return result
def _set_data(form, d):
fields = []
for name, value in d.items():
f = getattr(form, name, None)
if f: fields.append(f)
else: raise ValueError("There is no field named '%s' in the form" % name)
for f in fields:
f.value = d[f.name]
data = property(_get_data, _set_data)
def clear(form):
object.__setattr__(form, '_cleared', True)
object.__setattr__(form, 'is_submitted', False)
def _set_method(form, method):
method = method.upper()
if method == 'GET': form.secure = False
elif method != 'POST': raise TypeError('Invalid form method: %s (must be GET or POST)' % method)
object.__setattr__(form, '_method', method)
form._update_status()
method = property(attrgetter('_method'), _set_method)
def _set_secure(form, secure):
if secure == form._secure: return
if secure and form._method == 'GET': raise TypeError('GET form cannot be secure')
object.__setattr__(form, '_secure', secure)
if form._secure: form._t = Ticket()
elif hasattr(form, '_t'): del form._t
form._update_status()
secure = property(attrgetter('_secure'), _set_secure)
def _update_status(form):
object.__setattr__(form, 'is_submitted', False)
request = form._request
        if form._cleared or request.form_processed: return
if request.submitted_form != form.attrs.get('name'): return
object.__setattr__(form, 'is_submitted', True)
@property
def is_valid(form):
if not form.is_submitted: return False
if form.method == 'POST' and http.request.method != 'POST': return False
form._validate()
if form._error_text: return False
for f in form.hidden_fields:
if not f.is_valid: return False
for f in form.fields:
if not f.is_valid: return False
if form._secure and not auth.local.ticket:
return auth.local.ticket # may be False or None
return True
def _validate(form):
if form._validated: return
object.__setattr__(form, '_validated', True)
form.validate()
def validate(form):
pass
def _get_error_text(form):
if not form.is_submitted: return None
if form._cleared or form._request.form_processed: return None
if form._error_text is not None: return form._error_text
form._validate()
for f in form.fields:
if f.error_text: return html('@{Some fields below contain errors}')
if form.is_valid is None: return html('@{The form has already been submitted}')
def _set_error_text(form, text):
object.__setattr__(form, '_error_text', text)
error_text = property(_get_error_text, _set_error_text)
@property
def error(form):
error_text = form.error_text
if not error_text: return ''
return Html('\n<div class="error">%s</div>' % error_text)
@property
def tag(form):
attrs = form.attrs
for f in form.fields:
if isinstance(f, File):
attrs['enctype'] = 'multipart/form-data'
break
error_class = 'has-error' if form.error_text else ''
return htmltag('form', attrs, method=form.method, accept_charset='UTF-8',
_class=('pony ' + error_class).strip())
@property
def header(form):
result = [ form.tag ]
for f in form.hidden_fields: result.append(f.html)
for f in form.fields:
hidden = f.hidden
if hidden: result.append(hidden)
return Html('\n') + Html('\n').join(result)
@property
def footer(form):
return Html('</form>')
@property
def table(form):
result = []
for f in form.fields:
classes = f.__class__.__name__.lower() + '-field-row'
if f.error_text: classes += ' has-error'
result.extend((Html('\n<tr class="%s">\n<th>' % classes),
f.label, Html('</th>\n<td>'), f.tag))
error = f.error
if error: result.append(error)
result.append(Html('</td></tr>'))
return htmljoin(result)
@property
def buttons(form):
if not form.submit_fields: return ''
result = [ htmltag('div', _class='buttons', align=form.buttons_align) ]
buttons = [ f.html for f in form.submit_fields ]
result.extend(buttons)
result.append(Html('\n</div>'))
return htmljoin(result)
def __str__(form):
return StrHtml(unicode(form).encode('ascii', 'xmlcharrefreplace'))
def __unicode__(form):
if form.buttons_align is None:
buttons = Html('\n<tr><td> </td><td>%s</td></tr>') % form.buttons
else: buttons = Html('\n<tr><td colspan="2">%s</td></tr>') % form.buttons
return htmljoin([ form.header, form.error,
Html('\n<table>'),
form.table,
buttons,
Html('\n</table>\n'),
form.footer,
Html('\n')])
html = property(__unicode__)
Form.NotProcessed = FormNotProcessed
Form.DoNotDoRedirect = DoNotDoRedirect = True
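# Hypothetical, hedged sketch of a Form subclass (field names and the handler body
# are made up; Text, Password and Submit are defined further below in this module).
# Fields must be created inside __init__, as enforced by FormMeta above:
#   class LoginForm(Form):
#       def __init__(form, **kwargs):
#           Form.__init__(form, method='POST', **kwargs)
#           form.login = Text('Login', required=True)
#           form.password = Password('Password', required=True)
#           form.ok = Submit('OK')
#       def on_submit(form):
#           pass  # validate form.login.value / form.password.value here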
class HtmlField(object):
def __init__(field, value=None, **attrs):
if 'type' in attrs: raise TypeError('You can set type only for Text fields')
if 'regex' in attrs: raise TypeError('You can set regex only for Text fields')
field.attrs = attrs
field.form = field.name = None
field.initial_value = value
field._label = None
def _init_(field, form, name, label):
object.__setattr__(form, '_validated', False)
field.form = form
field.name = name
if field._label is None: field._label = label
def __getstate__(field):
state = field.__dict__.copy()
# state.pop('_initial_value', None)
state.pop('_new_value', None)
return state
def __setstate__(field, state):
field.__dict__.update(state)
# field._initial_value = None
@property
def is_submitted(field):
form = field.form
if form is None or not form.is_submitted: return False
fields = form._request.fields
if fields.getfirst(field.name) is not None: return True
return fields.getfirst('.' + field.name) is not None
@property
def is_valid(field):
return field.is_submitted
def _get_value(field):
try: return field._new_value
except AttributeError:
if not field.is_submitted: return field.initial_value
value = field.form._request.fields.getfirst(field.name)
if value is None: return None
try: return unicode(value, 'utf8')
except UnicodeDecodeError: raise http.BadRequest
def _set_value(field, value):
form = field.form
if form is None or form._init_counter: field.initial_value = value
else:
field._new_value = value
object.__setattr__(form, '_validated', False)
value = property(_get_value, _set_value)
html_value = property(_get_value)
def __unicode__(field):
value = field.html_value
if value is None: value = ''
return htmltag('input', field.attrs, name=field.name, value=value, type=field.HTML_TYPE,
_class_='%s-field' % field.__class__.__name__.lower())
tag = html = property(__unicode__)
def __str__(field):
return StrHtml(unicode(field).encode('ascii', 'xmlcharrefreplace'))
def __repr__(field):
return '<%s: %s>' % (field.name or '?', field.__class__.__name__)
class Hidden(HtmlField):
HTML_TYPE = 'hidden'
class Ticket(Hidden):
def _get_value(field):
form = field.form
if form is not None and hasattr(form, 'on_submit'): payload = cPickle.dumps(form, 2)
else: payload = None
return auth.get_ticket(payload, form.prevent_resubmit)
def _set_value(field, value):
raise TypeError('Cannot set value for tickets')
value = property(_get_value, _set_value)
html_value = property(_get_value)
class Submit(HtmlField):
HTML_TYPE = 'submit'
def _init_(field, form, name, label, **attrs):
HtmlField._init_(field, form, name, label, **attrs)
if field.initial_value is None: field.initial_value = label
class Reset(Submit):
HTML_TYPE = 'reset'
class BaseWidget(HtmlField):
def __init__(field, label=None, required=None, value=None, **attrs):
if 'id' not in attrs: attrs['id'] = next(http.response.id_counter)
HtmlField.__init__(field, value, **attrs)
field.required = required
field._error_text = None
field._auto_error_text = None
field._set_label(label)
def __getstate__(field):
dict = HtmlField.__getstate__(field)
# dict.pop('_label', None)
dict.pop('_error_text', None)
dict.pop('_auto_error_text', None)
return dict
def __setstate__(field, state):
HtmlField.__setstate__(field, state)
# field.label = None
field._error_text = None
field._auto_error_text = None
@property
def is_valid(field):
return field.is_submitted and not field.error_text
def _get_error_text(field):
form = field.form
if form is None or form._cleared or form._request.form_processed: return None
if field._error_text: return field._error_text
if field.is_submitted: return field._check_error()
return None
def _set_error_text(field, text):
field._error_text = text
error_text = property(_get_error_text, _set_error_text)
def _check_error(field):
value = field.value
if field._auto_error_text: return field._auto_error_text
if field.required and not value: return html('@{This field is required}')
@property
def error(field):
error_text = field.error_text
if not error_text: return ''
return Html('<div class="error">%s</div>') % error_text
def _get_label(field, colon=True, required=True):
if not field._label: return ''
if not (required and field.required): required_html = ''
else: required_html = Html('<sup class="required">*</sup>')
colon_html = Html('<span class="colon">:</span>') if colon else ''
return Html('<label for="%s">%s%s%s</label>') % (
field.attrs['id'], field._label, required_html, colon_html)
def _set_label(field, label):
field._label = label
label = property(_get_label, _set_label)
def __unicode__(field):
return htmljoin((field.label, field.tag, field.error))
html = property(__unicode__)
@property
def hidden(field):
if not field.attrs.get('disabled'): return ''
value = field.html_value
if value is None: value = ''
return htmltag('input', type='hidden', name=field.name, value=value)
class File(BaseWidget):
HTML_TYPE = 'file'
def __init__(field, label=None, required=None, **attrs):
if 'value' in attrs: raise TypeError('Cannot set value of File field')
BaseWidget.__init__(field, label, required, **attrs)
def _init_(field, form, name, label):
if form.method != 'POST': raise TypeError('Only form with method="POST" can contain File fields')
BaseWidget._init_(field, form, name, label)
def _get_value(field):
if not field.is_submitted: return None
fields = field.form._request.fields
try: filename = fields[field.name].filename
except: return None
if not filename: return None
return fields[field.name].file
def _set_value(field, value):
raise TypeError('This property cannot be set')
value = property(_get_value, _set_value)
@property
def filename(field):
if not field.is_submitted: return None
fields = field.form._request.fields
try: filename = fields[field.name].filename
except: return None
if not filename: return None
return os.path.basename(filename)
@property
def tag(field):
return htmltag('input', field.attrs, name=field.name, type=field.HTML_TYPE)
class Password(BaseWidget):
HTML_TYPE = 'password'
class Text(BaseWidget):
HTML_TYPE = 'text'
def __init__(field, label=None, required=None, value=None, type=None, regex=None, **attrs):
BaseWidget.__init__(field, label, required, value, **attrs)
if isinstance(type, basestring) and type not in converters:
raise TypeError('Unknown field type value: %r' % type)
elif isinstance(type, tuple):
if len(type) == 2: type += (None,)
elif len(type) != 3:
raise TypeError('Type tuple length must be 2 or 3. Got: %d' % len(type))
field.type = type
if isinstance(regex, basestring): regex = re.compile(regex, re.UNICODE)
field.regex = regex
def _get_value(field):
value = BaseWidget._get_value(field)
if value is None: return None
if field.regex is not None:
match = field.regex.match(value)
if match is None:
field._auto_error_text = html('@{Invalid data}')
return None
try: return str2py(value, field.type)
except ValidationError as e:
err_msg = e.args[0]
translated_msg = html('@{%s}' % err_msg) # possible template injection?
field._auto_error_text = translated_msg
return None
value = property(_get_value, BaseWidget._set_value)
@property
def html_value(field):
value = BaseWidget._get_value(field)
type = field.type
if value is None or type is None or isinstance(value, unicode): return value
if isinstance(type, tuple): str2py, py2str, err_msg = type
else: str2py, py2str, err_msg = converters.get(type, (field.type, unicode, None))
return py2str(value)
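# Illustrative sketch (not part of the original module): Text fields combining a
# converter type with a validation regex. Passing ``int`` as ``type`` assumes it
# is registered in the module-level ``converters`` mapping; the sketch is wrapped
# in a function because widget construction expects an active request context.
def _example_text_fields():
    age = Text('Age', type=int)                       # value converted via str2py
    code = Text('Code', regex=r'^[A-Z]{3}-\d{4}$')    # regex checked before conversion
    return age, code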
class DatePicker(Text):
def __init__(field, label=None, required=None, value=None, **attrs):
if 'type' in attrs: raise TypeError("You can not set 'type' attribute for DatePicker")
if 'regex' in attrs: raise TypeError("You can not set 'regex' attribute for DatePicker")
Text.__init__(field, label, required, value, **attrs)
@property
@component(css=[ ('/pony/static/jquery/ui.datepicker.css', 'print, projection, screen'),
('/pony/static/jquery/ui.datepicker-ie.css', 'projection, screen', 'if lte IE 7') ],
js=[ '/pony/static/jquery/jquery.js',
'/pony/static/jquery/ui.core.js',
'/pony/static/jquery/ui.datepicker.js',
'/pony/static/js/datepicker.js' ])
def tag(field):
return Text.tag.fget(field)
def _get_value(field):
value = BaseWidget._get_value(field)
if not value: return None
try: return str2date(value)
except: field._auto_error_text = html('@{Incorrect date}')
return None
value = property(_get_value, Text._set_value)
@property
def html_value(field):
value = Text._get_value(field)
if isinstance(value, datetime.date): return value.strftime('%m/%d/%Y')
if value is None: return value
return unicode(value)
class StaticText(BaseWidget):
def __init__(field, value, **attrs):
if 'label' in attrs: raise TypeError("You can not set 'label' attribute for StaticText")
if 'required' in attrs: raise TypeError("You can not set 'required' attribute for StaticText")
BaseWidget.__init__(field, None, None, value, **attrs)
def __unicode__(field):
return Html('<strong>%s</strong>') % field.value
html = tag = property(__unicode__)
@property
def is_valid(field):
return not field.error_text
class TextArea(BaseWidget):
@property
def tag(field):
result = [ htmltag('textarea', field.attrs, name=field.name) ]
if field.value is not None: result.append(field.value)
result.append(Html('</textarea>'))
return htmljoin(result)
class Checkbox(BaseWidget):
HTML_TYPE = 'checkbox'
def _get_value(field):
return bool(BaseWidget._get_value(field))
def _set_value(field, value):
BaseWidget._set_value(field, bool(value))
value = property(_get_value, _set_value)
@property
def tag(field):
result = []
result.append(htmltag('input', field.attrs, name=field.name,
value='yes', checked=bool(field.value),
type = field.HTML_TYPE))
return htmljoin(result)
@property
def hidden(field):
return htmltag('input', name='.'+field.name, type='hidden', value='')
class Select(BaseWidget):
def __init__(field, label=None, required=False, value=None, options=[], **attrs):
BaseWidget.__init__(field, label, required, **attrs)
field._set_options(options)
field.value = value
size = attrs.get('size')
if size is not None: pass
elif not isinstance(field, MultiSelect): field.attrs['size'] = 1
else: field.attrs['size'] = min(len(field.options), 5)
def _set_options(field, options):
field.keys = {}
field.values = {}
options = list(options)
for i, option in enumerate(options):
if isinstance(option, tuple):
if len(option) == 3:
value, description, key = option
key = unicode(key)
elif len(option) == 2:
value, description = option
key = unicode(value)
else: raise TypeError('Invalid option: %r' % option)
description = unicode(description)
else:
value = option
key = description = unicode(value)
option = value, description, key
x = field.keys.setdefault(key, option)
if x is not option: raise TypeError('Duplicate option key: %s' % key)
x = field.values.setdefault(value, option)
if x is not option: raise TypeError('Duplicate option value: %s' % value)
options[i] = option
field._options = tuple(options)
form = field.form
if form is not None: object.__setattr__(form, '_validated', False)
options = property(attrgetter('_options'), _set_options)
def _get_value(field): # for Select and RadioGroup
try: return field._new_value
except AttributeError:
if not field.is_submitted: return field.initial_value
key = field.form._request.fields.getfirst(field.name)
if key is None: return None
try: key = unicode(key, 'utf8')
except UnicodeDecodeError: raise http.BadRequest
option = field.keys.get(key)
if option is None: return None
return option[0]
def _set_value(field, value): # for Select and RadioGroup
if value is not None and value not in field.values:
raise TypeError('Invalid widget value: %r' % value)
form = field.form
if form is None or form._init_counter: field.initial_value = value
else:
field._new_value = value
object.__setattr__(form, '_validated', False)
value = property(_get_value, _set_value)
@property
def tag(field): # for Select and MultiSelect
result = [ htmltag('select', field.attrs, name=field.name, multiple=isinstance(field, MultiSelect)) ]
value = field.value
if isinstance(field, MultiSelect): selection = value
elif value is None: selection = set()
else: selection = set((value,))
for value, description, key in field.options:
if key == description: key = None
result.append(htmltag('option', selected=(value in selection), value=key))
result.append(description)
result.append(Html('</option>'))
result.append(Html('</select>'))
return htmljoin(result)
@property
def hidden(field):
if field.__class__ == Select and str(field.attrs.get('size', '')) == '1': return ''
return htmltag('input', name='.'+field.name, type='hidden', value='')
class AutoSelect(Select):
def __init__(field, label=None, required=False, value=None, options=[], **attrs):
Select.__init__(field, label, required, value, options, onchange='this.form.submit()', **attrs)
@property
def tag(field):
return Select.tag.fget(field) + Html('\n<noscript>\n'
'<input type="submit" value="apply">\n'
'</noscript>\n')
class RadioGroup(Select):
@property
def tag(field):
result = [ htmltag('div', field.attrs, _class='radiobuttons') ]
selected = field.value
for value, description, key in field.options:
result.append(Html('<div class="radiobutton">'))
result.append(htmltag('input', type='radio', name=field.name,
value=key, checked=(value==selected)))
result.append(Html('<span class="value">%s</span></div>') % description)
result.append(Html('</div>'))
result.append(htmltag('input', name='.'+field.name, type='hidden', value=''))
return htmljoin(result)
class MultiSelect(Select):
def _get_value(field):
try: return field._new_value
except AttributeError:
if not field.is_submitted: return field.initial_value.copy()
keys = field.form._request.fields.getlist(field.name)
result = set()
for key in keys:
try: key = unicode(key, 'utf8')
except UnicodeDecodeError: raise http.BadRequest
option = field.keys.get(key)
if option is not None: result.add(option[0])
return result
def _set_value(field, value):
if value is None: values = set()
elif isinstance(value, basestring): values = set((value,))
elif hasattr(value, '__iter__'): values = set(value)
else: values = set((value,))
for value in values:
if value not in field.values: raise TypeError('Invalid widget value: %r' % value)
form = field.form
if form is None or form._init_counter: field.initial_value = values
else:
field._new_value = values
object.__setattr__(form, '_validated', False)
value = property(_get_value, _set_value)
class CheckboxGroup(MultiSelect):
@property
def tag(field):
result = [ htmltag('div', field.attrs, _class='checkboxes') ]
selection = field.value
for value, description, key in field.options:
result.append(Html('<div class="checkboxgroup-item">'))
result.append(htmltag('input', name=field.name, type='checkbox',
value=value, checked=(value in selection)))
result.append(Html('<span class="value">%s</span></div>') % description)
result.append(Html('</div>'))
result.append(htmltag('input', name='.'+field.name, type='hidden', value=''))
return htmljoin(result)
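# Illustrative sketch (not part of the original module): the three option shapes
# accepted by Select and its subclasses (see _set_options above). Wrapped in a
# function because widget construction expects an active request context.
def _example_select_options():
    return Select('Color', options=[
        'red',                      # bare value: key == description == u'red'
        (2, 'Green'),               # (value, description): key == u'2'
        (3, 'Blue', 'blue-key'),    # (value, description, explicit key)
    ])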
class Composite(BaseWidget):
def __init__(composite, label=None, required=None, show_headers=True, **attrs):
BaseWidget.__init__(composite, label, required, **attrs)
composite.show_headers = show_headers
composite.hidden_fields = []
composite.fields = []
def __setattr__(composite, name, x):
prev = getattr(composite, name, None)
if not isinstance(x, HtmlField):
if isinstance(prev, HtmlField): composite.__delattr__(name)
object.__setattr__(composite, name, x)
return
if composite.form is None: raise TypeError('You must first assign the Composite object to the form')
if hasattr(composite, name):
if not isinstance(prev, HtmlField): raise TypeError('Invalid composite item name: %s' % name)
elif isinstance(prev, Hidden): composite.hidden_fields.remove(prev)
else: composite.fields.remove(prev)
if composite.required is not None and x.required is None: x.required = composite.required
if isinstance(x, Hidden): composite.hidden_fields.append(x)
else: composite.fields.append(x)
object.__setattr__(composite, name, x)
field_name = '%s.%s' % (composite.name, name)
field_label = name.replace('_', ' ').capitalize()
x._init_(composite.form, field_name, field_label)
def __delattr__(composite, name):
x = getattr(composite, name)
if isinstance(x, Hidden): composite.hidden_fields.remove(x)
elif isinstance(x, HtmlField): composite.fields.remove(x)
object.__delattr__(composite, name)
@property
def is_submitted(composite):
form = composite.form
if form is None or not form.is_submitted: return False
for field in composite.fields:
if field.is_submitted: return True
return False
def _get_error_text(composite):
form = composite.form
if form is None or not form.is_submitted: return None
if form._cleared or form._request.form_processed: return None
if composite._error_text: return composite._error_text
result = []
for field in composite.fields:
if isinstance(field, Submit): continue
error_text = field.error_text
if not error_text: continue
result.append('%s: %s' % (field._label, error_text))
result = '\n'.join(result)
if result.isspace(): return None
return result
error_text = property(_get_error_text, BaseWidget._set_error_text)
@property
def error(composite):
error_text = composite.error_text
if not error_text: return ''
error_lines = error_text.split('\n')
return Html('<div class="error">%s</div>' % Html('<br>\n').join(error_lines))
def _get_value(composite):
return (field.value for field in composite.fields if not isinstance(field, Submit))
def _set_value(composite, value):
values = list(value)
fields = [ field for field in composite.fields if not isinstance(field, Submit) ]
if len(fields) != len(values): raise TypeError(
'Expected sequence of %d values. Got: %d' % (len(fields), len(values)))
for field, value in izip(fields, values): field.value = value
value = property(_get_value, _set_value)
## def _get_label(composite, colon=True, required=False):
## return BaseWidget._get_label(composite, colon, required)
## label = property(_get_label, BaseWidget._set_label)
@property
def tag(composite):
result = [ Html('\n<table><tr>') ]
if composite.show_headers:
for i, field in enumerate(composite.fields):
if isinstance(field, Submit): label = Html(' ')
else: label = field._get_label(colon=False)
result.append(Html('<th>%s</th>') % label)
result.append(Html('</tr>\n<tr>'))
for i, field in enumerate(composite.fields):
result.append(Html('<td>%s</td>') % field.tag)
result.append(Html('\n</tr></table>\n'))
return htmljoin(result)
def __unicode__(composite):
return htmljoin((composite.label, composite.tag, composite.error))
html = property(__unicode__)
@property
def hidden(composite):
return htmljoin(field.html for field in composite.hidden_fields)
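# Illustrative sketch (not part of the original module): attaching a Composite to
# a form and filling it with sub-fields, which are registered on the form under
# dotted names such as 'address.city'. Assumes ``form`` is a Form instance inside
# an active request context, so the sketch is never executed here.
def _example_composite(form):
    form.address = Composite('Address', show_headers=True)
    form.address.city = Text('City', required=True)
    form.address.zip_code = Text('Zip')
    return list(form.address.value)   # sub-field values, Submit fields excluded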
class Grid(BaseWidget):
def __init__(grid, label=None, columns=None, row_count=0, **attrs):
if columns is None: raise TypeError('%s columns must be specified' % grid.__class__.__name__)
columns = list(columns)
if 'required' in attrs: raise TypeError('%s cannot be required' % grid.__class__.__name__)
BaseWidget.__init__(grid, label, None, **attrs)
grid.columns = columns
grid._rows = []
if row_count: grid.row_count = row_count
def _init_(grid, form, name, label):
BaseWidget._init_(grid, form, name, label)
for i, row in enumerate(grid._rows):
for j, field in enumerate(row):
if field is not None:
field._init_(form, '%s[%d][%d]' % (name, i, j), None)
@property
def col_count(grid):
return len(grid.columns)
def _get_row_count(grid):
return len(grid._rows)
__len__ = _get_row_count
def _set_row_count(grid, size):
delta = size - len(grid._rows)
if delta < 0: grid._rows[delta:] = []
elif delta > 0:
for i in xrange(len(grid._rows), size):
row = tuple(Text() for column in grid.columns)
form = grid.form
if form is not None:
name = grid.name
for j, field in enumerate(row):
field._init_(form, '%s[%d][%d]' % (name, i, j), None)
grid._rows.append(row)
row_count = property(_get_row_count, _set_row_count)
def __iter__(grid):
return iter(grid._rows)
def __getitem__(grid, key):
try: i, j = key
except: return grid._rows[key]
else: return grid._rows[i][j]
def __setitem__(grid, key, value):
try: i, j = key
except: raise TypeError('Key must be pair of integers (row_index, col_index). Got: %r' % key)
row = list(grid._rows[i])
row[j] = value
if value is None: pass
elif not isinstance(value, HtmlField):
raise TypeError('Value must be instance of HtmlField or None. Got: %r' % value)
else: value._init_(grid.form, '%s[%d][%d]' % (grid.name, i, j), None)
grid._rows[i] = tuple(row)
def _get_value(grid):
result = []
for row in grid._rows:
values = []
for x in row:
if x is None: values.append(None)
else: values.append(x.value)
result.append(values)
return result
def _set_value(grid, value):
rows = list(value)
if len(rows) != len(grid._rows): raise TypeError('Incorrect row count')
for i, row in enumerate(rows):
if len(row) != len(grid.columns):
raise TypeError('Incorrect col count in row %d: %d' % (i, len(row)))
        for row, values in izip(grid._rows, rows):
for field, value in izip(row, values):
if field is not None: field.value = value
value = property(_get_value, _set_value)
@property
def is_valid(grid):
if not grid.is_submitted or grid.error_text: return False
for row in grid._rows:
for field in row:
if field is None: continue
if not field.is_valid: return False
return True
@property
def tag(grid):
result = [ Html('\n<table><tr>') ]
for column in grid.columns:
result.append(Html('<th>%s</th>') % column)
result.append(Html('</tr>\n'))
for row, row_class in izip(grid._rows, cycle(('odd', 'even'))):
result.append(Html('<tr class="%s">') % row_class)
for field in row:
if field is None: result.append(Html('<td> </td>'))
else: result.append(Html('<td>%s</td>') % field.tag)
result.append(Html('</tr>\n'))
result.append(Html('</table>\n'))
return htmljoin(result)
@property
def hidden(grid):
result = [ htmltag('input', name='.'+grid.name, type='hidden', value='') ]
for row in grid._rows:
for field in row:
hidden = getattr(field, 'hidden', None)
if hidden: result.append(hidden)
return Html('\n').join(result)
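# Illustrative sketch (not part of the original module): a 2x3 Grid of Text cells
# addressed as grid[row, col]. The grid is attached to a form before cells are
# replaced, because cell initialization registers names such as 'prices[0][0]' on
# the form. Assumes ``form`` is a Form instance inside an active request context.
def _example_grid(form):
    form.prices = Grid('Prices', columns=['Item', 'Qty', 'Price'], row_count=2)
    form.prices[0, 0] = Text(value='Apples')   # replace an individual cell
    return form.prices.value                   # list of per-row value lists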
|
|
r"""
FASTQ format (:mod:`skbio.io.format.fastq`)
===========================================
.. currentmodule:: skbio.io.format.fastq
The FASTQ file format (``fastq``) stores biological (e.g., nucleotide)
sequences and their quality scores in a simple plain text format that is both
human-readable and easy to parse. The file format was invented by Jim Mullikin
at the Wellcome Trust Sanger Institute but wasn't given a formal definition,
though it has informally become a standard file format for storing
high-throughput sequence data. More information about the format and its
variants can be found in [1]_ and [2]_.
Conceptually, a FASTQ file is similar to paired FASTA and QUAL files in that it
stores both biological sequences and their quality scores. FASTQ differs from
FASTA/QUAL because the quality scores are stored in the same file as the
biological sequence data.
An example FASTQ-formatted file containing two DNA sequences and their quality
scores::
@seq1 description 1
AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
+
````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
@seq2 description 2
TATGTATATATAACATATACATATATACATACATA
+
]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |generator of :mod:`skbio.sequence.Sequence` objects |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.alignment.SequenceCollection` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.alignment.Alignment` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.Sequence` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.DNA` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.RNA` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.Protein` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
A FASTQ file contains one or more biological sequences and their corresponding
quality scores stored sequentially as *records*. Each *record* consists of four
sections:
1. Sequence header line consisting of a sequence identifier (ID) and
description (both optional)
2. Biological sequence data (typically stored using the standard IUPAC
lexicon), optionally split over multiple lines
3. Quality header line separating sequence data from quality scores (optionally
repeating the ID and description from the sequence header line)
4. Quality scores as printable ASCII characters, optionally split over multiple
lines. Decoding of quality scores will depend on the specified FASTQ variant
(see below for more details)
For the complete FASTQ format specification, see [1]_. scikit-bio's FASTQ
implementation follows the format specification described in this excellent
publication, including validating the implementation against the FASTQ example
files provided in the publication's supplementary data.
.. note:: IDs and descriptions will be parsed from sequence header lines in
exactly the same way as FASTA headers (:mod:`skbio.io.format.fasta`). IDs,
descriptions, and quality scores are also stored automatically on the
object in the same way as with FASTA.
.. note:: Blank or whitespace-only lines are only allowed at the beginning of
the file, between FASTQ records, or at the end of the file. A blank or
whitespace-only line after the header line, within the sequence, or within
quality scores will raise an error.
scikit-bio will ignore leading and trailing whitespace characters on each
line while reading.
.. note:: Validation may be performed depending on the type of object the data
is being read into. This behavior matches that of FASTA files.
.. note:: scikit-bio will write FASTQ files in a normalized format, with each
record section on a single line. Thus, each record will be composed of
*exactly* four lines. The quality header line won't have the sequence ID and
description repeated.
.. note:: `lowercase` functionality is supported the same as with FASTA.
Quality Score Variants
^^^^^^^^^^^^^^^^^^^^^^
FASTQ associates quality scores with sequence data, with each quality score
encoded as a single printable ASCII character. In scikit-bio, all quality
scores are decoded as Phred quality scores. This is the most common quality
score metric, though there are others (e.g., Solexa quality scores).
Unfortunately, different sequencers have different ways of encoding quality
scores as ASCII characters, notably Sanger and Illumina. Below is a table
highlighting the different encoding variants supported by scikit-bio, as well
as listing the equivalent variant names used in the Open Bioinformatics
Foundation (OBF) [3]_ projects (e.g., Biopython, BioPerl, etc.).
+-----------+---------+----+--------+-----------------------------------------+
| Variant | ASCII |Off\|Quality | Notes |
| | Range |set |Range | |
+===========+=========+====+========+=========================================+
|sanger |33 to 126|33 |0 to 93 |Equivalent to OBF's fastq-sanger. |
+-----------+---------+----+--------+-----------------------------------------+
|illumina1.3|64 to 126|64 |0 to 62 |Equivalent to OBF's fastq-illumina. Use |
| | | | |this if your data was generated using |
| | | | |Illumina 1.3-1.7 software. |
+-----------+---------+----+--------+-----------------------------------------+
|illumina1.8|33 to 95 |33 |0 to 62 |Equivalent to sanger but with 0 to 62 |
| | | | |quality score range check. Use this if |
| | | | |your data was generated using Illumina |
| | | | |1.8 software or later. |
+-----------+---------+----+--------+-----------------------------------------+
|solexa |59 to 126|64 |-5 to 62|Not currently implemented. |
+-----------+---------+----+--------+-----------------------------------------+
.. note:: When writing, Phred quality scores will be truncated to the maximum
value in the variant's range and a warning will be issued. This is
consistent with the OBF projects.
When reading, an error will be raised if a decoded quality score is outside
the variant's range.
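As a rough sketch of the arithmetic (illustrative only), a decoded Phred score is
simply the ASCII code of the quality character minus the variant's offset::
    ord('I') - 33   # sanger/illumina1.8 offset: Phred 40
    ord('I') - 64   # illumina1.3 offset: Phred 9 (same character, different meaning)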
Format Parameters
-----------------
The following parameters are available to all FASTQ format readers and writers:
- ``variant``: A string indicating the quality score variant used to
decode/encode Phred quality scores. Must be one of ``sanger``,
``illumina1.3``, ``illumina1.8``, or ``solexa``. This parameter is preferred
over ``phred_offset`` because additional quality score range checks and
conversions can be performed. It is also more explicit.
- ``phred_offset``: An integer indicating the ASCII code offset used to
decode/encode Phred quality scores. Must be in the range ``[33, 126]``. All
decoded scores will be assumed to be Phred scores (i.e., no additional
conversions are performed). Prefer using ``variant`` over this parameter
whenever possible.
.. note:: You must provide ``variant`` or ``phred_offset`` when reading or
writing a FASTQ file. ``variant`` and ``phred_offset`` cannot both be
provided at the same time.
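For example (illustrative; both calls decode the same Sanger-encoded data)::
    skbio.io.read(fh, format='fastq', variant='sanger')
    skbio.io.read(fh, format='fastq', phred_offset=33)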
The following additional parameters are the same as in FASTA format
(:mod:`skbio.io.format.fasta`):
- ``constructor``: see ``constructor`` parameter in FASTA format
- ``seq_num``: see ``seq_num`` parameter in FASTA format
- ``id_whitespace_replacement``: see ``id_whitespace_replacement`` parameter in
FASTA format
- ``description_newline_replacement``: see ``description_newline_replacement``
parameter in FASTA format
- ``lowercase``: see ``lowercase`` parameter in FASTA format
Examples
--------
Suppose we have the following FASTQ file with two DNA sequences::
@seq1 description 1
AACACCAAACTTCTCCACC
ACGTGAGCTACAAAAGGGT
+seq1 description 1
''''Y^T]']C^CABCACC
    '^LB^CCYT\T\Y\WF^^^
@seq2 description 2
TATGTATATATAACATATACATATATACATACATA
+
]KZ[PY]_[YY^'''AC^\\'BT''C'\AT''BBB
Note that the first sequence and its quality scores are split across multiple
lines, while the second sequence and its quality scores are each on a single
line. Also note that the first sequence has a duplicate ID and description on
the quality header line, while the second sequence does not.
Let's define this file in-memory as a ``StringIO``, though this could be a real
file path, file handle, or anything that's supported by scikit-bio's I/O
registry in practice:
>>> from io import StringIO
>>> fs = '\n'.join([
... r"@seq1 description 1",
... r"AACACCAAACTTCTCCACC",
... r"ACGTGAGCTACAAAAGGGT",
... r"+seq1 description 1",
... r"''''Y^T]']C^CABCACC",
... r"'^LB^CCYT\T\Y\WF^^^",
... r"@seq2 description 2",
... r"TATGTATATATAACATATACATATATACATACATA",
... r"+",
... r"]KZ[PY]_[YY^'''AC^\\'BT''C'\AT''BBB"])
>>> fh = StringIO(fs)
To load the sequences into a ``SequenceCollection``, we run:
>>> from skbio import SequenceCollection
>>> sc = SequenceCollection.read(fh, variant='sanger')
>>> sc
<SequenceCollection: n=2; mean +/- std length=36.50 +/- 1.50>
Note that quality scores are decoded from Sanger. To load the second sequence
as a ``DNA``:
>>> from skbio import DNA
>>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
>>> seq = DNA.read(fh, variant='sanger', seq_num=2)
>>> seq
DNA
----------------------------------------
Metadata:
u'description': u'description 2'
u'id': u'seq2'
Positional metadata:
u'quality': <dtype: uint8>
Stats:
length: 35
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 14.29%
----------------------------------------
0 TATGTATATA TAACATATAC ATATATACAT ACATA
To write our ``SequenceCollection`` to a FASTQ file with quality scores encoded
using the ``illumina1.3`` variant:
>>> new_fh = StringIO()
>>> print(sc.write(new_fh, format='fastq', variant='illumina1.3').getvalue())
@seq1 description 1
AACACCAAACTTCTCCACCACGTGAGCTACAAAAGGGT
+
FFFFx}s|F|b}b`ab`bbF}ka}bbxs{s{x{ve}}}
@seq2 description 2
TATGTATATATAACATATACATATATACATACATA
+
|jyzox|~zxx}FFF`b}{{FasFFbF{`sFFaaa
<BLANKLINE>
>>> new_fh.close()
Note that the file has been written in normalized format: sequence and quality
scores each only occur on a single line and the sequence header line is
not repeated in the quality header line. Note also that the quality scores are
different because they have been encoded using a different variant.
References
----------
.. [1] Peter J. A. Cock, Christopher J. Fields, Naohisa Goto, Michael L. Heuer,
and Peter M. Rice. The Sanger FASTQ file format for sequences with quality
scores, and the Solexa/Illumina FASTQ variants. Nucl. Acids Res. (2010) 38
(6): 1767-1771. first published online December 16, 2009.
doi:10.1093/nar/gkp1137
http://nar.oxfordjournals.org/content/38/6/1767
.. [2] http://en.wikipedia.org/wiki/FASTQ_format
.. [3] http://www.open-bio.org/
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import range, zip
import re
import numpy as np
from skbio.io import create_format, FASTQFormatError
from skbio.io.format._base import (
_decode_qual_to_phred, _encode_phred_to_qual, _get_nth_sequence,
_parse_fasta_like_header, _format_fasta_like_records, _line_generator,
_too_many_blanks)
from skbio.alignment import SequenceCollection, Alignment
from skbio.sequence import Sequence, DNA, RNA, Protein
_whitespace_regex = re.compile(r'\s')
fastq = create_format('fastq')
@fastq.sniffer()
def _fastq_sniffer(fh):
# Strategy:
# Ignore up to 5 blank/whitespace-only lines at the beginning of the
# file. Read up to 10 records. If at least one record is read (i.e. the
# file isn't empty) and the quality scores are in printable ASCII range,
# assume the file is FASTQ.
if _too_many_blanks(fh, 5):
return False, {}
try:
not_empty = False
for _ in zip(range(10), _fastq_to_generator(fh, phred_offset=33)):
not_empty = True
return not_empty, {}
except (FASTQFormatError, ValueError):
return False, {}
@fastq.reader(None)
def _fastq_to_generator(fh, variant=None, phred_offset=None,
constructor=Sequence, **kwargs):
# Skip any blank or whitespace-only lines at beginning of file
seq_header = next(_line_generator(fh, skip_blanks=True))
if not seq_header.startswith('@'):
raise FASTQFormatError(
"Expected sequence (@) header line at start of file: %r"
% str(seq_header))
while seq_header is not None:
id_, desc = _parse_fasta_like_header(seq_header)
seq, qual_header = _parse_sequence_data(fh, seq_header)
if qual_header != '+' and qual_header[1:] != seq_header[1:]:
raise FASTQFormatError(
"Sequence (@) and quality (+) header lines do not match: "
"%r != %r" % (str(seq_header[1:]), str(qual_header[1:])))
phred_scores, seq_header = _parse_quality_scores(fh, len(seq),
variant,
phred_offset,
qual_header)
yield constructor(seq, metadata={'id': id_, 'description': desc},
positional_metadata={'quality': phred_scores},
**kwargs)
@fastq.reader(Sequence)
def _fastq_to_biological_sequence(fh, variant=None, phred_offset=None,
seq_num=1):
return _get_nth_sequence(
_fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
constructor=Sequence),
seq_num)
@fastq.reader(DNA)
def _fastq_to_dna_sequence(fh, variant=None, phred_offset=None, seq_num=1,
**kwargs):
return _get_nth_sequence(
_fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
constructor=DNA, **kwargs),
seq_num)
@fastq.reader(RNA)
def _fastq_to_rna_sequence(fh, variant=None, phred_offset=None, seq_num=1,
**kwargs):
return _get_nth_sequence(
_fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
constructor=RNA, **kwargs),
seq_num)
@fastq.reader(Protein)
def _fastq_to_protein_sequence(fh, variant=None, phred_offset=None, seq_num=1,
**kwargs):
return _get_nth_sequence(
_fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
constructor=Protein,
**kwargs),
seq_num)
@fastq.reader(SequenceCollection)
def _fastq_to_sequence_collection(fh, variant=None, phred_offset=None,
constructor=Sequence, **kwargs):
return SequenceCollection(
list(_fastq_to_generator(fh, variant=variant,
phred_offset=phred_offset,
constructor=constructor, **kwargs)))
@fastq.reader(Alignment)
def _fastq_to_alignment(fh, variant=None, phred_offset=None,
constructor=Sequence, **kwargs):
return Alignment(
list(_fastq_to_generator(fh, variant=variant,
phred_offset=phred_offset,
constructor=constructor, **kwargs)))
@fastq.writer(None)
def _generator_to_fastq(obj, fh, variant=None, phred_offset=None,
id_whitespace_replacement='_',
description_newline_replacement=' ', lowercase=None):
formatted_records = _format_fasta_like_records(
obj, id_whitespace_replacement, description_newline_replacement, True,
lowercase=lowercase)
for header, seq_str, qual_scores in formatted_records:
qual_str = _encode_phred_to_qual(qual_scores, variant=variant,
phred_offset=phred_offset)
fh.write('@')
fh.write(header)
fh.write('\n')
fh.write(seq_str)
fh.write('\n+\n')
fh.write(qual_str)
fh.write('\n')
@fastq.writer(Sequence)
def _biological_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
id_whitespace_replacement='_',
description_newline_replacement=' '):
_sequences_to_fastq([obj], fh, variant, phred_offset,
id_whitespace_replacement,
description_newline_replacement)
@fastq.writer(DNA)
def _dna_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
id_whitespace_replacement='_',
description_newline_replacement=' ',
lowercase=None):
_sequences_to_fastq([obj], fh, variant, phred_offset,
id_whitespace_replacement,
description_newline_replacement, lowercase=lowercase)
@fastq.writer(RNA)
def _rna_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
id_whitespace_replacement='_',
description_newline_replacement=' ',
lowercase=None):
_sequences_to_fastq([obj], fh, variant, phred_offset,
id_whitespace_replacement,
description_newline_replacement, lowercase=lowercase)
@fastq.writer(Protein)
def _protein_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
id_whitespace_replacement='_',
description_newline_replacement=' ',
lowercase=None):
_sequences_to_fastq([obj], fh, variant, phred_offset,
id_whitespace_replacement,
description_newline_replacement, lowercase=lowercase)
@fastq.writer(SequenceCollection)
def _sequence_collection_to_fastq(obj, fh, variant=None, phred_offset=None,
id_whitespace_replacement='_',
description_newline_replacement=' ',
lowercase=None):
_sequences_to_fastq(obj, fh, variant, phred_offset,
id_whitespace_replacement,
description_newline_replacement, lowercase=lowercase)
@fastq.writer(Alignment)
def _alignment_to_fastq(obj, fh, variant=None, phred_offset=None,
id_whitespace_replacement='_',
description_newline_replacement=' ',
lowercase=None):
_sequences_to_fastq(obj, fh, variant, phred_offset,
id_whitespace_replacement,
description_newline_replacement, lowercase=lowercase)
def _blank_error(unique_text):
error_string = ("Found blank or whitespace-only line {} in "
"FASTQ file").format(unique_text)
raise FASTQFormatError(error_string)
def _parse_sequence_data(fh, prev):
seq_chunks = []
for chunk in _line_generator(fh, skip_blanks=False):
if chunk.startswith('+'):
if not prev:
_blank_error("before '+'")
if not seq_chunks:
raise FASTQFormatError(
"Found FASTQ record without sequence data.")
return ''.join(seq_chunks), chunk
elif chunk.startswith('@'):
raise FASTQFormatError(
"Found FASTQ record that is missing a quality (+) header line "
"after sequence data.")
else:
if not prev:
_blank_error("after header or within sequence")
if _whitespace_regex.search(chunk):
raise FASTQFormatError(
"Found whitespace in sequence data: %r" % str(chunk))
seq_chunks.append(chunk)
prev = chunk
raise FASTQFormatError(
"Found incomplete/truncated FASTQ record at end of file.")
def _parse_quality_scores(fh, seq_len, variant, phred_offset, prev):
phred_scores = []
qual_len = 0
for chunk in _line_generator(fh, skip_blanks=False):
if chunk:
if chunk.startswith('@') and qual_len == seq_len:
return np.hstack(phred_scores), chunk
else:
if not prev:
_blank_error("after '+' or within quality scores")
qual_len += len(chunk)
if qual_len > seq_len:
raise FASTQFormatError(
"Found more quality score characters than sequence "
"characters. Extra quality score characters: %r" %
chunk[-(qual_len - seq_len):])
phred_scores.append(
_decode_qual_to_phred(chunk, variant=variant,
phred_offset=phred_offset))
prev = chunk
if qual_len != seq_len:
raise FASTQFormatError(
"Found incomplete/truncated FASTQ record at end of file.")
return np.hstack(phred_scores), None
def _sequences_to_fastq(obj, fh, variant, phred_offset,
id_whitespace_replacement,
description_newline_replacement, lowercase=None):
def seq_gen():
for seq in obj:
yield seq
_generator_to_fastq(
seq_gen(), fh, variant=variant, phred_offset=phred_offset,
id_whitespace_replacement=id_whitespace_replacement,
description_newline_replacement=description_newline_replacement,
lowercase=lowercase)
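# Illustrative usage sketch (not part of the module): lazily iterating FASTQ
# records through the generator reader registered above via scikit-bio's I/O
# registry. The 'path' argument and the yielded summary tuples are assumptions.
def _example_iter_fastq(path):
    import skbio.io
    for seq in skbio.io.read(path, format='fastq', variant='sanger'):
        # each record carries 'id'/'description' metadata and per-position
        # 'quality' scores, as described in the module docstring above
        yield seq.metadata['id'], len(seq)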
|
|
__docformat__ = "reStructuredText"
import logging
import platform
import weakref
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Hashable,
List,
Optional,
Set,
Tuple,
Union,
)
from pymunk.shape_filter import ShapeFilter
from pymunk.space_debug_draw_options import SpaceDebugDrawOptions
from . import _chipmunk_cffi, _version
cp = _chipmunk_cffi.lib
ffi = _chipmunk_cffi.ffi
from pymunk.constraints import Constraint
from ._pickle import PickleMixin, _State
from .body import Body
from .collision_handler import CollisionHandler
from .contact_point_set import ContactPointSet
from .query_info import PointQueryInfo, SegmentQueryInfo, ShapeQueryInfo
from .shapes import Shape
from .vec2d import Vec2d
if TYPE_CHECKING:
from .bb import BB
_AddableObjects = Union[Body, Shape, Constraint]
_logger = logging.getLogger(__name__)
class Space(PickleMixin, object):
"""Spaces are the basic unit of simulation. You add rigid bodies, shapes
and joints to it and then step them all forward together through time.
A Space can be copied and pickled. Note that any post step callbacks are
not copied. Also note that some internal collision cache data is not copied,
which can make the simulation a bit unstable the first few steps of the
fresh copy.
Custom properties set on the space will also be copied/pickled.
Any collision handlers will also be copied/pickled. Note that depending on
the pickle protocol used there are some restrictions on what functions can
be copied/pickled.
Example::
>>> import pymunk, pickle
>>> space = pymunk.Space()
>>> space2 = space.copy()
>>> space3 = pickle.loads(pickle.dumps(space))
"""
_pickle_attrs_init = PickleMixin._pickle_attrs_init + ["threaded"]
_pickle_attrs_general = PickleMixin._pickle_attrs_general + [
"iterations",
"gravity",
"damping",
"idle_speed_threshold",
"sleep_time_threshold",
"collision_slop",
"collision_bias",
"collision_persistence",
"threads",
]
def __init__(self, threaded: bool = False) -> None:
"""Create a new instance of the Space.
If you set threaded=True the step function will run in threaded mode
which might give a speedup. Note that even when you set threaded=True
you still have to set Space.threads=2 to actually use more than one
thread.
Also note that threaded mode is not available on Windows, and setting
threaded=True has no effect on that platform.
"""
self.threaded = threaded and platform.system() != "Windows"
if self.threaded:
cp_space = cp.cpHastySpaceNew()
freefunc = cp.cpHastySpaceFree
else:
cp_space = cp.cpSpaceNew()
freefunc = cp.cpSpaceFree
def spacefree(cp_space): # type: ignore
_logger.debug("spacefree start %s", cp_space)
cp_shapes = []
@ffi.callback("cpSpaceShapeIteratorFunc")
def cf1(cp_shape, data): # type: ignore
# print("spacefree shapecallback")
cp_shapes.append(cp_shape)
# cp_space = cp.cpShapeGetSpace(cp_shape)
# cp.cpSpaceRemoveShape(cp_space, cp_shape)
# print("spacefree shapes", cp_space)
cp.cpSpaceEachShape(cp_space, cf1, ffi.NULL)
for cp_shape in cp_shapes:
_logger.debug("spacefree remove shape %s %s", cp_space, cp_shape)
cp.cpSpaceRemoveShape(cp_space, cp_shape)
cp.cpShapeSetBody(cp_shape, ffi.NULL)
cp_constraints = []
@ffi.callback("cpSpaceConstraintIteratorFunc")
def cf2(cp_constraint, data): # type: ignore
# print("spacefree shapecallback")
cp_constraints.append(cp_constraint)
cp.cpSpaceEachConstraint(cp_space, cf2, ffi.NULL)
for cp_constraint in cp_constraints:
_logger.debug(
"spacefree remove constraint %s %s", cp_space, cp_constraint
)
cp.cpSpaceRemoveConstraint(cp_space, cp_constraint)
cp_bodies = []
@ffi.callback("cpSpaceBodyIteratorFunc")
def cf3(cp_body, data): # type:ignore
# print("spacefree shapecallback")
cp_bodies.append(cp_body)
cp.cpSpaceEachBody(cp_space, cf3, ffi.NULL)
for cp_body in cp_bodies:
_logger.debug("spacefree remove body %s %s", cp_space, cp_body)
cp.cpSpaceRemoveBody(cp_space, cp_body)
_logger.debug("spacefree free %s", cp_space)
freefunc(cp_space)
self._space = ffi.gc(cp_space, spacefree)
self._handlers: Dict[
Any, CollisionHandler
        ] = {}  # To prevent the gc from collecting the callbacks.
self._post_step_callbacks: Dict[Any, Callable[["Space"], None]] = {}
self._removed_shapes: Dict[int, Shape] = {}
self._shapes: Dict[int, Shape] = {}
self._bodies: Dict[Body, None] = {}
self._static_body: Optional[Body] = None
self._constraints: Dict[Constraint, None] = {}
self._locked = False
self._add_later: Set[_AddableObjects] = set()
self._remove_later: Set[_AddableObjects] = set()
def _get_self(self) -> "Space":
return self
@property
def shapes(self) -> List[Shape]:
"""A list of all the shapes added to this space
(includes both static and non-static)
"""
return list(self._shapes.values())
@property
def bodies(self) -> List[Body]:
"""A list of the bodies added to this space"""
return list(self._bodies)
@property
def constraints(self) -> List[Constraint]:
"""A list of the constraints added to this space"""
return list(self._constraints)
def _setup_static_body(self, static_body: Body) -> None:
static_body._space = weakref.proxy(self) # type: ignore
cp.cpSpaceAddBody(self._space, static_body._body)
@property
def static_body(self) -> Body:
"""A dedicated static body for the space.
You don't have to use it, but many times it can be convenient to have
a static body together with the space.
"""
if self._static_body is None:
self._static_body = Body(body_type=Body.STATIC)
self._setup_static_body(self._static_body)
# self.add(self._static_body)
# b = cp.cpSpaceGetStaticBody(self._space)
# self._static_body = Body._init_with_body(b)
# self._static_body._space = self
# assert self._static_body is not None
return self._static_body
def _set_iterations(self, value: int) -> None:
cp.cpSpaceSetIterations(self._space, value)
def _get_iterations(self) -> int:
return cp.cpSpaceGetIterations(self._space)
iterations = property(
_get_iterations,
_set_iterations,
doc="""Iterations allow you to control the accuracy of the solver.
Defaults to 10.
Pymunk uses an iterative solver to figure out the forces between
objects in the space. What this means is that it builds a big list of
all of the collisions, joints, and other constraints between the
bodies and makes several passes over the list considering each one
individually. The number of passes it makes is the iteration count,
and each iteration makes the solution more accurate. If you use too
many iterations, the physics should look nice and solid, but may use
up too much CPU time. If you use too few iterations, the simulation
may seem mushy or bouncy when the objects should be solid. Setting
the number of iterations lets you balance between CPU usage and the
accuracy of the physics. Pymunk's default of 10 iterations is
sufficient for most simple games.
""",
)
def _set_gravity(self, gravity_vector: Tuple[float, float]) -> None:
assert len(gravity_vector) == 2
cp.cpSpaceSetGravity(self._space, gravity_vector)
def _get_gravity(self) -> Vec2d:
v = cp.cpSpaceGetGravity(self._space)
return Vec2d(v.x, v.y)
gravity = property(
_get_gravity,
_set_gravity,
doc="""Global gravity applied to the space.
Defaults to (0,0). Can be overridden on a per body basis by writing
custom integration functions and set it on the body:
:py:meth:`pymunk.Body.velocity_func`.
""",
)
def _set_damping(self, damping: float) -> None:
cp.cpSpaceSetDamping(self._space, damping)
def _get_damping(self) -> float:
return cp.cpSpaceGetDamping(self._space)
damping = property(
_get_damping,
_set_damping,
doc="""Amount of simple damping to apply to the space.
A value of 0.9 means that each body will lose 10% of its velocity per
second. Defaults to 1. Like gravity, it can be overridden on a per
body basis.
""",
)
def _set_idle_speed_threshold(self, idle_speed_threshold: float) -> None:
cp.cpSpaceSetIdleSpeedThreshold(self._space, idle_speed_threshold)
def _get_idle_speed_threshold(self) -> float:
return cp.cpSpaceGetIdleSpeedThreshold(self._space)
idle_speed_threshold = property(
_get_idle_speed_threshold,
_set_idle_speed_threshold,
doc="""Speed threshold for a body to be considered idle.
The default value of 0 means the space estimates a good threshold
based on gravity.
""",
)
def _set_sleep_time_threshold(self, sleep_time_threshold: float) -> None:
cp.cpSpaceSetSleepTimeThreshold(self._space, sleep_time_threshold)
def _get_sleep_time_threshold(self) -> float:
return cp.cpSpaceGetSleepTimeThreshold(self._space)
sleep_time_threshold = property(
_get_sleep_time_threshold,
_set_sleep_time_threshold,
doc="""Time a group of bodies must remain idle in order to fall
asleep.
The default value of `inf` disables the sleeping algorithm.
""",
)
def _set_collision_slop(self, collision_slop: float) -> None:
cp.cpSpaceSetCollisionSlop(self._space, collision_slop)
def _get_collision_slop(self) -> float:
return cp.cpSpaceGetCollisionSlop(self._space)
collision_slop = property(
_get_collision_slop,
_set_collision_slop,
doc="""Amount of overlap between shapes that is allowed.
To improve stability, set this as high as you can without noticeable
overlapping. It defaults to 0.1.
""",
)
def _set_collision_bias(self, collision_bias: float) -> None:
cp.cpSpaceSetCollisionBias(self._space, collision_bias)
def _get_collision_bias(self) -> float:
return cp.cpSpaceGetCollisionBias(self._space)
collision_bias = property(
_get_collision_bias,
_set_collision_bias,
doc="""Determines how fast overlapping shapes are pushed apart.
Pymunk allows fast moving objects to overlap, then fixes the overlap
over time. Overlapping objects are unavoidable even if swept
collisions are supported, and this is an efficient and stable way to
deal with overlapping objects. The bias value controls what
percentage of overlap remains unfixed after a second and defaults
to ~0.2%. Valid values are in the range from 0 to 1, but using 0 is
not recommended for stability reasons. The default value is
calculated as cpfpow(1.0f - 0.1f, 60.0f) meaning that pymunk attempts
        to correct 10% of error every 1/60th of a second.
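        Expressed as plain Python (an illustrative aside), the default bias is
        ``pow(1.0 - 0.1, 60.0)``, roughly 0.0018, i.e. about 0.2% of the
        overlap is left unfixed after one second.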
        .. Note::
Very very few games will need to change this value.
""",
)
def _set_collision_persistence(self, collision_persistence: float) -> None:
cp.cpSpaceSetCollisionPersistence(self._space, collision_persistence)
def _get_collision_persistence(self) -> float:
return cp.cpSpaceGetCollisionPersistence(self._space)
collision_persistence = property(
_get_collision_persistence,
_set_collision_persistence,
doc="""The number of frames the space keeps collision solutions
around for.
Helps prevent jittering contacts from getting worse. This defaults
to 3.
        .. Note::
Very very few games will need to change this value.
""",
)
def _get_current_time_step(self) -> int:
return cp.cpSpaceGetCurrentTimeStep(self._space)
current_time_step = property(
_get_current_time_step,
doc="""Retrieves the current (if you are in a callback from
Space.step()) or most recent (outside of a Space.step() call)
timestep.
""",
)
def add(self, *objs: _AddableObjects) -> None:
"""Add one or many shapes, bodies or constraints (joints) to the space
        Unlike Chipmunk and earlier versions of pymunk it is now allowed to add
objects even from a callback during the simulation step. However, the
add will not be performed until the end of the step.
"""
if self._locked:
self._add_later.update(objs)
return
# add bodies first, since the shapes require their bodies to be
# already added. This allows code like space.add(shape, body).
for o in objs:
if isinstance(o, Body):
self._add_body(o)
for o in objs:
if isinstance(o, Body):
pass
elif isinstance(o, Shape):
self._add_shape(o)
elif isinstance(o, Constraint):
self._add_constraint(o)
else:
raise Exception(f"Unsupported type {type(o)} of {o}.")
def remove(self, *objs: _AddableObjects) -> None:
"""Remove one or many shapes, bodies or constraints from the space
        Unlike Chipmunk and earlier versions of Pymunk it is now allowed to
remove objects even from a callback during the simulation step.
However, the removal will not be performed until the end of the step.
.. Note::
When removing objects from the space, make sure you remove any
other objects that reference it. For instance, when you remove a
body, remove the joints and shapes attached to it.
"""
if self._locked:
self._remove_later.update(objs)
return
for o in objs:
if isinstance(o, Body):
self._remove_body(o)
elif isinstance(o, Shape):
self._remove_shape(o)
elif isinstance(o, Constraint):
self._remove_constraint(o)
else:
raise Exception(f"Unsupported type {type(o)} of {o}.")
def _add_shape(self, shape: "Shape") -> None:
"""Adds a shape to the space"""
# print("addshape", self._space, shape)
assert shape._id not in self._shapes, "Shape already added to space."
assert (
shape.space == None
), "Shape already added to another space. A shape can only be in one space at a time."
assert shape.body != None, "The shape's body is not set."
assert (
shape.body.space == self
), "The shape's body must be added to the space before (or at the same time) as the shape."
shape._space = weakref.proxy(self)
self._shapes[shape._id] = shape
cp.cpSpaceAddShape(self._space, shape._shape)
def _add_body(self, body: "Body") -> None:
"""Adds a body to the space"""
assert body not in self._bodies, "Body already added to this space."
assert body.space == None, "Body already added to another space."
body._space = weakref.proxy(self)
self._bodies[body] = None
cp.cpSpaceAddBody(self._space, body._body)
def _add_constraint(self, constraint: "Constraint") -> None:
"""Adds a constraint to the space"""
assert constraint not in self._constraints, "Constraint already added to space."
self._constraints[constraint] = None
cp.cpSpaceAddConstraint(self._space, constraint._constraint)
def _remove_shape(self, shape: "Shape") -> None:
"""Removes a shape from the space"""
assert shape._id in self._shapes, "shape not in space, already removed?"
self._removed_shapes[shape._id] = shape
shape._space = None
# During GC at program exit sometimes the shape might already be removed. Then skip this step.
if cp.cpSpaceContainsShape(self._space, shape._shape):
cp.cpSpaceRemoveShape(self._space, shape._shape)
del self._shapes[shape._id]
def _remove_body(self, body: "Body") -> None:
"""Removes a body from the space"""
assert body in self._bodies, "body not in space, already removed?"
body._space = None
# During GC at program exit sometimes the shape might already be removed. Then skip this step.
if cp.cpSpaceContainsBody(self._space, body._body):
cp.cpSpaceRemoveBody(self._space, body._body)
del self._bodies[body]
def _remove_constraint(self, constraint: "Constraint") -> None:
"""Removes a constraint from the space"""
assert (
constraint in self._constraints
), "constraint not in space, already removed?"
# print("remove", constraint, constraint._constraint, self._constraints)
        # During GC at program exit sometimes the constraint might already be removed. Then skip this step.
if cp.cpSpaceContainsConstraint(self._space, constraint._constraint):
cp.cpSpaceRemoveConstraint(self._space, constraint._constraint)
del self._constraints[constraint]
def reindex_shape(self, shape: Shape) -> None:
"""Update the collision detection data for a specific shape in the
space.
"""
cp.cpSpaceReindexShape(self._space, shape._shape)
def reindex_shapes_for_body(self, body: Body) -> None:
"""Reindex all the shapes for a certain body."""
cp.cpSpaceReindexShapesForBody(self._space, body._body)
def reindex_static(self) -> None:
"""Update the collision detection info for the static shapes in the
space. You only need to call this if you move one of the static shapes.
"""
cp.cpSpaceReindexStatic(self._space)
def _get_threads(self) -> int:
if self.threaded:
return int(cp.cpHastySpaceGetThreads(self._space))
return 1
def _set_threads(self, n: int) -> None:
if self.threaded:
cp.cpHastySpaceSetThreads(self._space, n)
threads = property(
_get_threads,
_set_threads,
doc="""The number of threads to use for running the step function.
Only valid when the Space was created with threaded=True. Currently the
        max limit is 2; setting a higher value won't have any effect. The
        default is 1 regardless of whether the Space was created with
        threaded=True, to keep determinism in the simulation. Note that
        Windows does not support the threaded solver.
""",
)
def use_spatial_hash(self, dim: float, count: int) -> None:
"""Switch the space to use a spatial hash instead of the bounding box
tree.
Pymunk supports two spatial indexes. The default is an axis-aligned
bounding box tree inspired by the one used in the Bullet Physics
library, but caching of overlapping leaves was added to give it very
good temporal coherence. The tree requires no tuning, and most games
        will find that they get the best performance using the tree. The
        other spatial index type available is a spatial hash, which
can be much faster when you have a very large number (1000s) of
objects that are all the same size. For smaller numbers of objects,
or objects that vary a lot in size, the spatial hash is usually much
slower. It also requires tuning (usually through experimentation) to
get the best possible performance.
The spatial hash data is fairly size sensitive. dim is the size of
the hash cells. Setting dim to the average collision shape size is
likely to give the best performance. Setting dim too small will cause
        the shape to be inserted into many cells; setting it too large will
        crowd too many objects into the same hash slot.
count is the suggested minimum number of cells in the hash table. If
there are too few cells, the spatial hash will return many false
positives. Too many cells will be hard on the cache and waste memory.
Setting count to ~10x the number of objects in the space is probably a
good starting point. Tune from there if necessary.
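        A minimal sketch (illustrative values only)::
            >>> import pymunk
            >>> space = pymunk.Space()
            >>> # ~100 similarly sized 10x10 boxes: dim ~ shape size, count ~ 10x objects
            >>> space.use_spatial_hash(dim=10, count=1000)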
:param dim: the size of the hash cells
:param count: the suggested minimum number of cells in the hash table
"""
cp.cpSpaceUseSpatialHash(self._space, dim, count)
def step(self, dt: float) -> None:
"""Update the space for the given time step.
Using a fixed time step is highly recommended. Doing so will increase
the efficiency of the contact persistence, requiring an order of
magnitude fewer iterations to resolve the collisions in the usual case.
It is not the same to call step 10 times with a dt of 0.1 and
calling it 100 times with a dt of 0.01 even if the end result is
        that the simulation moved forward the same total time. Performing multiple
calls with a smaller dt creates a more stable and accurate
        simulation. Therefore it sometimes makes sense to have a little for loop
around the step call, like in this example:
>>> import pymunk
>>> s = pymunk.Space()
>>> steps = 10
>>> for x in range(steps): # move simulation forward 0.1 seconds:
... s.step(0.1 / steps)
:param dt: Time step length
"""
try:
self._locked = True
if self.threaded:
cp.cpHastySpaceStep(self._space, dt)
else:
cp.cpSpaceStep(self._space, dt)
self._removed_shapes = {}
finally:
self._locked = False
self.add(*self._add_later)
self._add_later.clear()
for obj in self._remove_later:
self.remove(obj)
self._remove_later.clear()
for key in self._post_step_callbacks:
self._post_step_callbacks[key](self)
self._post_step_callbacks = {}
def add_collision_handler(
self, collision_type_a: int, collision_type_b: int
) -> CollisionHandler:
"""Return the :py:class:`CollisionHandler` for collisions between
objects of type collision_type_a and collision_type_b.
Fill the desired collision callback functions, for details see the
:py:class:`CollisionHandler` object.
Whenever shapes with collision types (:py:attr:`Shape.collision_type`)
a and b collide, this handler will be used to process the collision
events. When a new collision handler is created, the callbacks will all be
set to builtin callbacks that perform the default behavior (call the
wildcard handlers, and accept all collisions).
:param int collision_type_a: Collision type a
:param int collision_type_b: Collision type b
:rtype: :py:class:`CollisionHandler`
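A minimal sketch (collision types 1 and 2 and the begin callback are
illustrative):
>>> import pymunk
>>> s = pymunk.Space()
>>> def begin(arbiter, space, data):
...     return True
>>> h = s.add_collision_handler(1, 2)
>>> h.begin = begin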
"""
key = min(collision_type_a, collision_type_b), max(
collision_type_a, collision_type_b
)
if key in self._handlers:
return self._handlers[key]
h = cp.cpSpaceAddCollisionHandler(
self._space, collision_type_a, collision_type_b
)
ch = CollisionHandler(h, self)
self._handlers[key] = ch
return ch
def add_wildcard_collision_handler(self, collision_type_a: int) -> CollisionHandler:
"""Add a wildcard collision handler for given collision type.
This handler will be used any time an object with this type collides
with another object, regardless of its type. A good example is a
projectile that should be destroyed the first time it hits anything.
There may be a specific collision handler and two wildcard handlers.
It's up to the specific handler to decide if and when to call the
wildcard handlers and what to do with their return values.
When a new wildcard handler is created, the callbacks will all be
set to builtin callbacks that perform the default behavior. (accept
all collisions in :py:func:`~CollisionHandler.begin` and
:py:func:`~CollisionHandler.pre_solve`, or do nothing for
:py:func:`~CollisionHandler.post_solve` and
:py:func:`~CollisionHandler.separate`).
:param int collision_type_a: Collision type
:rtype: :py:class:`CollisionHandler`
"""
if collision_type_a in self._handlers:
return self._handlers[collision_type_a]
h = cp.cpSpaceAddWildcardHandler(self._space, collision_type_a)
ch = CollisionHandler(h, self)
self._handlers[collision_type_a] = ch
return ch
def add_default_collision_handler(self) -> CollisionHandler:
"""Return a reference to the default collision handler or that is
used to process all collisions that don't have a more specific
handler.
The default behavior for each of the callbacks is to call
the wildcard handlers, ANDing their return values together if
applicable.
"""
if None in self._handlers:
return self._handlers[None]
_h = cp.cpSpaceAddDefaultCollisionHandler(self._space)
h = CollisionHandler(_h, self)
self._handlers[None] = h
return h
def add_post_step_callback(
self,
callback_function: Callable[
..., None
], # TODO: Fix me once PEP-612 is implemented
key: Hashable,
*args: Any,
**kwargs: Any,
) -> bool:
"""Add a function to be called last in the next simulation step.
Post step callbacks are registered as a function and an object used as
a key. You can only register one post step callback per object.
This function was more useful with earlier versions of pymunk where
you weren't allowed to use the add and remove methods on the space
during a simulation step. But this function is still available for
other uses and to keep backwards compatibility.
.. Note::
If you remove a shape from the callback it will trigger the
collision handler for the 'separate' event if the shape was
touching when removed.
.. Note::
Post step callbacks are not included in pickle / copy of the space.
:param callback_function: The callback function
:type callback_function: `func(space : Space, key, *args, **kwargs)`
:param Any key:
This object is used as a key, you can only have one callback
for a single object. It is passed on to the callback function.
:param args: Optional parameters passed to the callback
:param kwargs: Optional keyword parameters passed on to the callback
:return: True if key was not previously added, False otherwise
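A minimal sketch (the shape used as key and the callback below are
illustrative):
>>> import pymunk
>>> s = pymunk.Space()
>>> c = pymunk.Circle(s.static_body, 10)
>>> s.add(c)
>>> def remove_shape(space, key, shape):
...     space.remove(shape)
>>> s.add_post_step_callback(remove_shape, c, c)
True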
"""
if key in self._post_step_callbacks:
return False
def f(x): # type: ignore
callback_function(self, key, *args, **kwargs)
self._post_step_callbacks[key] = f
return True
def point_query(
self, point: Tuple[float, float], max_distance: float, shape_filter: ShapeFilter
) -> List[PointQueryInfo]:
"""Query space at point for shapes within the given distance range.
The filter is applied to the query and follows the same rules as the
collision detection. If a max_distance of 0.0 is used, the point must
lie inside a shape. A negative max_distance is also allowed, meaning
that the point must be under a certain depth within a shape to be
considered a match.
See :py:class:`ShapeFilter` for details about how the shape_filter
parameter can be used.
.. Note::
Sensor shapes are included in the result (In
:py:meth:`Space.point_query_nearest` they are not)
:param point: Where to check for collision in the Space
:type point: :py:class:`~vec2d.Vec2d` or (float,float)
:param float max_distance: Match only within this distance
:param ShapeFilter shape_filter: Only pick shapes matching the filter
:rtype: [:py:class:`PointQueryInfo`]
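A minimal sketch (the circle on the static body is illustrative):
>>> import pymunk
>>> s = pymunk.Space()
>>> c = pymunk.Circle(s.static_body, 10)
>>> s.add(c)
>>> hits = s.point_query((0, 0), 0.0, pymunk.ShapeFilter())
>>> len(hits)
1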
"""
assert len(point) == 2
query_hits: List[PointQueryInfo] = []
@ffi.callback("cpSpacePointQueryFunc")
def cf(_shape, point, distance, gradient, data): # type: ignore
# space = ffi.from_handle(data)
shape = self._get_shape(_shape)
p = PointQueryInfo(
shape, Vec2d(point.x, point.y), distance, Vec2d(gradient.x, gradient.y)
)
nonlocal query_hits
query_hits.append(p)
data = ffi.new_handle(self)
cp.cpSpacePointQuery(self._space, point, max_distance, shape_filter, cf, data)
return query_hits
def _get_shape(self, _shape: Any) -> Optional[Shape]:
if not bool(_shape):
return None
shapeid = int(ffi.cast("int", cp.cpShapeGetUserData(_shape)))
# return self._shapes[hashid_private]
if shapeid in self._shapes:
return self._shapes[shapeid]
elif shapeid in self._removed_shapes:
return self._removed_shapes[shapeid]
else:
return None
def point_query_nearest(
self, point: Tuple[float, float], max_distance: float, shape_filter: ShapeFilter
) -> Optional[PointQueryInfo]:
"""Query space at point the nearest shape within the given distance
range.
The filter is applied to the query and follows the same rules as the
collision detection. If a max_distance of 0.0 is used, the point must
lie inside a shape. A negative max_distance is also allowed, meaning
that the point must be under a certain depth within a shape to be
considered a match.
See :py:class:`ShapeFilter` for details about how the shape_filter
parameter can be used.
.. Note::
Sensor shapes are not included in the result (In
:py:meth:`Space.point_query` they are)
:param point: Where to check for collision in the Space
:type point: :py:class:`~vec2d.Vec2d` or (float,float)
:param float max_distance: Match only within this distance
:param ShapeFilter shape_filter: Only pick shapes matching the filter
:rtype: :py:class:`PointQueryInfo` or None
"""
assert len(point) == 2
info = ffi.new("cpPointQueryInfo *")
_shape = cp.cpSpacePointQueryNearest(
self._space, point, max_distance, shape_filter, info
)
shape = self._get_shape(_shape)
if shape is not None:
return PointQueryInfo(
shape,
Vec2d(info.point.x, info.point.y),
info.distance,
Vec2d(info.gradient.x, info.gradient.y),
)
return None
def segment_query(
self,
start: Tuple[float, float],
end: Tuple[float, float],
radius: float,
shape_filter: ShapeFilter,
) -> List[SegmentQueryInfo]:
"""Query space along the line segment from start to end with the
given radius.
The filter is applied to the query and follows the same rules as the
collision detection.
See :py:class:`ShapeFilter` for details about how the shape_filter
parameter can be used.
.. Note::
Sensor shapes are included in the result (In
:py:meth:`Space.segment_query_first` they are not)
:param start: Starting point
:param end: End point
:param float radius: Radius
:param ShapeFilter shape_filter: Shape filter
:rtype: [:py:class:`SegmentQueryInfo`]
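A minimal sketch (the circle and the query segment are illustrative):
>>> import pymunk
>>> s = pymunk.Space()
>>> c = pymunk.Circle(s.static_body, 10)
>>> s.add(c)
>>> hits = s.segment_query((-30, 0), (30, 0), 1, pymunk.ShapeFilter())
>>> len(hits)
1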
"""
assert len(start) == 2
assert len(end) == 2
query_hits: List[SegmentQueryInfo] = []
@ffi.callback("cpSpaceSegmentQueryFunc")
def cf(_shape, point, normal, alpha, data): # type: ignore
shape = self._get_shape(_shape)
p = SegmentQueryInfo(
shape, Vec2d(point.x, point.y), Vec2d(normal.x, normal.y), alpha
)
nonlocal query_hits
query_hits.append(p)
data = ffi.new_handle(self)
cp.cpSpaceSegmentQuery(self._space, start, end, radius, shape_filter, cf, data)
return query_hits
def segment_query_first(
self,
start: Tuple[float, float],
end: Tuple[float, float],
radius: float,
shape_filter: ShapeFilter,
) -> Optional[SegmentQueryInfo]:
"""Query space along the line segment from start to end with the
given radius.
The filter is applied to the query and follows the same rules as the
collision detection.
.. Note::
Sensor shapes are not included in the result (In
:py:meth:`Space.segment_query` they are)
See :py:class:`ShapeFilter` for details about how the shape_filter
parameter can be used.
:rtype: :py:class:`SegmentQueryInfo` or None
"""
assert len(start) == 2
assert len(end) == 2
info = ffi.new("cpSegmentQueryInfo *")
_shape = cp.cpSpaceSegmentQueryFirst(
self._space, start, end, radius, shape_filter, info
)
shape = self._get_shape(_shape)
if shape is not None:
return SegmentQueryInfo(
shape,
Vec2d(info.point.x, info.point.y),
Vec2d(info.normal.x, info.normal.y),
info.alpha,
)
return None
def bb_query(self, bb: "BB", shape_filter: ShapeFilter) -> List[Shape]:
"""Query space to find all shapes near bb.
The filter is applied to the query and follows the same rules as the
collision detection.
.. Note::
Sensor shapes are included in the result
:param bb: Bounding box
:param shape_filter: Shape filter
:rtype: [:py:class:`Shape`]
"""
query_hits = []
@ffi.callback("cpSpaceBBQueryFunc")
def cf(_shape, data): # type: ignore
shape = self._get_shape(_shape)
assert shape is not None
nonlocal query_hits
query_hits.append(shape)
data = ffi.new_handle(self)
cp.cpSpaceBBQuery(self._space, bb, shape_filter, cf, data)
return query_hits
def shape_query(self, shape: Shape) -> List[ShapeQueryInfo]:
"""Query a space for any shapes overlapping the given shape
.. Note::
Sensor shapes are included in the result
:param shape: Shape to query with
:type shape: :py:class:`Circle`, :py:class:`Poly` or :py:class:`Segment`
:rtype: [:py:class:`ShapeQueryInfo`]
"""
query_hits = []
@ffi.callback("cpSpaceShapeQueryFunc")
def cf(_shape, _points, _data): # type: ignore
found_shape = self._get_shape(_shape)
point_set = ContactPointSet._from_cp(_points)
info = ShapeQueryInfo(found_shape, point_set)
nonlocal query_hits
query_hits.append(info)
data = ffi.new_handle(self)
cp.cpSpaceShapeQuery(self._space, shape._shape, cf, data)
return query_hits
def debug_draw(self, options: SpaceDebugDrawOptions) -> None:
"""Debug draw the current state of the space using the supplied drawing
options.
If you use a graphics backend that is already supported, such as pygame
and pyglet, you can use the predefined options in their x_util modules,
for example :py:class:`pygame_util.DrawOptions`.
It's also possible to write your own graphics backend, see
:py:class:`SpaceDebugDrawOptions`.
If you require any advanced or optimized drawing, it's probably best
not to use this function for the drawing, since it's meant for debugging
and quick scripting.
:type options: :py:class:`SpaceDebugDrawOptions`
"""
if options._use_chipmunk_debug_draw:
h = ffi.new_handle(self)
# we need to hold h until the end of cpSpaceDebugDraw to prevent GC
options._options.data = h
with options:
cp.cpSpaceDebugDraw(self._space, options._options)
else:
for shape in self.shapes:
options.draw_shape(shape)
# def get_batched_bodies(self, shape_filter):
# """Return a memoryview for use when the non-batch api is not performant enough.
# .. note::
#         Experimental API. Likely to change in future major, minor or point
# releases.
# """
# pass
def __getstate__(self) -> _State:
"""Return the state of this object
This method allows the usage of the :mod:`copy` and :mod:`pickle`
modules with this class.
"""
d = super(Space, self).__getstate__()
d["special"].append(("pymunk_version", _version.version))
# bodies needs to be added to the state before their shapes.
d["special"].append(("bodies", self.bodies))
if self._static_body is not None:
# print("getstate", self._static_body)
d["special"].append(("_static_body", self._static_body))
d["special"].append(("shapes", self.shapes))
d["special"].append(("constraints", self.constraints))
handlers = []
for k, v in self._handlers.items():
h: Dict[str, Any] = {}
if v._begin_base is not None:
h["_begin_base"] = v._begin_base
if v._pre_solve_base is not None:
h["_pre_solve_base"] = v._pre_solve_base
if v._post_solve_base is not None:
h["_post_solve_base"] = v._post_solve_base
if v._separate_base is not None:
h["_separate_base"] = v._separate_base
handlers.append((k, h))
d["special"].append(("_handlers", handlers))
return d
def __setstate__(self, state: _State) -> None:
"""Unpack this object from a saved state.
This method allows the usage of the :mod:`copy` and :mod:`pickle`
modules with this class.
"""
super(Space, self).__setstate__(state)
for k, v in state["special"]:
if k == "pymunk_version":
assert (
_version.version == v
), f"Pymunk version {v} of pickled object does not match current Pymunk version {_version.version}"
elif k == "bodies":
self.add(*v)
elif k == "_static_body":
# _ = cp.cpSpaceSetStaticBody(self._space, v._body)
# v._space = self
# self._static_body = v
# print("setstate", v, self._static_body)
self._static_body = v
self._setup_static_body(v)
# self._static_body._space = weakref.proxy(self)
# cp.cpSpaceAddBody(self._space, v._body)
# self.add(v)
elif k == "shapes":
# print("setstate shapes", v)
self.add(*v)
elif k == "constraints":
self.add(*v)
elif k == "_handlers":
for k2, hd in v:
if k2 is None:
h = self.add_default_collision_handler()
elif isinstance(k2, tuple):
h = self.add_collision_handler(k2[0], k2[1])
else:
h = self.add_wildcard_collision_handler(k2)
if "_begin_base" in hd:
h.begin = hd["_begin_base"]
if "_pre_solve_base" in hd:
h.pre_solve = hd["_pre_solve_base"]
if "_post_solve_base" in hd:
h.post_solve = hd["_post_solve_base"]
if "_separate_base" in hd:
h.separate = hd["_separate_base"]
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging as std_logging
import os
import random
import signal
import sys
import time
try:
# Importing just the symbol here because the io module does not
# exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo.config import cfg
from neutron_fwaas.openstack.common import eventlet_backdoor
from neutron_fwaas.openstack.common._i18n import _LE, _LI, _LW
from neutron_fwaas.openstack.common import log as logging
from neutron_fwaas.openstack.common import systemd
from neutron_fwaas.openstack.common import threadgroup
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_daemon():
# The process group for a foreground process will match the
# process group of the controlling terminal. If those values do
# not match, or ioctl() fails on the stdout file handle, we assume
# the process is running in the background as a daemon.
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
try:
is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
except OSError as err:
if err.errno == errno.ENOTTY:
# Assume we are a daemon because there is no terminal.
is_daemon = True
else:
raise
except UnsupportedOperation:
# Could not get the fileno for stdout, so we must be a daemon.
is_daemon = True
return is_daemon
def _is_sighup_and_daemon(signo):
if not (_sighup_supported() and signo == signal.SIGHUP):
# Avoid checking if we are a daemon, because the signal isn't
# SIGHUP.
return False
return _is_daemon()
def _signo_to_signame(signo):
signals = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}
if _sighup_supported():
signals[signal.SIGHUP] = 'SIGHUP'
return signals[signo]
def _set_signals_handler(handler):
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
if _sighup_supported():
signal.signal(signal.SIGHUP, handler)
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self.services = Services()
self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
def launch_service(self, service):
"""Load and start the given service.
:param service: The service you would like to start.
:returns: None
"""
service.backdoor_port = self.backdoor_port
self.services.add(service)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
self.services.stop()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
self.services.wait()
def restart(self):
"""Reload config files and restart service.
:returns: None
"""
cfg.CONF.reload_config_files()
self.services.restart()
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
self.signo = signo
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
raise SignalExit(signo)
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _wait_for_exit_or_signal(self, ready_callback=None):
status = None
signo = 0
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
if ready_callback:
ready_callback()
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
finally:
self.stop()
return status, signo
def wait(self, ready_callback=None):
systemd.notify_once()
while True:
self.handle_signal()
status, signo = self._wait_for_exit_or_signal(ready_callback)
if not _is_sighup_and_daemon(signo):
return status
self.restart()
class ServiceWrapper(object):
def __init__(self, service, workers):
self.service = service
self.workers = workers
self.children = set()
self.forktimes = []
class ProcessLauncher(object):
def __init__(self, wait_interval=0.01):
"""Constructor.
:param wait_interval: The interval to sleep for between checks
of child process exit.
"""
self.children = {}
self.sigcaught = None
self.running = True
self.wait_interval = wait_interval
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
self.handle_signal()
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
LOG.info(_LI('Parent process has died unexpectedly, exiting'))
sys.exit(1)
def _child_process_handle_signal(self):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
def _sighup(*args):
signal.signal(signal.SIGHUP, signal.SIG_DFL)
raise SignalExit(signal.SIGHUP)
signal.signal(signal.SIGTERM, _sigterm)
if _sighup_supported():
signal.signal(signal.SIGHUP, _sighup)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _child_wait_for_exit_or_signal(self, launcher):
status = 0
signo = 0
# NOTE(johannes): All exceptions are caught to ensure this
# doesn't fallback into the loop spawning children. It would
# be bad for a child to spawn more children.
try:
launcher.wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Child caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_LE('Unhandled exception'))
status = 2
finally:
launcher.stop()
return status, signo
def _child_process(self, service):
self._child_process_handle_signal()
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn_n(self._pipe_watcher)
# Reseed random number generator
random.seed()
launcher = Launcher()
launcher.launch_service(service)
return launcher
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
# Limit ourselves to one process a second (over the period of
# number of workers * 1 second). This allows workers to start up
# quickly but ensures we don't keep forking off children that die
# instantly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_LI('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
launcher = self._child_process(wrap.service)
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(launcher)
if not _is_sighup_and_daemon(signo):
break
launcher.restart()
os._exit(status)
LOG.info(_LI('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_LI('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def _wait_child(self):
try:
# Don't block if no child processes have exited
pid, status = os.waitpid(0, os.WNOHANG)
if not pid:
return None
except OSError as exc:
if exc.errno not in (errno.EINTR, errno.ECHILD):
raise
return None
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig))
else:
code = os.WEXITSTATUS(status)
LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
dict(pid=pid, code=code))
if pid not in self.children:
LOG.warning(_LW('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
wrap.children.remove(pid)
return wrap
def _respawn_children(self):
while self.running:
wrap = self._wait_child()
if not wrap:
# Yield to other threads if no children have exited
# Sleep for a short time to avoid excessive CPU usage
# (see bug #1095346)
eventlet.greenthread.sleep(self.wait_interval)
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
systemd.notify_once()
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
while True:
self.handle_signal()
self._respawn_children()
# No signal means that stop was called. Don't clean up here.
if not self.sigcaught:
return
signame = _signo_to_signame(self.sigcaught)
LOG.info(_LI('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
for pid in self.children:
os.kill(pid, signal.SIGHUP)
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
self.stop()
def stop(self):
"""Terminate child processes and wait on each."""
self.running = False
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(object):
"""Service object for binaries running on hosts."""
def __init__(self, threads=1000):
self.tg = threadgroup.ThreadGroup(threads)
# signal that the service is done shutting itself down:
self._done = event.Event()
def reset(self):
# NOTE(Fengqian): docs for Event.reset() recommend against using it
self._done = event.Event()
def start(self):
pass
def stop(self):
self.tg.stop()
self.tg.wait()
# Signal that service cleanup is done:
if not self._done.ready():
self._done.send()
def wait(self):
self._done.wait()
class Services(object):
def __init__(self):
self.services = []
self.tg = threadgroup.ThreadGroup()
self.done = event.Event()
def add(self, service):
self.services.append(service)
self.tg.add_thread(self.run_service, service, self.done)
def stop(self):
# wait for graceful shutdown of services:
for service in self.services:
service.stop()
service.wait()
# Each service has performed cleanup, now signal that the run_service
# wrapper threads can now die:
if not self.done.ready():
self.done.send()
# reap threads:
self.tg.stop()
def wait(self):
self.tg.wait()
def restart(self):
self.stop()
self.done = event.Event()
for restart_service in self.services:
restart_service.reset()
self.tg.add_thread(self.run_service, restart_service, self.done)
@staticmethod
def run_service(service, done):
"""Service start wrapper.
:param service: service to run
:param done: event to wait on until a shutdown is triggered
:returns: None
"""
service.start()
done.wait()
def launch(service, workers=1):
if workers is None or workers == 1:
launcher = ServiceLauncher()
launcher.launch_service(service)
else:
launcher = ProcessLauncher()
launcher.launch_service(service, workers=workers)
return launcher
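# A minimal usage sketch (``MyService`` and its ``_run_forever`` method are
# hypothetical, not part of this module):
#
#     class MyService(Service):
#         def start(self):
#             self.tg.add_thread(self._run_forever)
#
#     launcher = launch(MyService(), workers=4)
#     launcher.wait()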
|
|
# processor.py
# version 2013.06.21
def processMutations(first_file, second_file , input_directory, output_file, output_directory, lines_skip):
import methodslist as ml
import xlwt
import os
# Set directory
old_dir = os.getcwd()
os.chdir(output_directory)
print('\nStarting at %r' % input_directory)
# Import text to tables
table1 = ml.importTable(first_file, input_directory, lines_skip)
table2 = ml.importTable(second_file, input_directory, lines_skip)
# Save rows with detected mutations
keeps1 = ml.saveByValue(table1, 'judgement', 'KEEP')
keeps2 = ml.saveByValue(table2, 'judgement', 'KEEP')
# Find mutated rows that are matched and unmatched
total_comparison = ml.compareTables(keeps1, keeps2)
'''
0 is same_first_table
1 is same_second_table
2 is diff_first_table
3 is diff_second_table
'''
matched_keeps1 = total_comparison[0]
matched_keeps2 = total_comparison[1]
unmatched_keeps1 = total_comparison[2]
unmatched_keeps2 = total_comparison[3]
# Save rows with no detected mutations
rejects1 = ml.saveByValue(table1, 'judgement', 'REJECT')
rejects2 = ml.saveByValue(table2, 'judgement', 'REJECT')
# Compare unmatched KEEP rows of one table to REJECT rows of other table
total_compare_reject2_unmatched1 = ml.compareTables(unmatched_keeps1, rejects2)
matched_reject2_unmatched1 = total_compare_reject2_unmatched1[1]  # index 1: same_second_table (see legend above)
total_compare_reject1_unmatched2 = ml.compareTables(unmatched_keeps2, rejects1)
matched_reject1_unmatched2 = total_compare_reject1_unmatched2[1]
# Select only certain categories
keeps1 = ml.saveByCategory(keeps1)
keeps2 = ml.saveByCategory(keeps2)
matched_reject2_unmatched1 = ml.saveByCategory(matched_reject2_unmatched1)
matched_reject1_unmatched2 = ml.saveByCategory(matched_reject1_unmatched2)
# Calculate concordance between NC TC and NF TF
concordance_table = ml.calcCondordance(unmatched_keeps1, unmatched_keeps2, matched_keeps1)
# Write tables to text files
book = xlwt.Workbook()
ml.outputTable(book, concordance_table, 'Concordance Summary', output_directory)
ml.outputTable(book, keeps1, 'NC TC KEEP', output_directory)
ml.outputTable(book, keeps2, 'NF TF KEEP', output_directory)
ml.outputTable(book, matched_reject2_unmatched1, 'NF TF Matched REJECTS', output_directory)
ml.outputTable(book, matched_reject1_unmatched2, 'NC TC Matched REJECTS', output_directory)
book.save(output_file)
os.chdir(old_dir)
print('Finished')
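# A hypothetical call sketch for processMutations (file and directory names
# below are illustrative, not real paths):
#
#     processMutations('NC_TC_calls.txt', 'NF_TF_calls.txt',
#                      '/path/to/input', 'mutation_concordance.xls',
#                      '/path/to/output', lines_skip=2)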
def processGeneCalls(first_file, second_file , input_directory, output_file, output_directory, lines_skip):
from xlsxwriter.workbook import Workbook
import methodslist as ml
import os
# Set directory
old_dir = os.getcwd()
os.chdir(output_directory)
print('\nStarting at %r' % input_directory)
# Import text to tables
table1 = ml.importTable(first_file, input_directory, lines_skip)
table2 = ml.importTable(second_file, input_directory, lines_skip)
# Find mutated rows that are matched and unmatched
total_comparison = ml.compareTables(table1, table2)
matched1 = total_comparison[0]
matched2 = total_comparison[1]
unmatched1 = total_comparison[2]
unmatched2 = total_comparison[3]
'''
#Select only certain categories
keeps1 = ml.saveByCategory(keeps1)
keeps2 = ml.saveByCategory(keeps2)
matched_reject2_unmatched1 = ml.saveByCategory(matched_reject2_unmatched1)
matched_reject1_unmatched2 = ml.saveByCategory(matched_reject1_unmatched2)
'''
# Calculate concordance
concordance_table = ml.calcCondordance(unmatched1, unmatched2, matched1)
# Write tables to text files
book = Workbook(output_file)
ml.outputTable(book, concordance_table, 'Concordance Summary', output_directory)
ml.outputTable(book, table1, 'NC TC Input', output_directory)
ml.outputTable(book, table2, 'NF TF Input', output_directory)
ml.outputTable(book, matched1, 'NC TC Matches', output_directory)
ml.outputTable(book, matched2, 'NF TF Matches', output_directory)
book.close()
os.chdir(old_dir)
print('Finished')
def processNucleotides(first_file, second_file , input_directory, output_file, output_directory, lines_skip):
import methodslist as ml
import xlwt
import os
# Set directory
old_dir = os.getcwd()
os.chdir(output_directory)
print('\nStarting at %r' % input_directory)
# Import text to tables
table1 = ml.importTable(first_file, input_directory, lines_skip)
table2 = ml.importTable(second_file, input_directory, lines_skip)
# Compare Nucleotide Changes 1st Sheet
table0 = ['Change',
'A to T', 'A to C', 'A to G',
'T to A', 'T to C', 'T to G',
'C to A', 'C to T', 'C to G',
'G to A', 'G to T', 'G to C']
rows1 = len(table1)
print('Comparing %r positions' % rows1)
a_to_t = 0
a_to_g = 0
a_to_c = 0
t_to_a = 0
t_to_c = 0
t_to_g = 0
c_to_a = 0
c_to_t = 0
c_to_g = 0
g_to_a = 0
g_to_t = 0
g_to_c = 0
for x in range (1, rows1):
if table1[x][3] == 'A':
if table1[x][4] == 'T':
a_to_t += 1
elif table1[x][4] == 'C':
a_to_c += 1
elif table1[x][4] == 'G':
a_to_g += 1
elif table1[x][3] == 'T':
if table1[x][4] == 'A':
t_to_a += 1
elif table1[x][4] == 'C':
t_to_c += 1
elif table1[x][4] == 'G':
t_to_g += 1
elif table1[x][3] == 'C':
if table1[x][4] == 'A':
c_to_a += 1
elif table1[x][4] == 'T':
c_to_t += 1
elif table1[x][4] == 'G':
c_to_g += 1
elif table1[x][3] == 'G':
if table1[x][4] == 'A':
g_to_a += 1
elif table1[x][4] == 'T':
g_to_t += 1
elif table1[x][4] == 'C':
g_to_c += 1
a_to_t = a_to_t/(rows1 - 1.0)
a_to_g = a_to_g/(rows1 - 1.0)
a_to_c = a_to_c/(rows1 - 1.0)
t_to_a = t_to_a/(rows1 - 1.0)
t_to_c = t_to_c/(rows1 - 1.0)
t_to_g = t_to_g/(rows1 - 1.0)
c_to_a = c_to_a/(rows1 - 1.0)
c_to_t = c_to_t/(rows1 - 1.0)
c_to_g = c_to_g/(rows1 - 1.0)
g_to_a = g_to_a/(rows1 - 1.0)
g_to_t = g_to_t/(rows1 - 1.0)
g_to_c = g_to_c/(rows1 - 1.0)
table1 = ['NC TC',
a_to_t, a_to_c, a_to_g,
t_to_a, t_to_c, t_to_g,
c_to_a, c_to_t, c_to_g,
g_to_a, g_to_t, g_to_c]
rows2 = len(table2)
print('Comparing %r positions' % rows2)
a_to_t = 0
a_to_g = 0
a_to_c = 0
t_to_a = 0
t_to_c = 0
t_to_g = 0
c_to_a = 0
c_to_t = 0
c_to_g = 0
g_to_a = 0
g_to_t = 0
g_to_c = 0
for x in range (1, rows2):
if table2[x][3] == 'A':
if table2[x][4] == 'T':
a_to_t += 1
elif table2[x][4] == 'C':
a_to_c += 1
elif table2[x][4] == 'G':
a_to_g += 1
elif table2[x][3] == 'T':
if table2[x][4] == 'A':
t_to_a += 1
elif table2[x][4] == 'C':
t_to_c += 1
elif table2[x][4] == 'G':
t_to_g += 1
elif table2[x][3] == 'C':
if table2[x][4] == 'A':
c_to_a += 1
elif table2[x][4] == 'T':
c_to_t += 1
elif table2[x][4] == 'G':
c_to_g += 1
elif table2[x][3] == 'G':
if table2[x][4] == 'A':
g_to_a += 1
elif table2[x][4] == 'T':
g_to_t += 1
elif table2[x][4] == 'C':
g_to_c += 1
# Normalize by the number of data rows in table2
a_to_t = a_to_t/(rows2 - 1.0)
a_to_g = a_to_g/(rows2 - 1.0)
a_to_c = a_to_c/(rows2 - 1.0)
t_to_a = t_to_a/(rows2 - 1.0)
t_to_c = t_to_c/(rows2 - 1.0)
t_to_g = t_to_g/(rows2 - 1.0)
c_to_a = c_to_a/(rows2 - 1.0)
c_to_t = c_to_t/(rows2 - 1.0)
c_to_g = c_to_g/(rows2 - 1.0)
g_to_a = g_to_a/(rows2 - 1.0)
g_to_t = g_to_t/(rows2 - 1.0)
g_to_c = g_to_c/(rows2 - 1.0)
table2 = ['NF TF',
a_to_t, a_to_c, a_to_g,
t_to_a, t_to_c, t_to_g,
c_to_a, c_to_t, c_to_g,
g_to_a, g_to_t, g_to_c]
'''
table2 = []
table2.append(a_to_t)
table2.append(a_to_c)
table2.append(a_to_g)
table2.append(t_to_a)
table2.append(t_to_c)
table2.append(t_to_g)
table2.append(c_to_a)
table2.append(c_to_t)
table2.append(c_to_g)
table2.append(g_to_a)
table2.append(g_to_t)
table2.append(g_to_c)
table2 = []
table2.append(['A to T', a_to_t])
table2.append(['A to C', a_to_c])
table2.append(['A to G', a_to_g])
table2.append(['T to A', t_to_a])
table2.append(['T to C', t_to_c])
table2.append(['T to G', t_to_g])
table2.append(['C to A', c_to_a])
table2.append(['C to T', c_to_t])
table2.append(['C to G', c_to_g])
table2.append(['G to A', g_to_a])
table2.append(['G to T', g_to_t])
table2.append(['G to C', g_to_c])
'''
# Output Table
book = xlwt.Workbook()
sheet = book.add_sheet('Nucleotide Changes')
rows = len(table1)
print('Output table has %r positions' % rows)
for x in range(0, rows):
sheet.write(x, 0, table0[x])
sheet.write(x, 1, table1[x])
sheet.write(x, 2, table2[x])
book.save(output_file)
os.chdir(old_dir)
print('Finished')
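# A more compact tallying sketch (an untested alternative, assuming the same
# layout as above: header row at index 0, reference base in column 3, variant
# base in column 4); kept as a comment so the behaviour above is unchanged:
#
#     from collections import Counter
#     def tally_changes(table):
#         counts = Counter((row[3], row[4]) for row in table[1:])
#         total = float(len(table) - 1)
#         return {'%s to %s' % pair: n / total for pair, n in counts.items()}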
|
|
#
# PgHelp.py -- web application threading help routines.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import tornado.web
import tornado.websocket
import tornado.template
from tornado.ioloop import IOLoop
import random
import json
import os, time
import datetime
import binascii
from collections import namedtuple
from io import BytesIO
from ginga.misc import Bunch
from ginga.util import io_rgb
default_interval = 10
ConfigEvent = namedtuple("ConfigEvent", ["type", "id", "width", "height"])
InputEvent = namedtuple("InputEvent", ["type", "id", "x", "y", "button",
"delta", "alt_key", "ctrl_key",
"meta_key", "shift_key", "key_code"])
GestureEvent = namedtuple("GestureEvent", ["type", "id", "x", "y", "dx", "dy",
"distance",
"theta", "direction", "vx", "vy",
"scale", "rotation", "isfirst",
"isfinal"])
WidgetEvent = namedtuple("WidgetEvent", ["type", "id", "value"])
TimerEvent = namedtuple("TimerEvent", ["type", "id", "value"])
class ApplicationHandler(tornado.websocket.WebSocketHandler):
def initialize(self, name, app):
self.name = name
self.app = app
self.app.add_ws_handler(self)
self.event_callbacks = {
"activate": WidgetEvent,
"setbounds": ConfigEvent,
"mousedown": InputEvent,
"mouseup": InputEvent,
"mousemove": InputEvent,
"mouseout": InputEvent,
"mouseover": InputEvent,
"mousewheel": InputEvent,
"wheel": InputEvent,
"click": InputEvent,
"dblclick": InputEvent,
"keydown": InputEvent,
"keyup": InputEvent,
"keypress": InputEvent,
"resize": ConfigEvent,
"focus": InputEvent,
"focusout": InputEvent,
"blur": InputEvent,
"drop": InputEvent,
#"paste": InputEvent,
# These are all Hammer.js events
"pinch": GestureEvent,
"pinchstart": GestureEvent,
"pinchend": GestureEvent,
"rotate": GestureEvent,
"rotatestart": GestureEvent,
"rotateend": GestureEvent,
"pan": GestureEvent,
"panstart": GestureEvent,
"panend": GestureEvent,
"tap": GestureEvent,
"swipe": GestureEvent,
}
#self.interval = 10
interval = self.settings.get("timer_interval", default_interval)
if self.name in self.settings:
interval = self.settings[self.name].get("timer_interval", interval)
self.interval = interval
# randomize the first timeout so we don't get every timer
# expiring at the same time
interval = random.randint(1, self.interval)
delta = datetime.timedelta(milliseconds=interval)
self.timeout = IOLoop.current().add_timeout(delta, self.timer_tick)
def add_event_type(self, msg_type, event_class):
self.event_callbacks[msg_type] = event_class
def on_open(self, *args, **kwdargs):
self.set_nodelay(True)
def on_close(self):
IOLoop.current().remove_timeout(self.timeout)
def on_message(self, raw_message):
message = json.loads(raw_message)
event_type = message.get("type")
try:
event_class = self.event_callbacks[event_type]
except KeyError:
print("I don't know how to process '%s' events!" % (
event_type))
return
event = event_class(**message)
self.app.widget_event(event)
def do_operation(self, operation, **kwargs):
message = dict(kwargs, operation=operation)
raw_message = json.dumps(message)
self.write_message(raw_message)
def timer_tick(self):
event = TimerEvent(type="timer", id=0, value=time.time())
# TODO: should exceptions thrown from this be caught and ignored?
self.app.widget_event(event)
delta = datetime.timedelta(milliseconds = self.interval)
self.timeout = IOLoop.current().add_timeout(delta, self.timer_tick)
class WindowHandler(tornado.web.RequestHandler):
def initialize(self, name, url, app):
self.app = app
self.logger = app.logger
self.logger.info("windowhandler initialize")
self.name = name
self.url = url
def make_index(self, wids):
template = '''
<!doctype html>
<html>
<head>
<title>%(title)s</title>
</head>
<body>
%(content)s
</body>
</html>'''
content = ["<ul>"]
for wid in wids:
content.append('''<li><a href="%s?id=%s">Window %s</a></li>''' % (
self.url, wid, wid))
content.append("</ul>")
return template % dict(title="Window index", content=''.join(content))
def get(self):
self.logger.info("windowhandler get")
# Collect arguments
wid = self.get_argument('id', None)
# Get window with this id
wids = self.app.get_wids()
if wid in wids:
window = self.app.get_window(wid)
output = window.render()
else:
output = self.make_index(wids)
self.write(output)
class Timer(object):
"""Abstraction of a GUI-toolkit implemented timer."""
def __init__(self, ival_sec, expire_cb, data=None, mplcanvas=None):
"""Create a timer set to expire after `ival_sec` and which will
call the callable `expire_cb` when it expires.
"""
self.ival_sec = ival_sec
self.cb = expire_cb
self.data = data
self._timer = mplcanvas.new_timer()
self._timer.single_shot = True
self._timer.add_callback(self._redirect_cb)
def start(self, ival_sec=None):
"""Start the timer. If `ival_sec` is not None, it should
specify the time to expiration in seconds.
"""
if ival_sec is None:
ival_sec = self.ival_sec
self.cancel()
# Matplotlib timer set in milliseconds
time_ms = int(ival_sec * 1000.0)
self._timer.interval = time_ms
self._timer.start()
def _redirect_cb(self):
self.cb(self)
def cancel(self):
"""Cancel this timer. If the timer is not running, there
is no error.
"""
try:
self._timer.stop()
except Exception:
pass
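# A minimal usage sketch for Timer (``canvas`` is an assumed Matplotlib
# FigureCanvas and ``on_expire`` a hypothetical callback):
#
#     def on_expire(timer):
#         print('timer %s expired' % (timer.data,))
#
#     t = Timer(0.5, on_expire, data='ping', mplcanvas=canvas)
#     t.start()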
def get_image_src_from_buffer(img_buf, imgtype='png'):
img_string = binascii.b2a_base64(img_buf)
if isinstance(img_string, bytes):
img_string = img_string.decode("utf-8")
return ('data:image/%s;base64,' % imgtype) + img_string
def get_icon(iconpath, size=None, format='png'):
image = io_rgb.PILimage.open(iconpath)
if size is not None:
wd, ht = size
else:
wd, ht = 24, 24
image = image.resize((wd, ht))
img_buf = BytesIO()
image.save(img_buf, format=format)
icon = get_image_src_from_buffer(img_buf.getvalue(), imgtype=format)
return icon
def get_font(font_family, point_size):
font = '%s %d' % (font_family, point_size)
return font
#END
|
|
from ascension.util import SettingSet, Singleton, SettingBased
from pykfs.kfslog import CONSOLE_HANDLER, CONSOLE_FORMATTER
import yaml
import os
import math
CONF_FILE_NAME = "ascension_conf.yaml"
game_settings = SettingSet([
{
"name": "target_frame_rate",
"default": 60,
"parse": int,
},
{
"name": "slow_frame_log_level",
"default": "WARNING"
},
{
"name": "logging",
"default": {
"version": 1,
"loggers": {
"ascension": {
"handlers": ["console"],
"level": "INFO",
},
},
"handlers": {
"console": CONSOLE_HANDLER,
},
"formatters": {
"console": CONSOLE_FORMATTER,
},
}
},
{
"name": "logging_append",
"default": {}
},
{
"name": "img_dir",
"default": "data/img/x3",
},
{
"name": "atlas_image",
"default": "data/img/ASCENSION_ATLAS.png",
},
{
"name": "atlas_meta",
"default": "data/img/ASCENSION_ATLAS_META.json"
},
{
"name": "window_width",
"default": 1400,
"parse": int,
},
{
"name": "window_height",
"default": 800,
"parse": int,
},
{
"name": "disabled_profilers",
"default": []
},
{
"name": "sprite_scale",
"default": 3,
"parse": int,
},
{
"name": "tile_width",
"default": 71,
"parse": int,
},
{
"name": "tile_height",
"default": 30,
"parse": int,
},
{
"name": "horz_point_width",
"default": 16,
"parse": int,
},
{
"name": "frame_tile_count_horz",
"default": 2,
"parse": int,
},
{
"name": "frame_tile_count_vert",
"default": 2,
"parse": int,
},
{
"name": "shroud_fade_delay",
"default": 0.5,
"parse": float,
},
{
"name": "shroud_fade_time",
"default": 0.5,
"parse": float,
},
{
"name": "shroud_move_speed",
"default": 30,
"parse": float,
},
{
"name": "reveal_map",
"default": False,
"parse": bool,
},
{
"name": "scroller_sleep",
"default": 0.5,
"parse": float,
},
{
"name": "quit_sleep",
"default": 0.05,
"parse": float,
},
{
"name": "max_quit_wait_time",
"default": 5.0,
"parse": float,
},
{
"name": "tilemap_refresh_stages",
"default": 10,
"parse": int,
},
{
"name": "unitset_refresh_stages",
"default": 10,
"parse": int,
},
{
"name": "scroller_mode",
"default": "DYNAMIC",
},
{
"name": "fixed_scroller_width",
"default": 50,
"parse": int,
},
{
"name": "fixed_scroller_height",
"default": 25,
"parse": int,
},
{
"name": "sprite_manager_report_frequency",
"default": 0.0,
"parse": float,
},
{
"name": "map_width",
"default": 42,
"parse": int,
},
{
"name": "map_height",
"default": 28,
"parse": int,
},
{
"name": "sea_perlin_size_multiplier",
"default": 4,
"parse": int
},
{
"name": "forest_perlin_size_multiplier",
"default": 8,
"parse": int,
},
{
"name": "mountain_perlin_size_multiplier",
"default": 8,
"parse": int,
},
{
"name": "sea_percentage",
"default": 0.5,
"parse": float,
},
{
"name": "mountain_percentage",
"default": 0.1,
"parse": float,
},
{
"name": "forest_percentage",
"default": 0.2,
"parse": float,
},
])
player_settings = SettingSet([
{
"name": "key_bindings",
"default": {
},
},
{
"name": "scroll_speed",
"default": 400,
"parse": int,
},
{
"name": "unit_move_speed",
"default": 50
}
])
def calc_property(func):
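"""Memoize a zero-argument method as a lazily computed, cached property.
A hedged usage sketch (the ``Box`` class is illustrative):
    class Box(object):
        width, height = 3, 4
        @calc_property
        def area(self):
            return self.width * self.height
"""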
def new_func(self):
cache_name = "_{}".format(func.__name__)
if not hasattr(self, cache_name):
setattr(self, cache_name, func(self))
return getattr(self, cache_name)
return property(new_func)
class AscensionConf(SettingBased):
__metaclass__ = Singleton
settingset = game_settings
def __init__(self):
values = {}
if os.path.isfile(CONF_FILE_NAME):
with open(CONF_FILE_NAME) as f:
values = yaml.load(f) or {}
super(AscensionConf, self).__init__(**values)
@calc_property
def perspective_sin(self):
i = self.tile_height / 2.0
x = self.tile_width / 2.0
z = self.tile_width / 2.0 - self.horz_point_width
return i / math.sqrt(x**2-z**2)
@calc_property
def diagonal_distance_multiplier(self):
return math.sqrt(self.perspective_sin**2 + 3) / 2
@calc_property
def tile_point_slope(self):
return self.tile_height / 2 / (self.horz_point_width - 1)
@calc_property
def tile_center_width(self):
return self.tile_width - 2*self.horz_point_width
@calc_property
def frame_width(self):
return (
self.frame_tile_count_horz * (self.tile_center_width+self.horz_point_width-1)
)
@calc_property
def frame_height(self):
return self.frame_tile_count_vert * self.tile_height
@calc_property
def frame_pixel_count(self):
return self.frame_width * self.frame_height
def get_speed_multiplier(self, direction):
if direction in [(0, 1), (0, -1)]:
return self.perspective_sin
else:
return self.diagonal_distance_multiplier
AscensionConf.reset()
class PlayerConf(SettingBased):
__metaclass__ = Singleton
settingset = player_settings
|
|
import multiprocessing
import warnings
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer.dataset import convert
from chainer import reporter
from chainer.training.updaters import standard_updater
try:
from cupy.cuda import nccl
_available = True
except Exception:
_available = False
class _Worker(multiprocessing.Process):
def __init__(self, proc_id, pipe, master):
super(_Worker, self).__init__()
self.proc_id = proc_id
self.pipe = pipe
self.converter = master.converter
self.model = master._master
self.device = master._devices[proc_id]
self.iterator = master._mpu_iterators[proc_id]
self.n_devices = len(master._devices)
def setup(self):
_, comm_id = self.pipe.recv()
self.comm = nccl.NcclCommunicator(self.n_devices, comm_id,
self.proc_id)
self.model.to_device(self.device)
self.reporter = reporter.Reporter()
self.reporter.add_observer('main', self.model)
self.reporter.add_observers('main',
self.model.namedlinks(skipself=True))
def run(self):
self.device.use()
self.setup()
while True:
job, data = self.pipe.recv()
if job == 'finalize':
self.device.device.synchronize()
break
if job == 'update':
# For reducing memory
self.model.cleargrads()
batch = self.converter(self.iterator.next(), self.device)
with self.reporter.scope({}): # pass dummy observation
loss = _calc_loss(self.model, batch)
self.model.cleargrads()
loss.backward()
del loss
gg = gather_grads(self.model)
nccl_data_type = _get_nccl_data_type(gg.dtype)
null_stream = cuda.Stream.null
self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
nccl_data_type, nccl.NCCL_SUM, 0,
null_stream.ptr)
del gg
self.model.cleargrads()
gp = gather_params(self.model)
nccl_data_type = _get_nccl_data_type(gp.dtype)
self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0,
null_stream.ptr)
scatter_params(self.model, gp)
del gp
class MultiprocessParallelUpdater(standard_updater.StandardUpdater):
"""Implementation of a multiprocess parallel GPU Updater.
This is an implementation of :class:`Updater` that uses multiple GPUs
with multi-process data parallelism. It uses Nvidia NCCL for communication
between multiple GPUs.
It behaves similarly to
:class:`~chainer.training.updaters.StandardUpdater`.
The update routine is modified to support data-parallel
computation on multiple GPUs in one machine.
It is based on synchronous parallel SGD: it
parallelizes the gradient computation over a mini-batch, and updates the
parameters only in the main device.
It does not transfer the values collected by :class:`Reporter` in the sub
devices to the main device. So you can only see the reported values in
the main device.
Args:
iterators: List of dataset iterators for the training dataset. The
number of iterators must be the same as the number of GPUs you use.
optimizer: Optimizer to update parameters. The model should be attached
to the optimizer.
converter: Converter function to build input arrays. Each batch
extracted by the iterator is split equally between the devices and
then passed with corresponding ``device`` option to this function.
:func:`~chainer.dataset.concat_examples` is used by default.
devices: Dictionary or list of devices to which the training data is
sent. The master device will be the first one in the list or the
value attached to the key ``'main'``.
auto_new_epoch (bool): If ``True``,
:meth:`~chainer.Optimizer.new_epoch` of the main optimizer is
automatically called when the ``is_new_epoch`` attribute of the
main iterator is ``True``.
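Example (a minimal sketch; ``model``, ``dataset`` and the GPU ids are
illustrative assumptions, and two GPUs plus NCCL are required to
actually run it)::
    optimizer = chainer.optimizers.SGD()
    optimizer.setup(model)
    iterators = [chainer.iterators.SerialIterator(dataset, 32)
                 for _ in range(2)]
    updater = MultiprocessParallelUpdater(
        iterators, optimizer, devices={'main': 0, 'second': 1})
    trainer = chainer.training.Trainer(updater, (10, 'epoch'))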
"""
def __init__(self, iterators, optimizer, converter=convert.concat_examples,
devices=None, auto_new_epoch=True):
if not MultiprocessParallelUpdater.available():
raise Exception(
'NCCL is not enabled. MultiprocessParallelUpdater '
'requires NCCL.\n'
'Please reinstall CuPy after you install NCCL.\n'
'(see https://docs-cupy.chainer.org/en/latest/install.html)')
try:
cuda.cupy.cuda.driver.ctxGetCurrent()
_cuda_initialized = True
except cuda.cupy.cuda.driver.CUDADriverError:
# The context is not initialized, it will be fine.
_cuda_initialized = False
if _cuda_initialized:
raise RuntimeError(
'The CUDA context has been already initialized. '
'MultiprocessParallelUpdater assumes the context is '
'uninitialized. Please do not call CUDA API before '
'MultiprocessParallelUpdater creates processes.')
assert len(iterators) == len(devices)
for iterator in iterators[1:]:
assert len(iterator.dataset) == len(iterators[0].dataset)
# Correct optimizer parameters for new minibatch size
optim = optimizer.__class__.__name__
if optim in ('Adam', 'AdaGrad', 'RMSprop'):
optimizer.eps *= len(devices)
warnings.warn('optimizer.eps is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.eps))
elif optim in ('RMSpropGraves', 'AdaDelta'):
optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta
warnings.warn('optimizer.eps is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.eps))
elif hasattr(optimizer, 'lr'):
optimizer.lr /= len(devices)
warnings.warn('optimizer.lr is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.lr))
super(MultiprocessParallelUpdater, self).__init__(
iterator=iterators[0],
optimizer=optimizer,
converter=converter,
auto_new_epoch=auto_new_epoch,
)
if isinstance(devices, dict):
devices = devices.copy()
main = devices.pop('main')
devices = list(six.itervalues(devices))
devices = [main] + devices
elif isinstance(devices, (list, tuple)):
devices = list(devices)
else:
raise ValueError(
'devices argument should be either dict, list or tuple,'
' but {} was given.'.format(type(devices)))
if devices is None or any(device is None for device in devices):
raise ValueError('GPU devices must be specified.')
self._master = optimizer.target
self._devices = [chainer.get_device(device) for device in devices]
self._mpu_iterators = iterators
self._initialized = False
self._pipes = []
self._workers = []
self.comm = None
@staticmethod
def available():
return _available
def _send_message(self, message):
for pipe in self._pipes:
pipe.send(message)
def setup_workers(self):
if self._initialized:
return
self._initialized = True
self._master.cleargrads()
for i in six.moves.range(1, len(self._devices)):
pipe, worker_end = multiprocessing.Pipe()
worker = _Worker(i, worker_end, self)
worker.start()
self._workers.append(worker)
self._pipes.append(pipe)
with chainer.using_device(self._devices[0]):
self._master.to_device(self._devices[0])
if len(self._devices) > 1:
comm_id = nccl.get_unique_id()
self._send_message(('set comm_id', comm_id))
self.comm = nccl.NcclCommunicator(
len(self._devices), comm_id, 0)
def update_core(self):
self.setup_workers()
self._send_message(('update', None))
with chainer.using_device(self._devices[0]):
# For reducing memory
self._master.cleargrads()
optimizer = self.get_optimizer('main')
iterator = self.get_iterator('main')
batch = iterator.next()
batch = self.converter(batch, self._devices[0])
loss = _calc_loss(self._master, batch)
self._master.cleargrads()
loss.backward()
# NCCL: reduce grads
null_stream = cuda.Stream.null
if self.comm is not None:
gg = gather_grads(self._master)
nccl_data_type = _get_nccl_data_type(gg.dtype)
self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
nccl_data_type, nccl.NCCL_SUM,
0, null_stream.ptr)
scatter_grads(self._master, gg)
del gg
optimizer.update()
if self.comm is not None:
gp = gather_params(self._master)
nccl_data_type = _get_nccl_data_type(gp.dtype)
self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type,
0, null_stream.ptr)
if self.auto_new_epoch and iterator.is_new_epoch:
optimizer.new_epoch(auto=True)
def finalize(self):
self._send_message(('finalize', None))
for worker in self._workers:
worker.join()
def _calc_loss(model, in_arrays):
if isinstance(in_arrays, tuple):
return model(*in_arrays)
elif isinstance(in_arrays, dict):
return model(**in_arrays)
else:
return model(in_arrays)
def size_num_grads(link):
"""Count total size of all gradient arrays of a given link
Args:
link (chainer.link.Link): Target link object.
"""
size = 0
num = 0
for param in link.params():
if param.size == 0:
continue
size += param.size
num += 1
return size, num
def _memcpy_gather():
return cuda.elementwise(
'raw T ptrs, raw X dtypes, raw Y info',
'raw float32 dst',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
int i_dst = i;
int i_src = i;
if (id > 0) i_src -= info[id];
dst[i_dst] = 0;
if (ptrs[id] != NULL) {
if (dtypes[id] == 0) { // fp32
float *src = reinterpret_cast<float *>(ptrs[id]);
dst[i_dst] = src[i_src];
}
else { // fp16
float16 *src = reinterpret_cast<float16 *>(ptrs[id]);
dst[i_dst] = static_cast<float>(src[i_src]);
}
}
id_pre = id;
''',
'_memcpy_gather',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
def _gather(link, target):
size, num = size_num_grads(link)
ptrs = numpy.empty(num, dtype=numpy.uint64)
dtypes = numpy.empty(num, dtype=numpy.int8)
info = numpy.empty(num + 1, dtype=numpy.int32)
info[0] = 0
i = 0
for _, param in sorted(link.namedparams()):
if param.size == 0:
continue
ptrs[i] = 0 # NULL pointer
d = getattr(param, target)
if d is not None:
ptrs[i] = d.data.ptr
dtypes[i] = 0 # fp32
if param.dtype == numpy.float16:
dtypes[i] = 1 # fp16
info[i + 1] = info[i] + param.size
i += 1
info[0] = num
ptrs = cuda.to_gpu(ptrs)
dtypes = cuda.to_gpu(dtypes)
info = cuda.to_gpu(info)
return _memcpy_gather()(ptrs, dtypes, info, size=size)
def gather_grads(link):
"""Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
"""
if link.xp is numpy:
raise RuntimeError('gather_grads works only on GPU.')
return _gather(link, 'grad')
def gather_params(link):
"""Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
"""
if link.xp is numpy:
raise RuntimeError('Link.gather_params works only on GPU.')
return _gather(link, 'data')
def _memcpy_scatter():
return cuda.elementwise(
'raw T ptrs, raw X dtypes, raw Y info, raw float32 array',
'',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
int i_src = i;
int i_dst = i;
if (id > 0) i_dst -= info[id];
if (ptrs[id] != NULL) {
if (dtypes[id] == 0) { // fp32
float *dst = reinterpret_cast<float *>(ptrs[id]);
dst[i_dst] = array[i_src];
}
else { // fp16
float16 *dst = reinterpret_cast<float16 *>(ptrs[id]);
dst[i_dst] = static_cast<float16>(array[i_src]);
}
}
id_pre = id;
''',
'_memcpy_scatter',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
def _scatter(link, array, target):
size, num = size_num_grads(link)
ptrs = numpy.zeros(num, dtype=numpy.uint64)
dtypes = numpy.zeros(num, dtype=numpy.int8)
info = numpy.zeros(num + 1, dtype=numpy.int32)
info[0] = 0
i = 0
for _, param in sorted(link.namedparams()):
if param.size == 0:
continue
ptrs[i] = 0 # NULL pointer
d = getattr(param, target)
if d is None:
d = cuda.cupy.zeros(param.shape, dtype=param.dtype)
setattr(param, target, d)
ptrs[i] = d.data.ptr
dtypes[i] = 0 # fp32
if param.dtype == numpy.float16:
dtypes[i] = 1 # fp16
info[i + 1] = info[i] + param.size
i += 1
if i != num:
raise RuntimeError('number of parameters does not match the gathered buffer')
info[0] = num
ptrs = cuda.to_gpu(ptrs)
dtypes = cuda.to_gpu(dtypes)
info = cuda.to_gpu(info)
return _memcpy_scatter()(ptrs, dtypes, info, array, size=size)
def scatter_grads(link, array):
"""Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_grads()
"""
return _scatter(link, array, 'grad')
def scatter_params(link, array):
"""Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_params()
"""
return _scatter(link, array, 'data')
def _get_nccl_data_type(dtype):
"""Get data type for NCCL"""
if dtype == numpy.float32:
nccl_data_type = nccl.NCCL_FLOAT
elif dtype == numpy.float16:
nccl_data_type = nccl.NCCL_HALF
elif dtype == numpy.float64:
nccl_data_type = nccl.NCCL_DOUBLE
else:
raise RuntimeError('Unexpected data type:{}'.format(dtype))
return nccl_data_type
|
|
"""
User Account
"""
import datetime
from flask import redirect, request, url_for, session, jsonify, abort, make_response
from webportfolio import (WebPortfolio, route, flash_error, flash_success,
flash_info, flash_data, get_flashed_data, register_package, init_app,
ModelError, ViewError)
from webportfolio.decorators import (nav_menu, login_required, no_login_required,
with_user_roles)
from webportfolio.ext import (mailer, cache, storage, recaptcha, csrf,
user_authenticated, user_not_authenticated)
import webportfolio.utils as utils
from flask_login import (LoginManager, login_user, logout_user, current_user,
fresh_login_required, UserMixin)
# Primary Roles
PRIMARY_ROLES = [(90, "SUPERADMIN"), # ALL MIGHTY, RESERVED FOR SYS ADMIN
(80, "ADMIN"), # App/Site admin
(70, "MANAGER"), # Limited access, but can approve EDITOR Data
(60, "EDITOR"), # Rights to write, manage, publish own data
(50, "CONTRIBUTOR"), # Rights to only write and read own data
(10, "USER") # Simple user
]
register_package(__name__)
# ------------------------------------------------------------------------------
# The model() factory creates a fully built user model with social sign-in support
def model(db):
class UserRole(db.Model):
name = db.Column(db.String(75), index=True)
level = db.Column(db.Integer, index=True)
@classmethod
def new(cls, name, level):
name = utils.slugify(name)
role = cls.get_by_name(name)
if not role:
role = cls.create(name=name, level=level)
return role
@classmethod
def get_by_name(cls, name):
name = utils.slugify(name)
return cls.all().filter(cls.name == name).first()
@classmethod
def get_by_level(cls, level):
return cls.all().filter(cls.level == level).first()
class User(UserMixin, db.Model):
role_id = db.Column(db.Integer, db.ForeignKey(UserRole.id))
email = db.Column(db.String(75), index=True, unique=True)
email_confirmed = db.Column(db.Boolean, default=False)
password_hash = db.Column(db.String(255))
has_temp_login = db.Column(db.Boolean, default=False)
temp_login_token = db.Column(db.String(100), index=True)
temp_login_expiration = db.Column(db.DateTime)
first_name = db.Column(db.String(255))
last_name = db.Column(db.String(255))
date_of_birth = db.Column(db.Date)
sex = db.Column(db.String(10)) # To get confusion out of the way, Sex refers to natural/biological features.
profile_image_url = db.Column(db.String(255))
signup_method = db.Column(db.String(255))
active = db.Column(db.Boolean, default=True, index=True)
last_login = db.Column(db.DateTime)
last_visited = db.Column(db.DateTime)
role = db.relationship(UserRole)
# ------ FLASK-LOGIN REQUIRED METHODS ----------------------------------
@property
def is_active(self):
return self.active
# ---------- END FLASK-LOGIN REQUIREMENTS ------------------------------
@classmethod
def get_by_email(cls, email):
"""
Return a User by email address
"""
return cls.all().filter(cls.email == email).first()
@classmethod
def get_by_temp_login(cls, token):
"""
Return a User by temp_login_token
temp_login_token allows a user to login with the token
and reset the password
"""
user = cls.all().filter(cls.temp_login_token == token).first()
if user:
now = datetime.datetime.now()
if user.has_temp_login is True \
and user.temp_login_expiration > now:
return user
user.clear_temp_login()
return None
@classmethod
def get_by_oauth(cls, provider, provider_user_id):
"""
Get a user by OAuth
:param provider:
:param provider_user_id:
:return: User
"""
oauth = UserOauthLogin.get_by_provider(provider=provider,
provider_user_id=provider_user_id)
return oauth.user if oauth else None
@classmethod
def new(cls,
email,
password=None,
first_name=None,
last_name=None,
role="USER",
signup_method="email",
profile_image_url=None,
**kwargs):
"""
Create a new user account
"""
user = cls.get_by_email(email)
if user:
raise ModelError("User exists already")
user = cls.create(email=email,
first_name=first_name,
last_name=last_name,
signup_method=signup_method,
profile_image_url=profile_image_url)
if password:
user.set_password(password)
if role:
role_ = UserRole.get_by_name(role.upper())
if role_:
user.update(role_id=role_.id)
return user
@property
def full_name(self):
"""
Return the full name
:return:
"""
return "%s %s" % (self.first_name, self.last_name)
@property
def name(self):
"""
Alias to first_name
:return:
"""
return self.first_name
def password_matched(self, password):
"""
Check if the password matched the hash
:returns bool:
"""
return utils.verify_encrypted_string(password, self.password_hash)
def set_password(self, password, random=False):
"""
Encrypt the password and save it in the DB
            Return the password passed in, or the newly generated password when random=True
"""
if random:
password = utils.generate_random_string()
self.update(password_hash=utils.encrypt_string(password))
return password
def set_temp_login(self, expiration=60):
"""
Create temp login.
            Allows the user to log in with the token and change the account password
            :param expiration: expiration time in minutes
"""
expiration = datetime.datetime.now() + datetime.timedelta(minutes=expiration)
while True:
token = utils.generate_random_string(32).lower()
if not User.all().filter(User.temp_login_token == token).first():
break
self.update(has_temp_login=True,
temp_login_token=token,
temp_login_expiration=expiration)
return token
def clear_temp_login(self):
self.update(has_temp_login=False,
temp_login_token=None,
temp_login_expiration=None)
def add_oauth(self, provider, provider_user_id, **kwargs):
"""
To attach a user account to an OAUTH login
:param provider: the name of the provider
:param provider_user_id: the id
:param kwargs:
:return: Return UserOauthLogin
"""
u = UserOauthLogin.get_by_provider(provider=provider,
provider_user_id=provider_user_id)
if u:
return u
return UserOauthLogin.create(user_id=self.id,
provider=provider,
provider_user_id=provider_user_id,
**kwargs)
def has_any_roles(self, *roles):
"""
Check if user has any of the roles requested
:param roles: tuple of roles string
:return: bool
"""
roles = map(utils.slugify, list(roles))
for r in UserRole.all().filter(UserRole.name.in_(roles)):
if r.id == self.role_id:
return True
return False
class UserOauthLogin(db.Model):
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
provider = db.Column(db.String(50), index=True)
provider_user_id = db.Column(db.String(255))
name = db.Column(db.String(255))
email = db.Column(db.String(255))
profile_image_url = db.Column(db.String(255))
access_token = db.Column(db.String(255))
access_key_id = db.Column(db.String(255))
access_secret_key = db.Column(db.String(255))
link = db.Column(db.String(255))
user = db.relationship(User, backref="oauth_logins")
@classmethod
def get_by_provider(cls, provider, provider_user_id):
"""
Returns the entry of the provider and user id
:params provider: str - the provider name
:params provider_user_id:
"""
return cls.all()\
.filter(cls.provider == provider)\
.filter(cls.provider_user_id == provider_user_id)\
.first()
return utils.to_struct(User=User,
Role=UserRole,
OauthLogin=UserOauthLogin)
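# Illustrative usage sketch (an assumption about typical wiring, not part of the
# original module); `db` must supply the Active-record style API used above
# (create / all / filter / update):
#
#   user_model = model(db)
#   user_model.Role.new("ADMIN", 80)
#   u = user_model.User.new(email="jane@example.com",
#                           password="s3cret!",
#                           first_name="Jane",
#                           role="ADMIN")
#   u.has_any_roles("admin")   # True once the role has been assigned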
# ------------------------------------------------------------------------------
def account(view, **kwargs):
"""
This view is extendable
    kwargs:
        - model (required)
        - on_signin_view
        - on_signout_view
        - template_dir
"""
view_name = view.__name__
model = kwargs.pop("model")
User = model.User.User
nav_menu_context = dict(module_=view.__module__, class_=view.__name__)
login_view = "UserAccount:login"
on_signin_view = kwargs["on_signin_view"] if "on_signin_view" \
in kwargs else "Index:index"
on_signout_view = kwargs["on_signout_view"] if "on_signout_view" \
in kwargs else "Index:index"
template_dir = kwargs["template_dir"] if "template_dir" \
in kwargs else "WebPortfolio/Package/User/Account"
template_page = template_dir + "/%s.html"
login_manager = LoginManager()
login_manager.login_view = login_view
login_manager.login_message_category = "error"
init_app(login_manager.init_app)
@login_manager.user_loader
def load_user(userid):
return User.get(userid)
class Account(object):
decorators = view.decorators + [login_required]
SESSION_KEY_SET_EMAIL_DATA = "set_email_tmp_data"
TEMP_DATA_KEY = "login_tmp_data"
@property
def tmp_data(self):
return session[self.TEMP_DATA_KEY]
@tmp_data.setter
def tmp_data(self, data):
session[self.TEMP_DATA_KEY] = data
@classmethod
def _login_enabled(cls):
if not cls.config_("MODULE_USER_ACCOUNT_ENABLE_LOGIN"):
abort(403)
@classmethod
def _signup_enabled(cls):
if not cls.config_("MODULE_USER_ACCOUNT_ENABLE_SIGNUP"):
abort(403)
@classmethod
def login_user(cls, user):
login_user(user)
now = datetime.datetime.now()
user.update(last_login=now, last_visited=now)
@classmethod
def _oauth_enabled(cls):
if not cls.config_("MODULE_USER_ACCOUNT_ENABLE_OAUTH_LOGIN"):
abort(403)
@nav_menu("Login",
endpoint="UserAccount:login",
show=user_not_authenticated, **nav_menu_context)
@route("login/", methods=["GET", "POST"], endpoint="UserAccount:login")
@no_login_required
def login(self):
""" Login page """
self._login_enabled()
logout_user()
self.tmp_data = None
self.meta_(title="Login")
if request.method == "POST":
email = request.form.get("email").strip()
password = request.form.get("password").strip()
if not email or not password:
flash_error("Email or Password is empty")
return redirect(url_for(login_view, next=request.form.get("next")))
user = User.get_by_email(email)
if user and user.password_hash and user.password_matched(password):
self.login_user(user)
return redirect(request.form.get("next") or url_for(on_signin_view))
else:
flash_error("Email or Password is invalid")
return redirect(url_for(login_view, next=request.form.get("next")))
return self.render_(login_url_next=request.args.get("next", ""),
login_url_default=url_for(on_signin_view),
signup_enabled=self.config_("MODULE_USER_ACCOUNT_ENABLE_SIGNUP"),
oauth_enabled=self.config_("MODULE_USER_ACCOUNT_ENABLE_OAUTH_LOGIN"),
view_template_=template_page % "login")
@nav_menu("Logout",
endpoint="UserAccount:logout",
show=user_authenticated,
order=100, **nav_menu_context)
@route("logout/", endpoint="UserAccount:logout")
@no_login_required
def logout(self):
logout_user()
return redirect(url_for(on_signout_view or login_view))
@nav_menu("Signup",
endpoint="UserAccount:signup",
show=[user_not_authenticated], **nav_menu_context)
@route("signup/", methods=["GET", "POST"], endpoint="UserAccount:signup")
@no_login_required
def signup(self):
"""
For Email Signup
:return:
"""
self._login_enabled()
self._signup_enabled()
self.meta_(title="Signup")
if request.method == "POST":
# reCaptcha
if not recaptcha.verify():
flash_error("Invalid Security code")
return redirect(url_for("UserAccount:signup",
next=request.form.get("next")))
try:
name = request.form.get("name")
email = request.form.get("email")
password = request.form.get("password")
password2 = request.form.get("password2")
profile_image_url = request.form.get("profile_image_url", None)
if not name:
raise ViewError("Name is required")
elif not utils.is_valid_email(email):
raise ViewError("Invalid email address '%s'" % email)
elif not password.strip() or password.strip() != password2.strip():
raise ViewError("Passwords don't match")
elif not utils.is_valid_password(password):
raise ViewError("Invalid password")
else:
new_account = User.new(email=email,
password=password.strip(),
first_name=name,
profile_image_url=profile_image_url,
signup_method="email")
self.login_user(new_account)
return redirect(request.form.get("next") or url_for(on_signin_view))
except Exception as ex:
flash_error(ex.message)
return redirect(url_for("UserAccount:signup",
next=request.form.get("next")))
logout_user()
return self.render_(login_url_next=request.args.get("next", ""),
view_template_=template_page % "signup")
@route("lost-password/",
methods=["GET", "POST"],
endpoint="UserAccount:lost_password")
@no_login_required
def lost_password(self):
self._login_enabled()
logout_user()
self.meta_(title="Lost Password")
if request.method == "POST":
email = request.form.get("email")
user = User.get_by_email(email)
if user:
delivery = self.config_("MODULE_USER_ACCOUNT_RESET_PASSWORD_METHOD")
new_password = None
if delivery.upper() == "TOKEN":
token = user.set_temp_login()
url = url_for("UserAccount:reset_password",
token=token,
_external=True)
else:
new_password = user.set_password(password=None, random=True)
url = url_for("UserAccount:login", _external=True)
mailer.send_template("reset-password.txt",
method_=delivery,
to=user.email,
name=user.email,
url=url,
new_password=new_password)
flash_success("A new password has been sent to '%s'" % email)
else:
flash_error("Invalid email address")
return redirect(url_for(login_view))
else:
return self.render_(view_template_=template_page % "lost_password")
@nav_menu("Account Settings",
endpoint="UserAccount:account_settings",
order=99,
show=user_authenticated, **nav_menu_context)
@route("account-settings",
methods=["GET", "POST"],
endpoint="UserAccount:account_settings")
@fresh_login_required
def account_settings(self):
self.meta_(title="Account Settings")
if request.method == "POST":
action = request.form.get("action")
try:
action = action.lower()
#
if action == "info":
first_name = request.form.get("first_name").strip()
last_name = request.form.get("last_name", "").strip()
data = {
"first_name": first_name,
"last_name": last_name
}
current_user.update(**data)
flash_success("Account info updated successfully!")
#
elif action == "login":
confirm_password = request.form.get("confirm-password").strip()
if current_user.password_matched(confirm_password):
self.change_login_handler()
flash_success("Login Info updated successfully!")
else:
flash_error("Invalid password")
#
elif action == "password":
confirm_password = request.form.get("confirm-password").strip()
if current_user.password_matched(confirm_password):
self.change_password_handler()
flash_success("Password updated successfully!")
else:
flash_error("Invalid password")
elif action == "profile-photo":
file = request.files.get("file")
if file:
prefix = "profile-photos/%s/" % current_user.id
extensions = ["jpg", "jpeg", "png", "gif"]
my_photo = storage.upload(file,
prefix=prefix,
allowed_extensions=extensions)
if my_photo:
url = my_photo.url
current_user.update(profile_image_url=url)
flash_success("Profile Image updated successfully!")
else:
raise ViewError("Invalid action")
except Exception as e:
flash_error(e.message)
return redirect(url_for("UserAccount:account_settings"))
return self.render_(view_template_=template_page % "account_settings")
@classmethod
def change_login_handler(cls, user_context=None, email=None):
if not user_context:
user_context = current_user
if not email:
email = request.form.get("email").strip()
if not utils.is_valid_email(email):
raise UserWarning("Invalid email address '%s'" % email)
else:
if email != user_context.email and User.get_by_email(email):
raise UserWarning("Email exists already '%s'" % email)
elif email != user_context.email:
user_context.update(email=email)
return True
return False
@classmethod
def change_password_handler(cls, user_context=None, password=None,
password2=None):
if not user_context:
user_context = current_user
if not password:
password = request.form.get("password").strip()
if not password2:
password2 = request.form.get("password2").strip()
if password:
if password != password2:
raise UserWarning("Password don't match")
elif not utils.is_valid_password(password):
raise UserWarning("Invalid password")
else:
user_context.set_password(password)
return True
else:
raise UserWarning("Password is empty")
# OAUTH Login
@route("oauth-login/<provider>", methods=["GET", "POST"], endpoint="UserAccount:oauth_login")
@no_login_required
def oauth_login(self, provider):
""" Login via oauth providers """
self._login_enabled()
self._oauth_enabled()
provider = provider.lower()
result = oauth.login(provider)
response = oauth.response
popup_js_custom = {
"action": "",
"url": ""
}
if result:
if result.error:
pass
elif result.user:
result.user.update()
oauth_user = result.user
user = User.get_by_oauth(provider=provider,
provider_user_id=oauth_user.id)
if not user:
if oauth_user.email and User.get_by_email(oauth_user.email):
flash_error("Account already exists with this email '%s'. "
"Try to login or retrieve your password " % oauth_user.email)
popup_js_custom.update({
"action": "redirect",
"url": url_for(login_view, next=request.form.get("next"))
})
else:
tmp_data = {
"is_oauth": True,
"provider": provider,
"id": oauth_user.id,
"name": oauth_user.name,
"picture": oauth_user.picture,
"first_name": oauth_user.first_name,
"last_name": oauth_user.last_name,
"email": oauth_user.email,
"link": oauth_user.link
}
if not oauth_user.email:
self.tmp_data = tmp_data
popup_js_custom.update({
"action": "redirect",
"url": url_for("UserAccount:setup_login")
})
else:
try:
picture = oauth_user.picture
user = User.new(email=oauth_user.email,
                                                    first_name=oauth_user.name,
signup_method=provider,
profile_image_url=picture
)
user.add_oauth(provider,
oauth_user.provider_id,
name=oauth_user.name,
email=oauth_user.email,
profile_image_url=oauth_user.picture,
link=oauth_user.link)
except ModelError as e:
flash_error(e.message)
popup_js_custom.update({
"action": "redirect",
"url": url_for("UserAccount:login")
})
if user:
self.login_user(user)
return self.render_(popup_js=result.popup_js(custom=popup_js_custom),
view_template_=template_page % "oauth_login")
return response
@route("setup-login/", methods=["GET", "POST"], endpoint="UserAccount:setup_login")
def setup_login(self):
"""
            Allows the user to set up an email and password when none was provided,
            typically after signing in through an OAuth provider
:return:
"""
self._login_enabled()
self.meta_(title="Setup Login")
# Only user without email can set email
if current_user.is_authenticated() and current_user.email:
return redirect(url_for("%s:account_settings" % view_name))
if self.tmp_data:
if request.method == "POST":
if not self.tmp_data["is_oauth"]:
return redirect("UserAccount:login")
try:
email = request.form.get("email")
password = request.form.get("password")
password2 = request.form.get("password2")
if not utils.is_valid_email(email):
raise ViewError("Invalid email address '%s'" % email)
elif User.get_by_email(email):
raise ViewError("An account exists already with this email address '%s' " % email)
elif not password.strip() or password.strip() != password2.strip():
raise ViewError("Passwords don't match")
elif not utils.is_valid_password(password):
raise ViewError("Invalid password")
else:
user = User.new(email=email,
password=password.strip(),
                                            first_name=self.tmp_data["name"],
profile_image_url=self.tmp_data["picture"],
signup_method=self.tmp_data["provider"])
user.add_oauth(self.tmp_data["provider"],
self.tmp_data["id"],
name=self.tmp_data["name"],
email=email,
profile_image_url=self.tmp_data["picture"],
link=self.tmp_data["link"])
self.login_user(user)
self.tmp_data = None
return redirect(request.form.get("next") or url_for(on_signin_view))
except Exception as ex:
flash_error(ex.message)
return redirect(url_for("UserAccount:setup_login"))
return self.render_(provider=self.tmp_data,
view_template_=template_page % "setup_login")
else:
return redirect(url_for("UserAccount:login"))
@route("reset-password/<token>",
methods=["GET", "POST"],
endpoint="UserAccount:reset_password")
@no_login_required
def reset_password(self, token):
self._login_enabled()
logout_user()
self.meta_(title="Reset Password")
user = User.get_by_temp_login(token)
if user:
if not user.has_temp_login:
return redirect(url_for(on_signin_view))
if request.method == "POST":
try:
self.change_password_handler(user_context=user)
user.clear_temp_login()
flash_success("Password updated successfully!")
return redirect(url_for(on_signin_view))
except Exception as ex:
flash_error("Error: %s" % ex.message)
return redirect(url_for("UserAccount:reset_password", token=token))
else:
return self.render_(token=token,
view_template_=template_page % "reset_password")
else:
abort(404, "Invalid token")
@route("oauth-connect", methods=["POST"], endpoint="UserAccount:oauth_connect")
def oauth_connect(self):
""" To login via social """
email = request.form.get("email").strip()
name = request.form.get("name").strip()
provider = request.form.get("provider").strip()
provider_user_id = request.form.get("provider_user_id").strip()
image_url = request.form.get("image_url").strip()
next = request.form.get("next", "")
try:
current_user.oauth_connect(provider=provider,
provider_user_id=provider_user_id,
email=email,
name=name,
image_url=image_url)
except Exception as ex:
flash_error("Unable to link your account")
return redirect(url_for("%s:account_settings" % view_name))
return Account
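# Illustrative usage sketch (an assumption, not part of the original module): the
# factory wraps an application view class and returns the extended Account view, e.g.
#
#   AccountView = account(MyView,
#                         model=my_models,              # namespace exposing .User.User
#                         on_signin_view="Index:index",
#                         on_signout_view="Index:index")
#
# `MyView` and `my_models` are hypothetical placeholders; registering the returned
# class with the application depends on the surrounding WebPortfolio setup.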
# ------------------------------------------------------------------------------
# ADMIN
PRIVILEDGED_ROLES = ['superadmin', 'admin', 'manager']
def admin(view, **kwargs):
route_base = "user-admin"
menu_name = "User Admin"
model = kwargs.get("model")
User = model.User.User
Role = model.User.Role
template_dir = kwargs.get("template_dir", "WebPortfolio/Package/User/Admin")
template_page = template_dir + "/%s.html"
    # Create an Admin menu for all the methods in Admin
@nav_menu(menu_name, group="admin")
class NavMenu(object): pass
# The nav_menu_context helps attach all the methods to NavMenu
nav_menu_context = dict(module_=NavMenu.__module__,
class_=NavMenu.__name__)
class Admin(object):
decorators = view.decorators + [login_required, with_user_roles(*PRIVILEDGED_ROLES)]
@classmethod
def _validate_admin_roles(cls, user):
admin = current_user
@classmethod
def _user_roles_options(cls):
_r = Role.all()\
.filter(Role.level <= current_user.role.level)\
.order_by(Role.level.desc())
return [(r.id, r.name) for r in _r]
@nav_menu("All Users", endpoint="UserAdmin:index", order=1, **nav_menu_context)
@route("%s/" % route_base, endpoint="UserAdmin:index")
def user_admin_index(self):
self.meta_(title="Users - User Admin")
per_page = self.config_("APPLICATION_PAGINATION_PER_PAGE", 25)
page = request.args.get("page", 1)
include_deleted = True if request.args.get("include-deleted") == "y" else False
name = request.args.get("name")
email = request.args.get("email")
role = request.args.get("role")
sorting = request.args.get("sorting", "first_name__asc")
users = User.all(include_deleted=include_deleted)
users = users.join(Role).filter(Role.level <= current_user.role.level)
if name:
users = users.filter(User.first_name.contains(name))
if email:
users = users.filter(User.email.contains(email))
if role:
users = users.filter(User.role_id == int(role))
if sorting and "__" in sorting:
col, dir = sorting.split("__", 2)
if dir == "asc":
users = users.order_by(getattr(User, col).asc())
else:
users = users.order_by(getattr(User, col).desc())
users = users.paginate(page=page, per_page=per_page)
sorting = [("first_name__asc", "Name ASC"),
("first_name__desc", "Name DESC"),
("email__asc", "Email ASC"),
("email__desc", "Email DESC"),
("created_at__asc", "Signup ASC"),
("created_at__desc", "Signup Desc"),
("last_login__asc", "Login ASC"),
("last_login__desc", "Login Desc")]
return self.render_(user_roles_options=self._user_roles_options(),
sorting_options=sorting,
users=users,
search_query={
"include-deleted": request.args.get("include-deleted", "n"),
"role": int(request.args.get("role")) if request.args.get("role") else "",
"status": request.args.get("status"),
"first_name": request.args.get("name", ""),
"email": request.args.get("email", ""),
"sorting": request.args.get("sorting")},
view_template_=template_page % "index")
@nav_menu("User Roles", endpoint="UserAdmin:roles", order=2, **nav_menu_context)
@route("%s/roles" % route_base, methods=["GET", "POST"], endpoint="UserAdmin:roles")
@with_user_roles("superadmin", "admin")
def user_admin_roles(self):
"""
Only admin and super admin can add/remove roles
RESTRICTED ROLES CAN'T BE CHANGED
"""
            roles_range_max = 11
if request.method == "POST":
try:
id = request.form.get("id")
name = request.form.get("name")
level = request.form.get("level")
action = request.form.get("action")
if name and level:
level = int(level)
name = name.upper()
                        _levels = [r[0] for r in PRIMARY_ROLES]
                        _names = [r[1] for r in PRIMARY_ROLES]
if level in _levels or name in _names:
raise ViewError("Can't modify PRIMARY Roles - name: %s, level: %s " % (name, level))
else:
if id:
role = Role.get(id)
if role:
if action == "delete":
role.delete()
flash_success("Role '%s' deleted successfully!" % role.name)
elif action == "update":
if role.level != level and Role.get_by_level(level):
raise ViewError("Role Level '%s' exists already" % level)
elif role.name != name and Role.get_by_name(name):
raise ViewError("Role Name '%s' exists already" % name)
else:
role.update(name=name, level=level)
flash_success("Role '%s (%s)' updated successfully" % (name, level))
else:
raise ViewError("Role doesn't exist")
else:
if Role.get_by_level(level):
raise ViewError("Role Level '%s' exists already" % level)
elif Role.get_by_name(name):
raise ViewError("Role Name '%s' exists already" % name)
else:
Role.new(name=name, level=level)
flash_success("New Role '%s (%s)' addedd successfully" % (name, level))
except Exception as ex:
flash_error("Error: %s" % ex.message)
return redirect(url_for("UserAdmin:roles"))
else:
self.meta_(title="User Roles - Users Admin")
roles = Role.all().order_by(Role.level.desc())
allocated_levels = [r.level for r in roles]
                levels_options = [(l, l) for l in range(1, roles_range_max) if l not in allocated_levels]
return self.render_(roles=roles,
levels_options=levels_options,
view_template_=template_page % "roles")
@nav_menu("Info", endpoint="UserAdmin:get", show=False, **nav_menu_context)
@route("%s/<id>" % route_base, endpoint="UserAdmin:get")
def user_admin_get(self, id):
self.meta_(title="User Info - Users Admin")
user = User.get(id, include_deleted=True)
if not user:
abort(404, "User doesn't exist")
if current_user.role.level < user.role.level:
abort(403, "Not enough rights to access this user info")
return self.render_(user=user,
user_roles_options=self._user_roles_options(),
view_template_=template_page % "get")
@route("%s/post" % route_base, methods=["POST"], endpoint="UserAdmin:post")
def user_admin_post(self):
try:
id = request.form.get("id")
user = User.get(id, include_deleted=True)
if not user:
flash_error("Can't change user info. Invalid user")
return redirect(url_for("UserAdmin:index"))
if current_user.role.level < user.role.level:
abort(403, "Not enough rights to update this user info")
email = request.form.get("email", "").strip()
first_name = request.form.get("first_name")
last_name = request.form.get("last_name")
user_role = request.form.get("user_role")
action = request.form.get("action")
if user.id != current_user.id:
_role = Role.get(user_role)
if not _role:
raise ViewError("Invalid role")
if current_user.role.name.lower() not in PRIVILEDGED_ROLES:
raise ViewError("Not Enough right to change user's info")
if action == "activate":
user.update(active=True)
flash_success("User has been ACTIVATED")
elif action == "deactivate":
user.update(active=False)
flash_success("User is now DEACTIVATED")
elif action == "delete":
user.delete()
flash_success("User has been deleted")
elif action == "undelete":
user.delete(False)
flash_success("User is now active")
else:
if email and email != user.email:
if not utils.is_valid_email(email):
raise ViewError("Invalid email address '%s'" % email)
else:
if User.get_by_email(email):
raise ViewError("Email exists already '%s'" % email)
user.update(email=email)
user.update(first_name=first_name,
last_name=last_name,
role_id=_role.id)
else:
if email and email != user.email:
if not utils.is_valid_email(email):
raise ViewError("Invalid email address '%s'" % email)
else:
if User.get_by_email(email):
raise ViewError("Email exists already '%s'" % email)
user.update(email=email)
user.update(first_name=first_name,
last_name=last_name)
flash_success("User's Info updated successfully!")
except Exception as ex:
flash_error("Error: %s " % ex.message)
return redirect(url_for("UserAdmin:get", id=id))
@route("%s/reset-password" % route_base, methods=["POST"], endpoint="UserAdmin:reset_password")
def user_admin_reset_password(self):
"""
Reset the password
:returns string: The new password string
"""
try:
id = request.form.get("id")
user = User.get(id)
if not user:
raise ViewError("Invalid User")
method_ = self.config_("LOGIN_RESET_PASSWORD_METHOD", "").upper()
new_password = None
if method_ == "TOKEN":
token = user.set_temp_login()
url = url_for("UserAccount:temp_login_token",
token=token,
_external=True)
else:
new_password = user.set_password(password=None, random=True)
url = url_for("UserAccount:login", _external=True)
mailer.send_template("reset-password.txt",
method_=method_,
to=user.email,
name=user.email,
url=url,
new_password=new_password)
flash_success("Password Reset instruction is sent to email")
except Exception as ex:
flash_error("Error: %s " % ex.message)
return redirect(url_for("UserAdmin:get", id=id))
@route("%s/create" % route_base, methods=["POST"], endpoint="UserAdmin:create")
@with_user_roles(*PRIVILEDGED_ROLES)
def user_admin_create(self):
try:
email = request.form.get("email")
first_name = request.form.get("first_name")
last_name = request.form.get("last_name")
user_role = request.form.get("user_role")
_role = Role.get(user_role)
if not _role:
raise ViewError("Invalid role")
if current_user.role.level < _role.level:
raise ViewError("Can't be assigned a greater user role")
if not first_name:
raise ViewError("First Name is required")
elif not email:
raise ViewError("Email is required")
elif not utils.is_valid_email(email):
raise ViewError("Invalid email address")
if User.get_by_email(email):
raise ViewError("Email '%s' exists already" % email)
else:
user = User.new(email=email,
first_name=first_name,
last_name=last_name,
signup_method="email-from-admin",
role_id=_role.id)
if user:
flash_success("User created successfully!")
return redirect(url_for("UserAdmin:get", id=user.id))
else:
raise ViewError("Couldn't create new user")
except Exception as ex:
flash_error("Error: %s" % ex.message)
return redirect(url_for("UserAdmin:index"))
return Admin
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LSTM layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.keras._impl.keras import testing_utils
from tensorflow.python.platform import test
class LSTMLayerTest(test.TestCase):
def test_return_sequences_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
def test_dynamic_behavior_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
layer = keras.layers.LSTM(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile('sgd', 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_implementation_mode_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
for mode in [0, 1, 2]:
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'implementation': mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_statefulness_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.LSTM
with self.test_session():
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
self.assertAllClose(out7, out6, atol=1e-5)
def test_regularizers_LSTM(self):
embedding_dim = 4
layer_class = keras.layers.LSTM
with self.test_session():
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones((2, 3, 2))))
self.assertEqual(len(layer.losses), 4)
def test_constraints_LSTM(self):
embedding_dim = 4
layer_class = keras.layers.LSTM
with self.test_session():
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_with_masking_layer_LSTM(self):
layer_class = keras.layers.LSTM
with self.test_session():
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
layer_class = keras.layers.LSTM
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_specify_initial_state_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
with self.test_session():
# Test with Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
layer = keras.layers.LSTM(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
assert initial_state[0] in layer._inbound_nodes[0].input_tensors
model = keras.models.Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
def test_specify_initial_state_non_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
with self.test_session():
# Test with non-Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.backend.random_normal_variable(
(num_samples, units), 0, 1)
for _ in range(num_states)]
layer = keras.layers.LSTM(units)
output = layer(inputs, initial_state=initial_state)
model = keras.models.Model(inputs, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
targets = np.random.random((num_samples, units))
model.train_on_batch(inputs, targets)
def test_reset_states_with_values(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
with self.test_session():
layer = keras.layers.LSTM(units, stateful=True)
layer.build((num_samples, timesteps, embedding_dim))
layer.reset_states()
assert len(layer.states) == num_states
assert layer.states[0] is not None
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.zeros(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
state_shapes = [keras.backend.int_shape(state) for state in layer.states]
values = [np.ones(shape) for shape in state_shapes]
if len(values) == 1:
values = values[0]
layer.reset_states(values)
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.ones(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
# Test with invalid data
with self.assertRaises(ValueError):
layer.reset_states([1] * (len(layer.states) + 1))
def test_specify_state_with_masking(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
with self.test_session():
inputs = keras.Input((timesteps, embedding_dim))
_ = keras.layers.Masking()(inputs)
initial_state = [keras.Input((units,)) for _ in range(num_states)]
output = keras.layers.LSTM(units)(inputs, initial_state=initial_state)
model = keras.models.Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
def test_return_state(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
with self.test_session():
inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = keras.layers.LSTM(units, return_state=True, stateful=True)
outputs = layer(inputs)
state = outputs[1:]
assert len(state) == num_states
model = keras.models.Model(inputs, state[0])
inputs = np.random.random((num_samples, timesteps, embedding_dim))
state = model.predict(inputs)
self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4)
def test_state_reuse(self):
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
with self.test_session():
inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = keras.layers.LSTM(units, return_state=True, return_sequences=True)
outputs = layer(inputs)
output, state = outputs[0], outputs[1:]
output = keras.layers.LSTM(units)(output, initial_state=state)
model = keras.models.Model(inputs, output)
inputs = np.random.random((num_samples, timesteps, embedding_dim))
outputs = model.predict(inputs)
def test_initial_states_as_other_inputs(self):
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
num_states = 2
layer_class = keras.layers.LSTM
with self.test_session():
# Test with Keras tensor
main_inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
inputs = [main_inputs] + initial_state
layer = layer_class(units)
output = layer(inputs)
assert initial_state[0] in layer._inbound_nodes[0].input_tensors
model = keras.models.Model(inputs, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([main_inputs] + initial_state, targets)
if __name__ == '__main__':
test.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import division
import unittest
import math
import numpy as np
from singa import singa_wrap as singa_api
from singa import tensor
from cuda_helper import gpu_dev, cpu_dev
def _np_bn_training(x, scale, bias, rm, rv, momentum=0.1, e=1e-5):
channel = x.shape[1]
np.testing.assert_array_almost_equal(scale.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(bias.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rm.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rv.shape, (1, channel, 1, 1))
batch_m = x.mean(axis=(0, 2, 3), keepdims=True)
batch_v = x.var(axis=(0, 2, 3), keepdims=True)
x_norm = (x - batch_m) / np.sqrt(batch_v + e)
y_norm = x_norm * scale + bias
# https://arxiv.org/pdf/1502.03167.pdf
s = list(x.shape)
s[1] = 1
batch_v_unbiased = np.prod(s) * batch_v / (np.prod(s) - 1)
rm = momentum * batch_m + (1 - momentum) * rm
rv = momentum * batch_v_unbiased + (1 - momentum) * rv
# https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnBatchNormalizationForwardTraining
resultSaveInvVariance = 1 / np.sqrt(batch_v)
return y_norm, rm, rv, batch_m, resultSaveInvVariance
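# For reference, the helper above follows the standard batch-norm training step
# (Ioffe & Szegedy, 2015): per channel,
#     y = scale * (x - batch_mean) / sqrt(batch_var + e) + bias
# with running statistics updated as
#     rm = momentum * batch_mean + (1 - momentum) * rm
#     rv = momentum * batch_var_unbiased + (1 - momentum) * rv
# and resultSaveInvVariance = 1 / sqrt(batch_var), mirroring the cuDNN training output.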
def _np_bn_testing(x, scale, bias, rm, rv, momentum=0.1, e=1e-5):
channel = x.shape[1]
np.testing.assert_array_almost_equal(scale.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(bias.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rm.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rv.shape, (1, channel, 1, 1))
return scale * (x - rm) / np.sqrt(rv + e) + bias
def _cTensor_to_pyTensor(cTensor):
new_t = tensor.Tensor()
new_t.data = cTensor
new_t.shape = tuple(new_t.data.shape())
new_t.device = new_t.data.device()
new_t.dtype = new_t.data.data_type()
return new_t
def _ctensor_eq_ndarray(t1, np1):
d = t1.device()
t1.ToHost()
if t1.data_type() == singa_api.kInt:
np.testing.assert_array_almost_equal(t1.GetIntValue(t1.Size()),
np1.flatten())
elif t1.data_type() == singa_api.kFloat32:
np.testing.assert_array_almost_equal(t1.GetFloatValue(t1.Size()),
np1.flatten())
if np1.dtype == np.float32:
np.testing.assert_equal(t1.data_type(), singa_api.kFloat32)
elif np1.dtype == np.int32:
np.testing.assert_equal(t1.data_type(), singa_api.kInt)
np.testing.assert_array_almost_equal(t1.shape(), np1.shape)
t1.ToDevice(d)
def print_t(t1):
d = t1.device()
t1.ToHost()
if t1.data_type() == singa_api.kInt:
print(t1.GetIntValue(t1.Size()))
elif t1.data_type() == singa_api.kFloat32:
print(t1.GetFloatValue(t1.Size()))
t1.ToDevice(d)
class TestAPI(unittest.TestCase):
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_batchnorm_training_gpu(self):
dev = gpu_dev
def _run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
# np api
(y_1, rm_1, rv_1, bm_1, bv_1) = _np_bn_training(x_0,
s_0,
b_0,
rm_0,
rv_0,
momentum=m_0)
# singa api
rm_t = tensor.Tensor(device=dev, data=rm_0)
rv_t = tensor.Tensor(device=dev, data=rv_0)
hndl = singa_api.CudnnBatchNormHandle(
m_0,
tensor.Tensor(device=dev, data=x_0).data)
(y_2_c, bm_2_c, bv_2_c) = singa_api.GpuBatchNormForwardTraining(
hndl,
tensor.Tensor(device=dev, data=x_0).data,
tensor.Tensor(device=dev, data=s_0).data,
tensor.Tensor(device=dev, data=b_0).data, rm_t.data, rv_t.data)
np.testing.assert_array_almost_equal(
y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)))
np.testing.assert_array_almost_equal(
bm_1, tensor.to_numpy(_cTensor_to_pyTensor(bm_2_c)))
np.testing.assert_array_almost_equal(rm_1, tensor.to_numpy(rm_t))
#print(bv_1)
#print(tensor.to_numpy(_cTensor_to_pyTensor(bv_2_c)))
np.testing.assert_array_almost_equal(
bv_1, tensor.to_numpy(_cTensor_to_pyTensor(bv_2_c)), decimal=3)
np.testing.assert_array_almost_equal(rv_1,
tensor.to_numpy(rv_t),
decimal=4)
return
x_0 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 10, 10, 10, 10, 20, 20, 20, 20],
dtype=np.float32).reshape((2, 2, 2, 2))
s_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
b_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rm_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rv_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.0)
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.2)
c = 10
x_0 = np.random.random((10, c, 20, 20)).astype(np.float32)
s_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
b_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rm_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rv_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.2)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_batchnorm_testing_gpu(self):
dev = gpu_dev
def _run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
# np api
y_1 = _np_bn_testing(x_0, s_0, b_0, rm_0, rv_0, momentum=m_0)
# singa api
hndl = singa_api.CudnnBatchNormHandle(
m_0,
tensor.Tensor(device=dev, data=x_0).data)
y_2_c = singa_api.GpuBatchNormForwardInference(
hndl,
tensor.Tensor(device=dev, data=x_0).data,
tensor.Tensor(device=dev, data=s_0).data,
tensor.Tensor(device=dev, data=b_0).data,
tensor.Tensor(device=dev, data=rm_0).data,
tensor.Tensor(device=dev, data=rv_0).data)
#print(y_1)
#print(tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)))
np.testing.assert_array_almost_equal(
y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)), decimal=5)
return
x_0 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 10, 10, 10, 10, 20, 20, 20, 20],
dtype=np.float32).reshape((2, 2, 2, 2))
s_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
b_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rm_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rv_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
_run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
c = 10
x_0 = np.random.random((10, c, 20, 20)).astype(np.float32)
s_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
b_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rm_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rv_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
_run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
def _softmax_api_helper(self, dev):
def _run_test(dev, org_shape, axis, aft_shape):
x_0 = np.random.random(org_shape).astype(np.float32)
x_0 = x_0 + 1000
x0 = tensor.Tensor(device=dev, data=x_0)
# test with axis
y0 = tensor._call_singa_func(singa_api.SoftMax, x0.data, axis)
# test with numpy
x_0 = x_0.reshape(aft_shape)
x_0 = x_0 - np.max(x_0)
y1 = np.divide(np.exp(x_0),
np.sum(np.exp(x_0), axis=1).reshape(x_0.shape[0],
1)) # 2d softmax
y1 = y1.reshape(org_shape)
np.testing.assert_array_almost_equal(tensor.to_numpy(y0), y1)
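        # In each call below, aft_shape equals [prod(dims before axis),
        # prod(dims from axis onward)]: the N-d softmax along `axis` is checked
        # against an equivalent 2-d softmax taken along axis 1 of that flattened view.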
_run_test(dev, [2, 2], 1, [2, 2])
_run_test(dev, [2, 2], 0, [1, 4])
_run_test(dev, [2, 2], -1, [2, 2])
_run_test(dev, [2, 2], -2, [1, 4])
_run_test(dev, [2, 2, 2], 2, [4, 2])
_run_test(dev, [2, 2, 2], 1, [2, 4])
_run_test(dev, [2, 2, 2], 0, [1, 8])
_run_test(dev, [2, 2, 2], -1, [4, 2])
_run_test(dev, [2, 2, 2], -2, [2, 4])
_run_test(dev, [2, 2, 2], -3, [1, 8])
_run_test(dev, [2, 2, 2, 2], 3, [8, 2])
_run_test(dev, [2, 2, 2, 2], 2, [4, 4])
_run_test(dev, [2, 2, 2, 2], 1, [2, 8])
_run_test(dev, [2, 2, 2, 2], 0, [1, 16])
_run_test(dev, [2, 2, 2, 2], -1, [8, 2])
_run_test(dev, [2, 2, 2, 2], -2, [4, 4])
_run_test(dev, [2, 2, 2, 2], -3, [2, 8])
_run_test(dev, [2, 2, 2, 2], -4, [1, 16])
def test_softmax_api_cpu(self):
self._softmax_api_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_softmax_api_gpu(self):
self._softmax_api_helper(gpu_dev)
def _tensor_arithmetic_op_broadcast_helper(self, dev):
def _run_test(dev, singa_op, np_op, s1, s2):
x_0 = np.random.random(s1).astype(np.float32)
y_0 = np.random.random(s2).astype(np.float32)
x0 = tensor.Tensor(device=dev, data=x_0)
y0 = tensor.Tensor(device=dev, data=y_0)
z0 = tensor._call_singa_func(singa_op, x0.data, y0.data)
z0.to_host()
np.testing.assert_array_almost_equal(tensor.to_numpy(z0),
np_op(x_0, y_0))
return
for s_op, n_op in zip([
singa_api.Pow,
singa_api.__add__,
singa_api.__div__,
singa_api.__sub__,
singa_api.__mul__,
], [np.power, np.add, np.divide, np.subtract, np.multiply]):
_run_test(dev, s_op, n_op, [6], [1])
_run_test(dev, s_op, n_op, [2, 3], [2, 3])
_run_test(dev, s_op, n_op, [3, 2], [1])
_run_test(dev, s_op, n_op, [3, 1, 2], [3, 1, 1])
_run_test(dev, s_op, n_op, [2, 3, 4, 5], [5])
_run_test(dev, s_op, n_op, [2, 3, 4, 5], [1, 1, 1])
_run_test(dev, s_op, n_op, [2, 3, 4, 5], [1, 1, 1, 1])
_run_test(dev, s_op, n_op, [2, 3, 4, 5], [4, 5]) # 45+2345=2345
_run_test(dev, s_op, n_op, [3, 1, 2, 1], [3, 1, 2])
_run_test(dev, s_op, n_op, [4, 5], [2, 3, 4, 5]) # 45+2345=2345
_run_test(dev, s_op, n_op, [1, 4, 5], [2, 3, 1, 1]) # 145+2311=2345
_run_test(dev, s_op, n_op, [3, 4, 5], [2, 1, 1, 1]) # 345+2111=2345
def test_tensor_arithmetic_op_broadcast_cpu(self):
self._tensor_arithmetic_op_broadcast_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_tensor_arithmetic_op_broadcast_gpu(self):
self._tensor_arithmetic_op_broadcast_helper(gpu_dev)
def _transpose_and_arithmetic_op_broadcast_helper(self, dev):
def _test(s1, s2, axis1, axis2, s3, s_op, n_op, dev):
x_0 = np.random.random(s1).astype(np.float32)
y_0 = np.random.random(s2).astype(np.float32)
x0 = tensor.Tensor(device=dev, data=x_0)
y0 = tensor.Tensor(device=dev, data=y_0)
x1 = x0.transpose(axis1)
y1 = y0.transpose(axis2)
z0 = tensor._call_singa_func(s_op, x1.data, y1.data)
z0.to_host()
np.testing.assert_array_almost_equal(
tensor.to_numpy(z0),
n_op(x_0.transpose(axis1), y_0.transpose(axis2)))
np.testing.assert_array_almost_equal(z0.shape, s3)
return
for s_op, n_op in zip([
singa_api.Pow,
singa_api.__add__,
singa_api.__div__,
singa_api.__sub__,
singa_api.__mul__,
], [np.power, np.add, np.divide, np.subtract, np.multiply]):
s1 = [1, 5, 1, 3]
s2 = [3, 1, 1, 4]
axis1 = [3, 2, 1, 0] # 3121
axis2 = [1, 0, 2, 3] # 1314
s3 = [3, 3, 5, 4]
_test(s1, s2, axis1, axis2, s3, s_op, n_op, dev)
s1 = [1, 5, 1]
s2 = [1, 3, 2]
axis1 = [2, 1, 0] # 151
axis2 = [1, 0, 2] # 312
s3 = [3, 5, 2]
_test(s1, s2, axis1, axis2, s3, s_op, n_op, dev)
s1 = [5, 1]
s2 = [1, 3]
axis1 = [1, 0] # 15
axis2 = [1, 0] # 31
s3 = [3, 5]
_test(s1, s2, axis1, axis2, s3, s_op, n_op, dev)
def test_transpose_and_arithmetic_op_broadcast_cpu(self):
self._transpose_and_arithmetic_op_broadcast_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_transpose_and_arithmetic_op_broadcast_gpu(self):
self._transpose_and_arithmetic_op_broadcast_helper(gpu_dev)
def test_batchnorm_training_dnnl(self):
dev = cpu_dev
def _np_bn_training(x, scale, bias, rm, rv, momentum=0.1, e=1e-5):
channel = x.shape[1]
np.testing.assert_array_almost_equal(scale.shape,
(1, channel, 1, 1))
np.testing.assert_array_almost_equal(bias.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rm.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rv.shape, (1, channel, 1, 1))
batch_m = x.mean(axis=(0, 2, 3), keepdims=True)
batch_v = x.var(axis=(0, 2, 3), keepdims=True)
x_norm = (x - batch_m) / np.sqrt(batch_v + e)
y_norm = x_norm * scale + bias
# https://arxiv.org/pdf/1502.03167.pdf
s = list(x.shape)
s[1] = 1
batch_v_unbiased = np.prod(s) * batch_v / (np.prod(s) - 1)
rm = momentum * batch_m + (1 - momentum) * rm
rv = momentum * batch_v_unbiased + (1 - momentum) * rv
# https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnBatchNormalizationForwardTraining
# this value is useful for bwd computation
resultSaveInvVariance = 1 / np.sqrt(batch_v)
return y_norm, rm, rv, batch_m, resultSaveInvVariance
def _run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
# np api
(y_1, rm_1, rv_1, bm_1, bv_1) = _np_bn_training(x_0,
s_0,
b_0,
rm_0,
rv_0,
momentum=m_0)
# singa api
hndl = singa_api.BatchNormHandle(
m_0,
tensor.Tensor(device=dev, data=x_0).data)
(y_2_c, bm_2_c, bv_2_c) = singa_api.CpuBatchNormForwardTraining(
hndl,
tensor.Tensor(device=dev, data=x_0).data,
tensor.Tensor(device=dev, data=s_0).data,
tensor.Tensor(device=dev, data=b_0).data,
tensor.Tensor(device=dev, data=rm_0).data,
tensor.Tensor(device=dev, data=rv_0).data)
np.testing.assert_array_almost_equal(
y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)), decimal=5)
np.testing.assert_array_almost_equal(
bm_1, tensor.to_numpy(_cTensor_to_pyTensor(bm_2_c)), decimal=5)
#print(bv_1)
#print(tensor.to_numpy(_cTensor_to_pyTensor(bv_2_c)))
#np.testing.assert_array_almost_equal(
# bv_1, tensor.to_numpy(_cTensor_to_pyTensor(bv_2_c)), decimal=3)
return
x_0 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 10, 10, 10, 10, 20, 20, 20, 20],
dtype=np.float32).reshape((2, 2, 2, 2))
s_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
b_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rm_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rv_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.0)
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.2)
c = 10
x_0 = np.random.random((10, c, 20, 20)).astype(np.float32)
s_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
b_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rm_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rv_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.2)
def test_batchnorm_testing_dnnl(self):
dev = cpu_dev
def _np_bn_testing(x, scale, bias, rm, rv, momentum=0.1, e=1e-5):
channel = x.shape[1]
np.testing.assert_array_almost_equal(scale.shape,
(1, channel, 1, 1))
np.testing.assert_array_almost_equal(bias.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rm.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rv.shape, (1, channel, 1, 1))
return scale * (x - rm) / np.sqrt(rv + e) + bias
def _run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
# np api
y_1 = _np_bn_testing(x_0, s_0, b_0, rm_0, rv_0, momentum=m_0)
# singa api
hndl = singa_api.BatchNormHandle(
m_0,
tensor.Tensor(device=dev, data=x_0).data)
y_2_c = singa_api.CpuBatchNormForwardInference(
hndl,
tensor.Tensor(device=dev, data=x_0).data,
tensor.Tensor(device=dev, data=s_0).data,
tensor.Tensor(device=dev, data=b_0).data,
tensor.Tensor(device=dev, data=rm_0).data,
tensor.Tensor(device=dev, data=rv_0).data)
#print(y_1)
#print(tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)))
np.testing.assert_array_almost_equal(
y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)), decimal=5)
return
x_0 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 10, 10, 10, 10, 20, 20, 20, 20],
dtype=np.float32).reshape((2, 2, 2, 2))
s_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
b_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rm_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rv_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
_run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
c = 10
x_0 = np.random.random((10, c, 20, 20)).astype(np.float32)
s_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
b_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rm_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rv_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
_run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
def test_batchnorm_backward_dnnl(self):
dev = cpu_dev
N = 1
C = 3
H = 2
W = 2
data_shape = [N, C, H, W]
param_shape = [1, C, 1, 1]
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x_0 = np.array(data, dtype=np.float32).reshape(data_shape)
y_0 = np.array(data, dtype=np.float32).reshape(data_shape)
dy_0 = np.array(data, dtype=np.float32).reshape(data_shape)
scale_0 = np.array([1] * C, dtype=np.float32).reshape(param_shape)
bias_0 = np.array([0] * C, dtype=np.float32).reshape(param_shape)
mean_0 = x_0.mean(axis=(0, 2, 3), keepdims=True)
var_0 = x_0.var(axis=(0, 2, 3), keepdims=True)
hndl = singa_api.BatchNormHandle(
0.1,
tensor.Tensor(device=dev, data=x_0).data)
(dx_2_c, _, _) = singa_api.CpuBatchNormBackwardx(
hndl,
tensor.Tensor(device=dev, data=y_0).data,
tensor.Tensor(device=dev, data=dy_0).data,
tensor.Tensor(device=dev, data=x_0).data,
tensor.Tensor(device=dev, data=scale_0).data,
tensor.Tensor(device=dev, data=bias_0).data,
tensor.Tensor(device=dev, data=mean_0).data,
tensor.Tensor(device=dev, data=var_0).data,
)
dx_truth = np.array([[[[-1.0769e-05, -3.5985e-06],
[3.5985e-06, 1.0769e-05]],
[[-1.0769e-05, -3.5985e-06],
[3.5985e-06, 1.0769e-05]],
[[-1.0769e-05, -3.5985e-06],
[3.5985e-06, 1.0769e-05]]]])
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(dx_2_c)), dx_truth)
return
def test_softmax_api_dnnl_backend(self):
dev = cpu_dev
def _run_test(org_shape, axis, aft_shape):
x_0 = np.random.random(org_shape).astype(np.float32)
x_0 = x_0 + 1000
x0 = tensor.Tensor(device=dev, data=x_0)
# test with axis
y0 = tensor._call_singa_func(singa_api.SoftMax, x0.data, axis)
# test with numpy
x_0 = x_0.reshape(aft_shape)
x_0 = x_0 - np.max(x_0)
y1 = np.divide(np.exp(x_0),
np.sum(np.exp(x_0), axis=1).reshape(x_0.shape[0],
1)) # 2d softmax
y1 = y1.reshape(org_shape)
np.testing.assert_array_almost_equal(tensor.to_numpy(y0), y1)
_run_test([2, 2], 1, [2, 2])
_run_test([2, 2], 0, [1, 4])
_run_test([2, 2], -1, [2, 2])
_run_test([2, 2], -2, [1, 4])
_run_test([2, 2, 2], 2, [4, 2])
_run_test([2, 2, 2], 1, [2, 4])
_run_test([2, 2, 2], 0, [1, 8])
_run_test([2, 2, 2], -1, [4, 2])
_run_test([2, 2, 2], -2, [2, 4])
_run_test([2, 2, 2], -3, [1, 8])
_run_test([2, 2, 2, 2], 3, [8, 2])
_run_test([2, 2, 2, 2], 2, [4, 4])
_run_test([2, 2, 2, 2], 1, [2, 8])
_run_test([2, 2, 2, 2], 0, [1, 16])
_run_test([2, 2, 2, 2], -1, [8, 2])
_run_test([2, 2, 2, 2], -2, [4, 4])
_run_test([2, 2, 2, 2], -3, [2, 8])
_run_test([2, 2, 2, 2], -4, [1, 16])
def test_dnnl_pooling_max(self):
dev = cpu_dev
N = 1
C = 3
H = 2
W = 2
data_shape = [N, C, H, W]
param_shape = [1, C, 1, 1]
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x0 = np.array(data, dtype=np.float32).reshape(data_shape)
x0_ct = tensor.Tensor(device=dev, data=x0).data
dy0 = np.array([1, 2, 3], dtype=np.float32).reshape([1, 3, 1, 1])
dy0_ct = tensor.Tensor(device=dev, data=dy0).data
hndl = singa_api.PoolingHandle(x0_ct, [2, 2], [1, 1], [0, 0], True)
y0_ct = singa_api.CpuPoolingForward(hndl, x0_ct)
y1 = np.array([[[[4.]], [[8.]], [[12.]]]])
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(y0_ct)), y1)
dx0_ct = singa_api.CpuPoolingBackward(hndl, dy0_ct, x0_ct, y0_ct)
dx1 = np.array([[[[0., 0.], [0., 1.]], [[0., 0.], [0., 2.]],
[[0., 0.], [0., 3.]]]])
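        # With a 2x2 max pool over each 2x2 channel plane, only the max element of each
        # channel (the values 4, 8 and 12) receives the upstream gradient dy0 = [1, 2, 3];
        # every other position in dx gets zero, which is what dx1 encodes.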
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(dx0_ct)), dx1)
def test_dnnl_pooling_avg(self):
dev = cpu_dev
N = 1
C = 3
H = 2
W = 2
data_shape = [N, C, H, W]
param_shape = [1, C, 1, 1]
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x0 = np.array(data, dtype=np.float32).reshape(data_shape)
x0_ct = tensor.Tensor(device=dev, data=x0).data
dy0 = np.array([1, 2, 3], dtype=np.float32).reshape([1, 3, 1, 1])
dy0_ct = tensor.Tensor(device=dev, data=dy0).data
hndl = singa_api.PoolingHandle(x0_ct, [2, 2], [1, 1], [0, 0], False)
y0_ct = singa_api.CpuPoolingForward(hndl, x0_ct)
y1 = np.array([[[[2.5000]], [[6.5000]], [[10.5000]]]])
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(y0_ct)), y1)
dx0_ct = singa_api.CpuPoolingBackward(hndl, dy0_ct, x0_ct, y0_ct)
dx1 = np.array([[[[0.2500, 0.2500], [0.2500, 0.2500]],
[[0.5000, 0.5000], [0.5000, 0.5000]],
[[0.7500, 0.7500], [0.7500, 0.7500]]]])
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(dx0_ct)), dx1)
def _concat_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np2 = np.random.random([5, 6, 7, 1]).astype(np.float32)
np3 = np.concatenate((np1, np2), axis=3)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.Tensor(device=dev, data=np2)
ctensors = singa_api.VecTensor()
ctensors.append(t1.data)
ctensors.append(t2.data)
t3_ct = singa_api.ConcatOn(ctensors, 3)
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(t3_ct)), np3)
def test_concat_cpu(self):
self._concat_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_concat_gpu(self):
self._concat_helper(gpu_dev)
def _ceil_helper(self, dev):
        np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np1 = np1 * 10
np2 = np.ceil(np1)
t1 = tensor.Tensor(device=dev, data=np1)
t2_ct = singa_api.Ceil(t1.data)
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(t2_ct)), np2)
def test_ceil_cpu(self):
self._ceil_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_ceil_gpu(self):
self._ceil_helper(gpu_dev)
def _as_type_helper(self, dev):
np1 = np.random.random([3]).astype(np.float32)
np1 = np1 * 10 - 5
np2 = np1.astype(np.int32)
np3 = np2.astype(np.float32)
        t1 = tensor.Tensor(device=dev, data=np1)
t1_ct = t1.data
self.assertEqual(t1_ct.data_type(), singa_api.kFloat32)
t1_ct = t1_ct.AsType(singa_api.kInt)
self.assertEqual(t1_ct.data_type(), singa_api.kInt)
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(t1_ct)), np2)
t1_ct = t1_ct.AsType(singa_api.kFloat32)
self.assertEqual(t1_ct.data_type(), singa_api.kFloat32)
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(t1_ct)), np3)
def test_as_type_cpu(self):
self._as_type_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_as_type_gpu(self):
self._as_type_helper(gpu_dev)
def _as_type2_helper(self, dev):
shape1 = [1, 2, 3, 4]
shape2 = [4, 3, 2, 1]
np_int = np.random.randint(0, 10, shape1).astype(np.int32)
np_flt = np_int.astype(np.float32)
t1 = singa_api.Tensor(shape1, dev, singa_api.kInt)
t1.CopyIntDataFromHostPtr(np_int.flatten())
_ctensor_eq_ndarray(t1, np_int)
t1 = singa_api.Reshape(t1, shape2)
t2 = t1.AsType(singa_api.kFloat32)
_ctensor_eq_ndarray(t2, np_flt.reshape(shape2))
t3 = t2.AsType(singa_api.kInt)
_ctensor_eq_ndarray(t3, np_int.reshape(shape2))
t1 = singa_api.Reshape(t1, shape1)
t4 = t1.AsType(singa_api.kFloat32)
_ctensor_eq_ndarray(t4, np_flt.reshape(shape1))
def test_as_type2_cpu(self):
self._as_type2_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_as_type2_gpu(self):
self._as_type2_helper(gpu_dev)
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import unicode_literals
import base64
from collections import OrderedDict
from datetime import timedelta
import logging
from django.core.exceptions import ImproperlyConfigured, MultipleObjectsReturned, ObjectDoesNotExist
from django.urls import reverse, Resolver404
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.fields import Field, ReadOnlyField
import six
from waldur_core.core import utils as core_utils
from waldur_core.core.fields import TimestampField
from waldur_core.core.signals import pre_serializer_fields
logger = logging.getLogger(__name__)
class AuthTokenSerializer(serializers.Serializer):
"""
    API token serializer loosely based on DRF's default AuthTokenSerializer,
    but with the authorization logic extracted to the view.
"""
# Fields are both required, non-blank and don't allow nulls by default
username = serializers.CharField()
password = serializers.CharField()
class Base64Field(serializers.CharField):
def to_internal_value(self, data):
value = super(Base64Field, self).to_internal_value(data)
try:
base64.b64decode(value)
return value
except (TypeError, ValueError):
            raise serializers.ValidationError(_('This field should be a valid Base64 encoded string.'))
def to_representation(self, value):
value = super(Base64Field, self).to_representation(value)
if isinstance(value, six.text_type):
value = value.encode('utf-8')
return base64.b64encode(value)
class BasicInfoSerializer(serializers.HyperlinkedModelSerializer):
class Meta(object):
fields = ('url', 'uuid', 'name')
extra_kwargs = {
'url': {'lookup_field': 'uuid'},
}
class UnboundSerializerMethodField(ReadOnlyField):
"""
A field that gets its value by calling a provided filter callback.
"""
def __init__(self, filter_function, *args, **kwargs):
self.filter_function = filter_function
super(UnboundSerializerMethodField, self).__init__(*args, **kwargs)
def to_representation(self, value):
request = self.context.get('request')
return self.filter_function(value, request)
class GenericRelatedField(Field):
"""
A custom field to use for the `tagged_object` generic relationship.
"""
read_only = False
_default_view_name = '%(model_name)s-detail'
lookup_fields = ['uuid', 'pk']
def __init__(self, related_models=(), **kwargs):
super(GenericRelatedField, self).__init__(**kwargs)
self.related_models = related_models
def _get_url(self, obj):
"""
Gets object url
"""
format_kwargs = {
'app_label': obj._meta.app_label,
}
try:
format_kwargs['model_name'] = getattr(obj.__class__, 'get_url_name')()
except AttributeError:
format_kwargs['model_name'] = obj._meta.object_name.lower()
return self._default_view_name % format_kwargs
def _get_request(self):
try:
return self.context['request']
except KeyError:
            raise AttributeError('GenericRelatedField has to be initialized with `request` in context')
def to_representation(self, obj):
"""
        Serializes any object to its URL representation
"""
kwargs = None
for field in self.lookup_fields:
if hasattr(obj, field):
kwargs = {field: getattr(obj, field)}
break
if kwargs is None:
raise AttributeError('Related object does not have any of lookup_fields')
request = self._get_request()
return request.build_absolute_uri(reverse(self._get_url(obj), kwargs=kwargs))
def to_internal_value(self, data):
"""
Restores model instance from its url
"""
if not data:
return None
request = self._get_request()
user = request.user
try:
obj = core_utils.instance_from_url(data, user=user)
model = obj.__class__
except ValueError:
raise serializers.ValidationError(_('URL is invalid: %s.') % data)
except (Resolver404, AttributeError, MultipleObjectsReturned, ObjectDoesNotExist):
raise serializers.ValidationError(_("Can't restore object from url: %s") % data)
if model not in self.related_models:
raise serializers.ValidationError(_('%s object does not support such relationship.') % six.text_type(obj))
return obj
class AugmentedSerializerMixin(object):
"""
This mixin provides several extensions to stock Serializer class:
1. Add extra fields to serializer from dependent applications in a way
that doesn't introduce circular dependencies.
To achieve this, dependent application should subscribe
to pre_serializer_fields signal and inject additional fields.
Example of signal handler implementation:
from waldur_core.structure.serializers import CustomerSerializer
def add_customer_name(sender, fields, **kwargs):
fields['customer_name'] = ReadOnlyField(source='customer.name')
pre_serializer_fields.connect(
handlers.add_customer_name,
sender=CustomerSerializer
)
2. Declaratively add attributes fields of related entities for ModelSerializers.
       To achieve this, list related fields whose attributes you want to include.
Example:
class ProjectSerializer(AugmentedSerializerMixin,
serializers.HyperlinkedModelSerializer):
class Meta(object):
model = models.Project
fields = (
'url', 'uuid', 'name',
'customer', 'customer_uuid', 'customer_name',
)
related_paths = ('customer',)
# This is equivalent to listing the fields explicitly,
# by default "uuid" and "name" fields of related object are added:
class ProjectSerializer(AugmentedSerializerMixin,
serializers.HyperlinkedModelSerializer):
customer_uuid = serializers.ReadOnlyField(source='customer.uuid')
customer_name = serializers.ReadOnlyField(source='customer.name')
class Meta(object):
model = models.Project
fields = (
'url', 'uuid', 'name',
'customer', 'customer_uuid', 'customer_name',
)
lookup_field = 'uuid'
# The fields of related object can be customized:
class ProjectSerializer(AugmentedSerializerMixin,
serializers.HyperlinkedModelSerializer):
class Meta(object):
model = models.Project
fields = (
'url', 'uuid', 'name',
'customer', 'customer_uuid',
'customer_name', 'customer_native_name',
)
related_paths = {
'customer': ('uuid', 'name', 'native_name')
}
3. Protect some fields from change.
Example:
class ProjectSerializer(AugmentedSerializerMixin,
serializers.HyperlinkedModelSerializer):
class Meta(object):
model = models.Project
fields = ('url', 'uuid', 'name', 'customer')
protected_fields = ('customer',)
    4. This mixin overrides the "get_extra_kwargs" method and puts "view_name" into extra_kwargs,
       or uses the URL name specified in the model of the serialized object.
"""
def get_fields(self):
fields = super(AugmentedSerializerMixin, self).get_fields()
pre_serializer_fields.send(sender=self.__class__, fields=fields)
try:
protected_fields = self.Meta.protected_fields
except AttributeError:
pass
else:
try:
method = self.context['view'].request.method
except (KeyError, AttributeError):
return fields
if method in ('PUT', 'PATCH'):
for field in protected_fields:
fields[field].read_only = True
return fields
def _get_related_paths(self):
try:
related_paths = self.Meta.related_paths
except AttributeError:
return {}
if not isinstance(self, serializers.ModelSerializer):
raise ImproperlyConfigured(
'related_paths can be defined only for ModelSerializer.'
)
if isinstance(related_paths, (list, tuple)):
related_paths = {path: ('name', 'uuid') for path in related_paths}
return related_paths
def build_unknown_field(self, field_name, model_class):
related_paths = self._get_related_paths()
related_field_source_map = {
'{0}_{1}'.format(path.split('.')[-1], attribute): '{0}.{1}'.format(path, attribute)
for path, attributes in related_paths.items()
for attribute in attributes
}
try:
return serializers.ReadOnlyField, {'source': related_field_source_map[field_name]}
except KeyError:
return super(AugmentedSerializerMixin, self).build_unknown_field(field_name, model_class)
def get_extra_kwargs(self):
extra_kwargs = super(AugmentedSerializerMixin, self).get_extra_kwargs()
if hasattr(self.Meta, 'view_name'):
view_name = self.Meta.view_name
else:
view_name = core_utils.get_detail_view_name(self.Meta.model)
if 'url' in extra_kwargs:
extra_kwargs['url']['view_name'] = view_name
else:
extra_kwargs['url'] = {'view_name': view_name}
return extra_kwargs
class RestrictedSerializerMixin(object):
"""
    This mixin allows specifying the list of fields to be rendered by the serializer.
    It expects the request to be available in the serializer's context.
"""
FIELDS_PARAM_NAME = 'field'
def get_fields(self):
fields = super(RestrictedSerializerMixin, self).get_fields()
if 'request' not in self.context:
return fields
query_params = self.context['request'].query_params
keys = query_params.getlist(self.FIELDS_PARAM_NAME)
keys = set(key for key in keys if key in fields.keys())
if not keys:
return fields
return OrderedDict(((key, value) for key, value in fields.items() if key in keys))
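# Illustrative usage sketch (hypothetical serializer and endpoint names; only the
# "?field=..." behaviour comes from RestrictedSerializerMixin above):
#
#   class ProjectSummarySerializer(RestrictedSerializerMixin,
#                                  serializers.HyperlinkedModelSerializer):
#       class Meta(object):
#           model = models.Project
#           fields = ('url', 'uuid', 'name', 'description')
#
#   # GET /api/projects/?field=uuid&field=name -> only "uuid" and "name" are rendered;
#   # without any "field" parameters the full field set is returned unchanged.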
class RequiredFieldsMixin(object):
"""
    This mixin allows specifying a list of required fields.
    It expects a list of field names as the Meta.required_fields attribute.
"""
def get_fields(self):
fields = super(RequiredFieldsMixin, self).get_fields()
        required_fields = getattr(self.Meta, 'required_fields', None) or []
for name in required_fields:
field = fields.get(name)
if field:
field.required = True
return fields
class ExtraFieldOptionsMixin(object):
"""
    This mixin allows specifying extra field metadata.
    It expects a dictionary of field names and options as the Meta.extra_field_options attribute.
"""
def get_fields(self):
fields = super(ExtraFieldOptionsMixin, self).get_fields()
        extra_field_options = getattr(self.Meta, 'extra_field_options', None) or {}
for name, options in extra_field_options.items():
field = fields.get(name)
if field:
for key, val in options.items():
setattr(field, key, val)
return fields
class HyperlinkedRelatedModelSerializer(serializers.HyperlinkedModelSerializer):
def __init__(self, **kwargs):
self.queryset = kwargs.pop('queryset', None)
assert self.queryset is not None or kwargs.get('read_only', None), (
'Relational field must provide a `queryset` argument, '
'or set read_only=`True`.'
)
assert not (self.queryset is not None and kwargs.get('read_only', None)), (
'Relational fields should not provide a `queryset` argument, '
'when setting read_only=`True`.'
)
super(HyperlinkedRelatedModelSerializer, self).__init__(**kwargs)
def to_internal_value(self, data):
if 'url' not in data:
raise serializers.ValidationError(_('URL has to be defined for related object.'))
url_field = self.fields['url']
# This is tricky: self.fields['url'] is the one generated
# based on Meta.fields.
# By default ModelSerializer generates it as HyperlinkedIdentityField,
# which is read-only, thus it doesn't get deserialized from POST body.
# So, we "borrow" its view_name and lookup_field to create
# a HyperlinkedRelatedField which can turn url into a proper model
# instance.
url = serializers.HyperlinkedRelatedField(
queryset=self.queryset.all(),
view_name=url_field.view_name,
lookup_field=url_field.lookup_field,
)
return url.to_internal_value(data['url'])
class TimestampIntervalSerializer(serializers.Serializer):
start = TimestampField(required=False)
end = TimestampField(required=False)
def validate(self, data):
"""
Check that the start is before the end.
"""
if 'start' in data and 'end' in data and data['start'] >= data['end']:
raise serializers.ValidationError(_('End must occur after start.'))
return data
# TimeInterval serializer is used for validation only. We are providing custom method for such serializers
# to avoid confusion with to_internal_value or to_representation DRF methods.
def get_filter_data(self):
""" Return start and end as datetime """
return self.validated_data
class HistorySerializer(serializers.Serializer):
"""
    Receives datetimes as timestamps and converts them to a list of datetime points.
    Supports 2 types of input data:
- start, end and points_count - interval from <start> to <end> will be automatically split into
<points_count> pieces
- point_list - list of timestamps that will be converted to datetime points
"""
start = TimestampField(required=False)
end = TimestampField(required=False)
points_count = serializers.IntegerField(min_value=2, required=False)
point_list = serializers.ListField(
child=TimestampField(),
required=False
)
def validate(self, attrs):
autosplit_fields = {'start', 'end', 'points_count'}
if ('point_list' not in attrs or not attrs['point_list']) and not autosplit_fields == set(attrs.keys()):
            raise serializers.ValidationError(
                _('Not enough parameters for historical data. '
                  '(Either "point_list" or "start" + "end" + "points_count" parameters have to be provided).'))
        if 'point_list' in attrs and autosplit_fields & set(attrs.keys()):
            raise serializers.ValidationError(
                _('Too many parameters for historical data. '
                  '(Either "point_list" or "start" + "end" + "points_count" parameters have to be provided).'))
        if 'point_list' not in attrs and not attrs['start'] < attrs['end']:
            raise serializers.ValidationError(_('Start timestamp has to be earlier than end timestamp.'))
return attrs
# History serializer is used for validation only. We are providing custom method for such serializers
# to avoid confusion with to_internal_value or to_representation DRF methods.
def get_filter_data(self):
if 'point_list' in self.validated_data:
return self.validated_data['point_list']
else:
interval = ((self.validated_data['end'] - self.validated_data['start']) /
(self.validated_data['points_count'] - 1))
return [self.validated_data['start'] + interval * i for i in range(self.validated_data['points_count'])]
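# Illustrative sketch (timestamps are made-up Unix epoch values): HistorySerializer
# accepts either an explicit list of points,
#   {"point_list": [1420070400, 1420074000, 1420077600]}
# or an interval that get_filter_data() splits into evenly spaced datetimes,
#   {"start": 1420070400, "end": 1420077600, "points_count": 3}
# which yields [start, start + 1 hour, end] since interval = (end - start) / (points_count - 1).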
class TimelineSerializer(serializers.Serializer):
INTERVAL_CHOICES = ('hour', 'day', 'week', 'month')
start_time = TimestampField(default=lambda: core_utils.timeshift(days=-1))
end_time = TimestampField(default=lambda: core_utils.timeshift())
interval = serializers.ChoiceField(choices=INTERVAL_CHOICES, default='day')
def get_date_points(self):
start_time = self.validated_data['start_time']
end_time = self.validated_data['end_time']
interval = self.validated_data['interval']
if interval == 'hour':
start_point = start_time.replace(second=0, minute=0, microsecond=0)
interval = timedelta(hours=1)
elif interval == 'day':
start_point = start_time.replace(hour=0, second=0, minute=0, microsecond=0)
interval = timedelta(days=1)
elif interval == 'week':
start_point = start_time.replace(hour=0, second=0, minute=0, microsecond=0)
interval = timedelta(days=7)
elif interval == 'month':
start_point = start_time.replace(hour=0, second=0, minute=0, microsecond=0)
interval = timedelta(days=30)
points = [start_time]
current_point = start_point
while current_point <= end_time:
points.append(current_point)
current_point += interval
if points[-1] != end_time:
points.append(end_time)
return [p for p in points if start_time <= p <= end_time]
class BaseSummarySerializer(serializers.Serializer):
""" Serializer that renders each instance with its own specific serializer """
@classmethod
def get_serializer(cls, model):
raise NotImplementedError('Method `get_serializer` should be implemented for SummarySerializer.')
@classmethod
def eager_load(cls, summary_queryset):
optimized_querysets = []
for queryset in summary_queryset.querysets:
serializer = cls.get_serializer(queryset.model)
optimized_querysets.append(serializer.eager_load(queryset))
summary_queryset.querysets = optimized_querysets
return summary_queryset
def to_representation(self, instance):
serializer = self.get_serializer(instance.__class__)
return serializer(instance, context=self.context).data
|
|
"""
Updates made:
- Fixed Rotation
Updates to be made:
repeated code needs to be made into functions:
- startCol and startRow
- dividing check win into check color win
Safeguards for bad inputs
diagonal check for win
make min max heuristic
Add alpha beta pruning
improve till you run out of time.
"""
import random
import math
def copyGrid(grid):
#To start a clean slate. i.e to ensure that change in newgrid doesn't change grid.
#make a copy grid function for this.
newgrid = [['.' for x in range(6)] for x in range(6)]
for i in range(6):
for j in range(6):
newgrid[i][j] = grid[i][j]
return newgrid
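# Note on the block arithmetic used by rotate(), makeMove() and allPossibleMoves():
# blocks 1-4 are the four 3x3 quadrants of the 6x6 grid,
#   1 -> rows 0-2, cols 0-2    2 -> rows 0-2, cols 3-5
#   3 -> rows 3-5, cols 0-2    4 -> rows 3-5, cols 3-5
# Worked example for block 4: startCol = floor(((4 - 1) % 2) * 3) = 3 and
# startRow = floor(((4 - 1) / 2) * 3) = 4, minus 1 for even block numbers, giving 3.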
def rotate(grid, move):
    startCol = math.floor(((move[0] - 1) % 2) * 3)
    startRow = math.floor(((move[0] - 1) / 2) * 3)
    # for blocks 2 and 4, adjust startRow down by one
    if move[0] % 2 == 0:
        startRow = startRow - 1
    newgrid = copyGrid(grid)
    if move[1] == 'R':
        for i in range(startRow, startRow + 3):
            for j in range(startCol, startCol + 3):
                newgrid[j + startRow - startCol][2 - i + startRow + startCol] = grid[i][j]
    elif move[1] == 'L':
        for i in range(startRow, startRow + 3):
            for j in range(startCol, startCol + 3):
                newgrid[2 - j + startRow + startCol][i - startRow + startCol] = grid[i][j]
    return newgrid
def makeMove(grid, color, move):
    startCol = math.floor(((move[0] - 1) % 2) * 3)
    startRow = math.floor(((move[0] - 1) / 2) * 3)
    if move[0] % 2 == 0:
        startRow = startRow - 1
    if move[1] <= 3:
        if grid[startRow][(startCol + move[1] - 1)] == '.':
            grid[startRow][(startCol + move[1] - 1)] = color
    elif move[1] <= 6:
        if grid[startRow + 1][(startCol + move[1] - 4)] == '.':
            grid[startRow + 1][(startCol + move[1] - 4)] = color
    elif move[1] <= 9:
        if grid[startRow + 2][(startCol + move[1]) - 7] == '.':
            grid[startRow + 2][(startCol + move[1]) - 7] = color
    return
def checkWinColor(c, grid):
    win = False
    for i in range(6):
        for j in range(6):
            if grid[i][j] == c:
                if j < 2 and i >= 2:
                    win = ((grid[i][j] == grid[i][j+1] == grid[i][j+2] == grid[i][j+3] == grid[i][j+4] == c) or win)
                elif j < 2:
                    win = ((grid[i][j] == grid[i+1][j+1] == grid[i+2][j+2] == grid[i+3][j+3] == grid[i+4][j+4] == c) or win)
                elif i < 2:
                    win = ((grid[i][j] == grid[i+1][j] == grid[i+2][j] == grid[i+3][j] == grid[i+4][j] == c) or win)
                break
    return win
def checkWin(grid):
    winW = checkWinColor('w', grid)
    winB = checkWinColor('b', grid)
    if winW and (not winB):
        return "P1 wins!"
    elif winW:
        return "A Tie!"
    elif winB:
        return "P2 Wins!"
    # board is full with no winner
    if all('.' not in row for row in grid):
        return "A Tie!"
    return None
def scoreFrom(playscore, color, grid, pointer):
if pointer[0] >= 6 or pointer[1] >= 6:
return playscore
if playscore >= 5:
return playscore
    if grid[pointer[0]][pointer[1]] != color:
return playscore
else:
scoreh = scoreFrom(playscore+1, color, grid, ((pointer[0]+1),(pointer[1])))
scorev = scoreFrom(playscore+1, color, grid, ((pointer[0]),(pointer[1]+1)))
scored = scoreFrom(playscore+1, color, grid, ((pointer[0]+1),(pointer[1]+1)))
return max(scoreh, scorev, scored, playscore)
def scoreGrid(color, grid):
playscore = 0
for i in range(6):
for j in range(6):
temp = 0
            if grid[i][j] == color:
temp = scoreFrom(0, color, grid, (i,j))
playscore = max(playscore, temp)
return playscore
def allPossibleMoves(grid):
moves = []
block = 0
pos = 0
for i in range(6):
for j in range(6):
            if grid[i][j] == '.':
if j < 3:
if i < 3:
block = 1
else:
block = 3
else:
if i < 3:
block = 2
else:
block = 4
startRow = math.floor(((block - 1)/2)*3)
startCol = math.floor(((block - 1)%2)*3)
                if block % 2 == 0:
startRow = startRow - 1
pos = (3*(i - startRow))+(j - startCol)+1
for x in [1,2,3,4]:
moves.append(((block,pos),(x,'L')))
moves.append(((block,pos),(x,'R')))
return moves
"""Move generator using MinMax Algorithm"""
def genMinMax(moves, playerno, grid, depth, alpha, beta, maximizing):
    # player 1 places 'w' and player 2 places 'b' (matches Pentago() below)
    colors = ['w', 'b']
    if not moves:
        moves = []
        moves.append(genMoveRandom(grid))
    if depth == 0:
return (moves[-1], (scoreGrid(colors[playerno-1],grid)))
if checkWin(grid) is not None:
return (moves[-1], 5)
if maximizing:
bestScoreAndMove = (genMoveRandom(grid), -5)
for move in allPossibleMoves(grid):
tempGrid = copyGrid(grid)
makeMove( tempGrid, colors[playerno-1], move[0])
tempGrid = rotate(tempGrid, move[1])
moves.append(move)
score = genMinMax(moves, playerno, tempGrid, depth - 1, alpha, beta, False)[1]
if bestScoreAndMove[1] < score:
bestScoreAndMove = (move, score)
            alpha = max(score, alpha)
if beta <= alpha:
break
return bestScoreAndMove
else:
bestScoreAndMove = (genMoveRandom(grid),5)
for move in allPossibleMoves(grid):
if moves is None:
moves = []
tempGrid = copyGrid(grid)
makeMove(tempGrid, colors[-(playerno)], move[0])
tempGrid = rotate(tempGrid, move[1])
            moves.append(move)
score = genMinMax(moves, playerno, tempGrid, depth - 1, alpha, beta, True)[1]
if bestScoreAndMove[1] > score:
bestScoreAndMove = (move, score)
beta = min(score, beta)
if beta <= alpha:
break
return bestScoreAndMove
"""Random move generator """
def genMoveRandom(grid):
    # blocks are numbered 1-4 and positions within a block 1-9
    dropBlock = random.choice(range(1, 5))
    dropPos = random.choice(range(1, 10))
    valid = False
    while not valid:
        startCol = math.floor(((dropBlock - 1) % 2) * 3)
        startRow = math.floor(((dropBlock - 1) / 2) * 3)
        if dropBlock % 2 == 0:
            startRow = startRow - 1
        if dropPos <= 3:
            if grid[startRow][(startCol + dropPos - 1)] == '.':
                valid = True
                break
        elif dropPos <= 6:
            if grid[startRow + 1][(startCol + dropPos - 4)] == '.':
                valid = True
                break
        elif dropPos <= 9:
            if grid[startRow + 2][(startCol + dropPos - 7)] == '.':
                valid = True
                break
        dropBlock = random.choice(range(1, 5))
        dropPos = random.choice(range(1, 10))
    rotBlock = random.choice(range(1, 5))
    rotDir = random.choice(['L', 'R'])
    move = ((dropBlock, dropPos), (rotBlock, rotDir))
    return move
def printGrid(grid):
row=''
for i in range(6):
        if i == 3:
print('=======')
for j in range(6):
            if j == 3:
row+= '|'
row+=grid[i][j]
print(row)
row = ''
print('--------------------------')
""" The main function that will call all the other functions"""
def input2move(user_input):
    temp = user_input.split(' ')
    move = []
    move.append(temp[0].split('/'))
    move.append(list(temp[1]))
    move[0] = [int(x) for x in move[0]]
    move[1][0] = int(move[1][0])
    return move
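# Example of the expected user input format: the string "1/5 2R" means
# "drop a marble into block 1, position 5, then rotate block 2 to the right",
# and input2move("1/5 2R") returns [[1, 5], [2, 'R']].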
def Pentago(userplayer, p1, p2, grid):
if checkWin(grid) is not None:
print(checkWin(grid))
return
print("Current grid")
printGrid(grid)
updateGrid = grid
currmove = []
#player 1 move
    if userplayer == 1:
print(p1,"Make your move: ")
inputmove = input()
currmove = input2move(inputmove)
    else:
        print(p1, " is making its move")
        # the AI plays player 1 ('w') here, so search with playerno 1
        currmove = genMinMax(list(), 1, updateGrid, 3, -5, 5, True)[0]
print(currmove)
makeMove(updateGrid, 'w' , currmove[0])
updateGrid = rotate(updateGrid , currmove[1])
printGrid(updateGrid)
if checkWin(updateGrid) is not None:
print(checkWin(updateGrid))
return
#player 2 move
    if userplayer == 2:
print(p2,"Make your move: ")
inputmove = input()
currmove = input2move(inputmove)
    else:
        print(p2, " is making its move")
        # the AI plays player 2 ('b') here, so search with playerno 2
        currmove = genMinMax(list(), 2, updateGrid, 3, -5, 5, True)[0]
print(currmove)
makeMove(updateGrid, 'b' , currmove[0])
updateGrid = rotate(updateGrid , currmove[1])
if checkWin(updateGrid) is not None:
print(checkWin(updateGrid))
return
Pentago(userplayer, p1, p2, updateGrid)
return
print("The starting grid is")
initialGrid = [['.' for x in range(6)] for x in range(6)]
printGrid(initialGrid)
userplayer = int(input("Do you wish to be player 1 or 2:"))
p1 = input('Enter a name of player 1: ')
p2 = input('Enter a name of player 2: ')
Pentago(userplayer, p1, p2, initialGrid)
#genMinMax(moves = list(), playerno = 1, grid = initialGrid, depth = 3, alpha = -5, beta = 5, maximizing = True)
|
|
import xml.etree.ElementTree as ET
from xml.etree import ElementTree
from os.path import sep
import csv
import os
import os.path
import sys
import shutil
# instance and name must be the first two items
resource_keys = { "db": [ "instance", "name", "initialSize", "maxActive", "maxIdle", "minIdle", "defaultAutoCommit", "driverClassName", "fairQueue", "jmxEnabled", "logAbandoned", "maxWait", "minEvictableIdleTimeMillis", "username", "password", "removeAbandoned", "removeAbandonedTimeout", "testOnBorrow", "testOnReturn", "testWhileIdle", "timeBetweenEvictionRunsMillis", "type", "url", "useEquals", "validationQuery", "factory", "jdbcInterceptors" ], "mail": [ "instance", "name", "mail.smtp.auth","mail.smtp.host","mail.smtp.port","mail.smtp.user","mail.user","username","mail.smtp.password","mail.password", "password","mail.smtp.socketFactory.class","mail.smtp.starttls.enable", "factory" ], "rmi": [ "instance", "name", "port", "factory" ], "env": [ "instance", "name", "value" ], "connector": [ "instance", "shutdown", "HTTP/1.1", "AJP/1.3" ] }
resource_types = { "db": ["javax.sql.DataSource", "javax.sql.XADataSource"], "mail" : ["javax.mail.Session"], "rmi" : ["java.rmi.registry.Registry"], "mq": ["com.sun.messaging.Queue"], "env": ["java.lang.String"] }
# "attrs" contains static, additional attributes to the tag
resource_tags = { "db" : {"tag": "Resource", "attrs": {"auth": "Container"}}, "mail": {"tag": "Resource", "attrs": {"auth": "Container"}}, "rmi": {"tag": "Resource", "attrs": {"auth": "Container"}}, "mq": {"tag": "Resource", "attrs": {"auth": "Container"}}, "env": {"tag": "Environment"} }
class CommentedTreeBuilder(ElementTree.TreeBuilder):
def comment(self, data):
self.start(ElementTree.Comment, {})
self.data(data)
self.end(ElementTree.Comment)
def walk_instances(resource_type = "db", mode = None):
print("Resource type is " + resource_type)
if resource_type is not None and resource_type.startswith("env"):
tag = "Environment"
else:
tag = "Resource"
if mode == "update":
csvFile = open("resources-" + resource_type + ".csv", "rb")
reader = csv.reader(csvFile)
result = {}
oldInst = None
first = True
for row in reader:
print("row = " + str(row))
if first:
read_header(resource_type, row)
first = False
continue
if len(row) != len(resource_keys[resource_type]):
print("incomplete row ( " + str(len(row)) + " vs " + str(len(resource_keys[resource_type])) + " ) " + str(row))
continue
inst = row[0]
if oldInst is None:
oldInst = inst
items = {}
if oldInst != inst:
print("writing " + oldInst + " with " + str(items))
result[oldInst] = items
items = {}
item = {}
i = 0
for key in resource_keys[resource_type]:
# start after the name
if i >= 2:
item[key] = row[i]
i += 1
#if oldInst == inst:
items[row[1]] = item
oldInst = inst
print("last writing " + str(oldInst) + " with " + str(items))
result[oldInst] = items
#return
print("result = " + str(result))
execute_fn = update_tomcat_pool
create_fn = create_tomcat_pool
else:
csvFile = open("resources-" + resource_type + ".csv", "wb")
result = csv.writer(csvFile)
result.writerow(resource_keys[resource_type])
execute_fn = parse_tomcat_pool
resource_keys[resource_type].pop(0)
instances = os.listdir(".")
for instance in instances:
modified = False
serverFile = check_config_presence(instance)
if serverFile == "":
continue
print("processing " + instance)
tree = ET.parse(serverFile, parser=ElementTree.XMLParser(target=CommentedTreeBuilder()))
root = tree.getroot()
globalNamingResource = root.find("GlobalNamingResources")
for resource in globalNamingResource.findall(tag):
#print("found resource " + str(resource))
type = resource.get("type")
if type is not None:
possible = resource_types[resource_type]
#if type == "javax.sql.DataSource" or type == "javax.sql.XADataSource":
if type in possible:
modified = execute_fn(resource_type, instance, result, resource)
else:
print(str(resource))
if mode == "update" and create_fn is not None:
resource_type_type = resource_types[resource_type][0]
create_fn(resource_type_type, resource_tags[resource_type], instance, result, globalNamingResource)
modified = True
if mode == "update" and modified:
update(tree, instance, serverFile)
def read_header(resource_type, row):
if row[0] != "instance":
return
resource_keys[resource_type] = row
print "read header [{}]: {}".format(resource_type, resource_keys[resource_type])
def create_tomcat_pool(resource_type_type, resource_tag, instance, result, globalNamingResource):
print "yet unprocessed resources: {}".format(result)
if instance not in result:
return
# resources not present - these need to be inserted
resources = result[instance]
for name, resource_attrs in resources.items():
attrs = { "name" : name }
if "attrs" in resource_tag:
attrs.update(resource_tag["attrs"])
        non_empty_attrs = dict((k, v) for k, v in resource_attrs.items() if v)
attrs.update(non_empty_attrs)
if "type" not in attrs:
attrs.update({"type": resource_type_type})
print("creating " + str(resource_tag["tag"]) + " with: " + str(attrs))
ET.SubElement(globalNamingResource, resource_tag["tag"], attrs)
def update(tree, instance, serverFile):
# for test purposes
#tree.write(instance + "-server.xml")
# for real replacement of server.xml
tree.write(serverFile + ".new")
shutil.move(serverFile, serverFile + ".bak")
shutil.move(serverFile + ".new", serverFile)
def parse_tomcat_pool(resource_type, instance, result, resource):
item = [instance]
for key in resource_keys[resource_type]:
item.append(resource.get(key))
result.writerow(item)
def update_tomcat_pool(resource_type, instance, result, resource):
if instance not in result:
print("No " + instance + " instance entry")
return False
items = result[instance]
name = resource.get("name")
datasource = items[name]
if datasource is None:
print("There is no such datasource " + name)
return False
modified = False
for key in resource_keys[resource_type]:
if key == "name":
continue
value = datasource[key]
if value is None or value == "":
print("There is no such value for key " + key + " in " + name)
continue
print("Setting resource " + key + " = " + value)
resource.set(key, value)
modified = True
if modified:
del items[name]
return modified
def check_config_presence(instance):
if not os.path.isdir(instance):
return ""
serverFile = instance + sep + "conf" + sep + "server.xml"
if not os.path.isfile(serverFile):
print(serverFile + " is not file")
return ""
return serverFile
def walk_connectors(resource_type = "connector", mode = None):
if mode == "update":
csvFile = open("resources-" + resource_type + ".csv", "rb")
reader = csv.reader(csvFile)
result = {}
oldInst = None
first = True
for row in reader:
print("row = " + str(row))
if first:
first = False
continue
if len(row) != len(resource_keys[resource_type]):
print("incomplete row " + str(row))
continue
inst = row[0]
if oldInst is None:
oldInst = inst
items = {}
if oldInst != inst:
print("writing " + oldInst + " with " + str(items))
result[oldInst] = items
items = {}
i = 0
for key in resource_keys[resource_type]:
# start after the instance
if i >= 1:
items[key] = row[i]
i += 1
#if oldInst == inst:
oldInst = inst
print("last writing " + str(oldInst) + " with " + str(items))
result[oldInst] = items
#print("result = " + str(result))
#return
prepare_fn = prepare_update_connector
execute_fn = update_connector
else:
csvFile = open("resources-" + resource_type + ".csv", "wb")
result = csv.writer(csvFile)
result.writerow(resource_keys[resource_type])
prepare_fn = prepare_read_connector
execute_fn = read_connector
resource_keys[resource_type].pop(0)
instances = os.listdir(".")
for instance in instances:
modified = False
serverFile = check_config_presence(instance)
if serverFile == "":
continue
print("processing " + instance)
tree = ET.parse(serverFile, parser=ElementTree.XMLParser(target=CommentedTreeBuilder()))
root = tree.getroot()
service = root.find("Service")
item = []
prepare_fn(root, instance, result, item)
for resource in service.findall("Connector"):
protocol = resource.get("protocol")
#print("protocol", protocol)
modified = execute_fn(resource, result, instance, item, protocol)
if mode == "update" and modified:
update(tree, instance, serverFile)
def prepare_update_connector(root, instance, result, item):
root.set("port", result[instance]["shutdown"])
def prepare_read_connector(root, instance, result, item):
item.append(instance)
item.append(root.get("port"))
def read_connector(resource, result, instance, item, protocol):
item.append(resource.get("port"))
result.writerow(item)
return False
def update_connector(resource, result, instance, item, protocol):
if protocol not in result[instance]:
        return False
resource.set("port", result[instance][protocol])
return True
if __name__ == '__main__':
#print dirname(sys.argv[0])
args = sys.argv[1:]
if len(args) == 1:
if args[0] == "connector":
walk_connectors(args[0])
else:
walk_instances(args[0])
elif len(args) == 2:
if args[0] == "connector":
walk_connectors(args[0], args[1])
else:
walk_instances(args[0], args[1])
else:
walk_instances()
|
|
from __future__ import unicode_literals
from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
parse_duration,
qualities,
strip_jsonp,
unified_strdate,
url_basename,
)
class NPOBaseIE(SubtitlesInfoExtractor):
def _get_token(self, video_id):
token_page = self._download_webpage(
'http://ida.omroep.nl/npoplayer/i.js',
video_id, note='Downloading token')
return self._search_regex(
r'npoplayer\.token = "(.+?)"', token_page, 'token')
class NPOIE(NPOBaseIE):
IE_NAME = 'npo.nl'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/(?!live|radio)[^/]+/[^/]+/(?P<id>[^/?]+)'
_TESTS = [
{
'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
'md5': '4b3f9c429157ec4775f2c9cb7b911016',
'info_dict': {
'id': 'VPWON_1220719',
'ext': 'm4v',
'title': 'Nieuwsuur',
'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
'upload_date': '20140622',
},
},
{
'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
'info_dict': {
'id': 'VARA_101191800',
'ext': 'm4v',
'title': 'De Mega Mike & Mega Thomas show',
'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
'upload_date': '20090227',
'duration': 2400,
},
},
{
'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht',
'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
'upload_date': '20130225',
'duration': 3000,
},
},
{
'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
'info_dict': {
'id': 'WO_VPRO_043706',
'ext': 'wmv',
'title': 'De nieuwe mens - Deel 1',
'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
'duration': 4680,
},
'params': {
# mplayer mms download
'skip_download': True,
}
},
# non asf in streams
{
'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
'md5': 'b3da13de374cbe2d5332a7e910bef97f',
'info_dict': {
'id': 'WO_NOS_762771',
'ext': 'mp4',
'title': 'Hoe gaat Europa verder na Parijs?',
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._get_info(video_id)
def _get_info(self, video_id):
metadata = self._download_json(
'http://e.omroep.nl/metadata/aflevering/%s' % video_id,
video_id,
# We have to remove the javascript callback
transform_source=strip_jsonp,
)
token = self._get_token(video_id)
formats = []
pubopties = metadata.get('pubopties')
if pubopties:
quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
for format_id in pubopties:
format_info = self._download_json(
'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s'
% (video_id, format_id, token),
video_id, 'Downloading %s JSON' % format_id)
if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
continue
streams = format_info.get('streams')
if streams:
video_info = self._download_json(
streams[0] + '&type=json',
video_id, 'Downloading %s stream JSON' % format_id)
else:
video_info = format_info
video_url = video_info.get('url')
if not video_url:
continue
if format_id == 'adaptive':
formats.extend(self._extract_m3u8_formats(video_url, video_id))
else:
formats.append({
'url': video_url,
'format_id': format_id,
'quality': quality(format_id),
})
streams = metadata.get('streams')
if streams:
for i, stream in enumerate(streams):
stream_url = stream.get('url')
if not stream_url:
continue
if '.asf' not in stream_url:
formats.append({
'url': stream_url,
'quality': stream.get('kwaliteit'),
})
continue
asx = self._download_xml(
stream_url, video_id,
'Downloading stream %d ASX playlist' % i,
transform_source=fix_xml_ampersands)
ref = asx.find('./ENTRY/Ref')
if ref is None:
continue
video_url = ref.get('href')
if not video_url:
continue
formats.append({
'url': video_url,
'ext': stream.get('formaat', 'asf'),
'quality': stream.get('kwaliteit'),
})
self._sort_formats(formats)
subtitles = {}
if metadata.get('tt888') == 'ja':
subtitles['nl'] = 'http://e.omroep.nl/tt888/%s' % video_id
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
subtitles = self.extract_subtitles(video_id, subtitles)
return {
'id': video_id,
'title': metadata['titel'],
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'upload_date': unified_strdate(metadata.get('gidsdatum')),
'duration': parse_duration(metadata.get('tijdsduur')),
'formats': formats,
'subtitles': subtitles,
}
class NPOLiveIE(NPOBaseIE):
IE_NAME = 'npo.nl:live'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P<id>.+)'
_TEST = {
'url': 'http://www.npo.nl/live/npo-1',
'info_dict': {
'id': 'LI_NEDERLAND1_136692',
'display_id': 'npo-1',
'ext': 'mp4',
'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'Livestream',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
live_id = self._search_regex(
r'data-prid="([^"]+)"', webpage, 'live id')
metadata = self._download_json(
'http://e.omroep.nl/metadata/%s' % live_id,
display_id, transform_source=strip_jsonp)
token = self._get_token(display_id)
formats = []
streams = metadata.get('streams')
if streams:
for stream in streams:
stream_type = stream.get('type').lower()
if stream_type == 'ss':
continue
stream_info = self._download_json(
'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
% (stream.get('url'), token),
display_id, 'Downloading %s JSON' % stream_type)
if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0):
continue
stream_url = self._download_json(
stream_info['stream'], display_id,
'Downloading %s URL' % stream_type,
transform_source=strip_jsonp)
if stream_type == 'hds':
f4m_formats = self._extract_f4m_formats(stream_url, display_id)
                    # the f4m downloader downloads only a piece of the live stream
for f4m_format in f4m_formats:
f4m_format['preference'] = -1
formats.extend(f4m_formats)
elif stream_type == 'hls':
formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4'))
else:
formats.append({
'url': stream_url,
})
self._sort_formats(formats)
return {
'id': live_id,
'display_id': display_id,
'title': self._live_title(metadata['titel']),
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'formats': formats,
'is_live': True,
}
class NPORadioIE(InfoExtractor):
IE_NAME = 'npo.nl:radio'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)/?$'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-1',
'info_dict': {
'id': 'radio-1',
'ext': 'mp3',
'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
@staticmethod
def _html_get_attribute_regex(attribute):
return r'{0}\s*=\s*\'([^\']+)\''.format(attribute)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
self._html_get_attribute_regex('data-channel'), webpage, 'title')
stream = self._parse_json(
self._html_search_regex(self._html_get_attribute_regex('data-streams'), webpage, 'data-streams'),
video_id)
codec = stream.get('codec')
return {
'id': video_id,
'url': stream['url'],
'title': self._live_title(title),
'acodec': codec,
'ext': codec,
'is_live': True,
}
class NPORadioFragmentIE(InfoExtractor):
IE_NAME = 'npo.nl:radio:fragment'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
'info_dict': {
'id': '174356',
'ext': 'mp3',
'title': 'Jubileumconcert Willeke Alberti',
},
}
def _real_extract(self, url):
audio_id = self._match_id(url)
webpage = self._download_webpage(url, audio_id)
title = self._html_search_regex(
r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % audio_id,
webpage, 'title')
audio_url = self._search_regex(
r"data-streams='([^']+)'", webpage, 'audio url')
return {
'id': audio_id,
'url': audio_url,
'title': title,
}
class TegenlichtVproIE(NPOIE):
IE_NAME = 'tegenlicht.vpro.nl'
_VALID_URL = r'https?://tegenlicht\.vpro\.nl/afleveringen/.*?'
_TESTS = [
{
'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht',
'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
'upload_date': '20130225',
},
},
]
def _real_extract(self, url):
name = url_basename(url)
webpage = self._download_webpage(url, name)
urn = self._html_search_meta('mediaurn', webpage)
info_page = self._download_json(
'http://rs.vpro.nl/v2/api/media/%s.json' % urn, name)
return self._get_info(info_page['mid'])
|
|
from TASSELpy.utils.helper import make_sig
from TASSELpy.utils.Overloading import javaOverload, javaConstructorOverload, javaStaticOverload
from TASSELpy.java.lang.Object import Object
from TASSELpy.java.lang.String import metaString
from TASSELpy.java.lang.Integer import metaInteger
from TASSELpy.net.maizegenetics.dna.snp.genotypecall.GenotypeMergeRule import GenotypeMergeRule
from TASSELpy.net.maizegenetics.dna.GenotypeTable import GenotypeTable
from TASSELpy.net.maizegenetics.dna.snp.genotypecall.GenotypeCallTable import GenotypeCallTable
from TASSELpy.net.maizegenetics.dna.map.PositionList import PositionList
from TASSELpy.net.maizegenetics.taxa.TaxaList import TaxaList
from TASSELpy.net.maizegenetics.dna.snp.score.AlleleProbability import AlleleProbability
from TASSELpy.net.maizegenetics.dna.snp.depth.AlleleDepth import AlleleDepth
from TASSELpy.net.maizegenetics.dna.snp.score.Dosage import Dosage
from TASSELpy.net.maizegenetics.util.GeneralAnnotationStorage import GeneralAnnotationStorage
from TASSELpy.net.maizegenetics.dna.snp.score.ReferenceProbability import ReferenceProbability
java_imports = {'AlleleDepth':'net/maizegenetics/dna/snp/depth/AlleleDepth',
'AlleleProbability':'net/maizegenetics/dna/snp/score/AlleleProbability',
'Dosage':'net/maizegenetics/dna/snp/score/Dosage',
'GeneralAnnotationStorage':'net/maizegenetics/util/GeneralAnnotationStorage',
'GenotypeCallTable':'net/maizegenetics/dna/snp/genotypecall/GenotypeCallTable',
'GenotypeMergeRule':'net/maizegenetics/dna/snp/genotypecall/GenotypeMergeRule',
'GenotypeTable':'net/maizegenetics/dna/snp/GenotypeTable',
'GenotypeTableBuilder':'net/maizegenetics/dna/snp/GenotypeTableBuilder',
'PositionList':'net/maizegenetics/dna/map/PositionList',
'ReferenceProbability':'net/maizegenetics/dna/snp/score/ReferenceProbability',
'String':'java/lang/String',
'TaxaList':'net/maizegenetics/taxa/TaxaList'}
class GenotypeTableBuilder(Object):
""" Builder for GenotypeTables
New genotypeTables are built from a minimum of TaxaList, PositionList,
and GenotypeCallTable. Depth and Scores are optional features of GenotypeTables.
If you know the taxa, position, and genotypes are known from the beginning,
use GenotypeTable(a = GenotypeTableBuilder.getInstance(genotype, positionList, taxaList))
In many situations, only GenotypeTables are built incrementally, either by Taxa or Site
In many cases, genotype want to add taxa to an existing genotypeTable. Direct
addition is not possible, as GenotypeTables are immutable, but the
GenotypeTableBuilder.getTaxaIncremental provides a strategy for creating and merging taxa
together. Key to the process is that GenotypeMergeRule defines how the taxa with identical
names will be merged. Merging is possible with HDF5 files, but only if the closeUnfinished() method
was used with the previous building
"""
_java_name = java_imports['GenotypeTableBuilder']
@javaConstructorOverload(java_imports['GenotypeTableBuilder'])
def __init__(self, *args, **kwargs):
pass
@javaStaticOverload(java_imports['GenotypeTableBuilder'], 'getBuilder',
(make_sig([java_imports['String']], java_imports['GenotypeTableBuilder']),
(metaString), lambda x: GenotypeTableBuilder(obj=x)))
def getBuilder(*args):
""" Returns a builder to an existing, unfinished HDF5 genotypes file
Can be used if you want to add/modify annotations, etc, and/or
call build() to finalize it
Signatures:
static GenotypeTableBuilder getBuilder(String existingHDF5File)
Arguments:
            existingHDF5File -- The name of the HDF5 file containing the genotypes
Returns:
A GenotypeTableBuilder working off of the existing genotypes
in the HDF5 file
"""
pass
@javaStaticOverload(java_imports['GenotypeTableBuilder'],"getTaxaIncremental",
(make_sig([java_imports['PositionList']],java_imports['GenotypeTableBuilder']),
(PositionList,), lambda x: GenotypeTableBuilder(obj=x)),
(make_sig([java_imports['PositionList'],java_imports['GenotypeMergeRule']],
java_imports['GenotypeTableBuilder']),(PositionList,GenotypeMergeRule),
lambda x: GenotypeTableBuilder(obj=x)),
(make_sig([java_imports['GenotypeTable'],java_imports['GenotypeMergeRule']],
java_imports['GenotypeTableBuilder']),(GenotypeTable, GenotypeMergeRule),
lambda x: GenotypeTableBuilder(obj=x)),
(make_sig([java_imports['PositionList'],java_imports['String']],
java_imports['GenotypeTableBuilder']), (PositionList, metaString),
lambda x: GenotypeTableBuilder(obj=x)))
def getTaxaIncremental(*args):
""" Creates a builder allowing addition by taxa.
Signatures:
static GenotypeTableBuilder getTaxaIncremental(PositionList positionList)
static GenotypeTableBuilder getTaxaIncremental(PositionList positionList, GenotypeMergeRule mergeRule)
static GenotypeTableBuilder getTaxaIncremental(GenotypeTable genotypeTable, GenotypeMergeRule mergeRule)
static GenotypeTableBuilder getTaxaIncremental(PositionList positionList, String newHDF5File)
Arguments:
static GenotypeTableBuilder getTaxaIncremental(PositionList positionList)
positionList -- The positions used for the builder
static GenotypeTableBuilder getTaxaIncremental(PositionList positionList, GenotypeMergeRule mergeRule)
positionList -- The positions used for the builder
mergeRule -- rules for merging identically named taxa
static GenotypeTableBuilder getTaxaIncremental(GenotypeTable genotypeTable, GenotypeMergeRule mergeRule)
genotypeTable -- input genotype table
                mergeRule -- rules for merging identically named taxa
static GenotypeTableBuilder getTaxaIncremental(PositionList positionList, String newHDF5File)
positionList -- the defined list of positions
newHDF5File -- hdf5 file to be created
Returns:
Builder to add taxa to
"""
pass
@javaStaticOverload(java_imports['GenotypeTableBuilder'],"mergeTaxaIncremental",
(make_sig([java_imports['String'],java_imports['GenotypeMergeRule']],
java_imports['GenotypeTableBuilder']),(metaString, GenotypeMergeRule),
lambda x: GenotypeTableBuilder(obj=x)))
def mergeTaxaIncremental(*args):
""" Merges taxa to an existing HDF5 file.
The position list is derived from the positions already in the
existing HDF5 file
Signatures:
static GenotypeTableBuilder mergeTaxaIncremental(String existingHDF5File,
GenotypeMergeRule mergeRule)
Arguments:
static GenotypeTableBuilder mergeTaxaIncremental(String existingHDF5File,
GenotypeMergeRule mergeRule)
                existingHDF5File -- An existing HDF5 file containing positions
                mergeRule -- rule for merging taxa
Returns:
Builder to merge taxa with
"""
pass
@javaStaticOverload(java_imports['GenotypeTableBuilder'],'getTaxaIncrementalWithMerging',
(make_sig([java_imports['String'],java_imports['PositionList'],
java_imports['GenotypeMergeRule']], java_imports['GenotypeTableBuilder']),
(metaString, PositionList, GenotypeMergeRule), lambda x: GenotypeTableBuilder(obj=x)))
def getTaxaIncrementalWithMerging(*args):
""" Creates a new taxa incremental HDF5 GenotypeTableBuilder to which
replicate taxa can be added
Signatures:
static GenotypeTableBuilder getTaxaIncrementalWithMerging(String newHDF5File,
PositionList positionList, GenotypeMergeRule mergeRule)
Arguments:
newHDF5File -- the HDF5 file name
            positionList -- The positions used for the builder
mergeRule -- rule for merging taxa
Returns:
new GenotypeTableBuilder
"""
pass
@javaStaticOverload(java_imports['GenotypeTableBuilder'],'getSiteIncremental',
(make_sig([java_imports['TaxaList']],java_imports['GenotypeTableBuilder']),
(TaxaList,), lambda x: GenotypeTableBuilder(obj=x)),
                        (make_sig([java_imports['TaxaList'], 'int', java_imports['String']],
                                  java_imports['GenotypeTableBuilder']),
                         (TaxaList, metaInteger, metaString),
                         lambda x: GenotypeTableBuilder(obj=x)))
def getSiteIncremental(*args):
""" Build an alignment site by site in memory
Signatures:
static GenotypeTableBuilder getSiteIncremental(TaxaList taxaList)
static GenotypeTableBuilder getSiteIncremental(TaxaList taxaList,
int numberOfPositions, String newHDF5File)
Arguments:
static GenotypeTableBuilder getSiteIncremental(TaxaList taxaList)
taxaList -- taxa used to build to alignment
static GenotypeTableBuilder getSiteIncremental(TaxaList taxaList,
int numberOfPositions, String newHDF5File)
taxaList -- taxa used to build alignment
numberOfPositions -- Total number of positions to be added
newHDF5File -- HDF5 file to store the GenotypeTable data
Returns:
builder to add sites to
"""
pass
# TODO: Finish class
    @javaStaticOverload(java_imports['GenotypeTableBuilder'], 'getInstance',
                        (make_sig([java_imports['GenotypeCallTable'], java_imports['PositionList'],
                                   java_imports['TaxaList'], java_imports['AlleleDepth'],
                                   java_imports['AlleleProbability'], java_imports['ReferenceProbability'],
                                   java_imports['Dosage'], java_imports['GeneralAnnotationStorage']],
                                  java_imports['GenotypeTable']),
                         (GenotypeCallTable, PositionList, TaxaList, AlleleDepth,
                          AlleleProbability, ReferenceProbability, Dosage, GeneralAnnotationStorage),
                         lambda x: GenotypeTable(obj=x)))
def getInstance(*args):
""" Standard approach for creating new Alignment. Also provides methods
        for creating an HDF5 file based on an existing Genotype, PositionList, TaxaList
Signatures:
static GenotypeTable getInstance(GenotypeCallTable genotype, PositionList positionList,
TaxaList taxaList, AlleleDepth alleleDepth,
AlleleProbability alleleProbability, Dosage dosage,
GeneralAnnotationStorage annotations)
static GenotypeTable getInstance(GenotypeCallTable genotype, PositionList positionList,
TaxaList taxaList, AlleleDepth alleleDepth)
static GenotypeTable getInstance(GenotypeCallTable genotype, PositionList positionList,
TaxaList taxaList)
static GenotypeTable getInstance(GenotypeCallTable genotype, PositionList positionList,
TaxaList taxaList, String hdf5File)
static GenotypeTable getInstance(GenotypeTable a, String hdf5File)
static GenotypeTable getInstance(String hdf5File)
Arguments:
static GenotypeTable getInstance(GenotypeCallTable genotype, PositionList positionList,
TaxaList taxaList, AlleleDepth alleleDepth,
AlleleProbability alleleProbability, ReferenceProbability referenceProbability,
Dosage dosage, GeneralAnnotationStorage annotations)
genotype -- The genotype calls
positionList -- The positions
taxaList -- The taxa
alleleDepth -- allele depth
alleleProbability -- allele probability
referenceProbability -- reference probability
dosage -- Dosage
annotations -- annotations
static GenotypeTable getInstance(GenotypeCallTable genotype, PositionList positionList,
TaxaList taxaList, AlleleDepth alleleDepth)
genotype -- The genotype calls
positionList -- The positions
taxaList -- the taxa
alleleDepth -- the allele depth
static GenotypeTable getInstance(GenotypeCallTable genotype, PositionList positionList,
TaxaList taxaList)
genotype -- The genotype calls
positionList -- The positions
taxaList -- the taxa
static GenotypeTable getInstance(GenotypeCallTable genotype, PositionList positionList,
TaxaList taxaList, String hdf5File)
genotype -- The genotype calls
positionList -- The positions
taxaList -- the taxa
hdf5File -- Name of the file containing existing Genotype, PositionList, TaxaList
static GenotypeTable getInstance(GenotypeTable a, String hdf5File)
a -- Existing alignment
hdf5File -- name of the file to put the alignment into
static GenotypeTable getInstance(String hdf5File)
hdf5File -- File containing the alignment
"""
pass
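# --- Hedged usage sketch (not part of the wrapped TASSEL API above) ---
# A minimal illustration of how the builder entry points documented in the
# docstrings above might be combined. It assumes the javaStaticOverload
# decorator exposes the methods as callable statics and that `taxa_list` and
# `position_list` are wrapper objects created elsewhere; the HDF5 file name is
# a placeholder.
def _example_builders(taxa_list, position_list):
    # In-memory, site-by-site builder for the given taxa.
    site_builder = GenotypeTableBuilder.getSiteIncremental(taxa_list)
    # Taxa-incremental builder backed by a new HDF5 file.
    taxa_builder = GenotypeTableBuilder.getTaxaIncremental(position_list,
                                                           "example_genotypes.h5")
    return site_builder, taxa_builder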
|
|
#!/usr/bin/env python
from calendar import timegm
import rfc822
import time
from twitter import json, Hashtag, TwitterError, Url
class Status(object):
"""A class representing the Status structure used by the twitter API.
The Status structure exposes the following properties:
status.contributors
status.coordinates
status.created_at
status.created_at_in_seconds # read only
status.favorited
status.favorite_count
status.geo
status.id
status.id_str
status.in_reply_to_screen_name
status.in_reply_to_user_id
status.in_reply_to_status_id
status.lang
status.place
status.retweet_count
status.relative_created_at # read only
status.source
status.text
status.truncated
status.location
status.user
status.urls
status.user_mentions
status.hashtags
"""
def __init__(self, **kwargs):
"""An object to hold a Twitter status message.
This class is normally instantiated by the twitter.Api class and
returned in a sequence.
Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"
Args:
created_at:
The time this status message was posted. [Optional]
favorited:
Whether this is a favorite of the authenticated user. [Optional]
favorite_count:
Number of times this status message has been favorited. [Optional]
id:
The unique id of this status message. [Optional]
id_str:
The string form of the unique id of this status message. [Optional]
text:
The text of this status message. [Optional]
location:
the geolocation string associated with this message. [Optional]
relative_created_at:
A human readable string representing the posting time. [Optional]
user:
A twitter.User instance representing the person posting the
message. [Optional]
now:
The current time, if the client chooses to set it.
Defaults to the wall clock time. [Optional]
urls:
user_mentions:
hashtags:
geo:
place:
coordinates:
contributors:
retweeted:
retweeted_status:
current_user_retweet:
retweet_count:
possibly_sensitive:
scopes:
withheld_copyright:
withheld_in_countries:
withheld_scope:
"""
param_defaults = {
'coordinates': None,
'contributors': None,
'created_at': None,
'current_user_retweet': None,
'favorited': None,
'favorite_count': None,
'geo': None,
'id': None,
'id_str': None,
'in_reply_to_screen_name': None,
'in_reply_to_user_id': None,
'in_reply_to_status_id': None,
'lang': None,
'location': None,
'now': None,
'place': None,
'possibly_sensitive': None,
'retweeted': None,
'retweeted_status': None,
'retweet_count': None,
'scopes': None,
'source': None,
'text': None,
'truncated': None,
'urls': None,
'user': None,
'user_mentions': None,
'hashtags': None,
'media': None,
'withheld_copyright': None,
'withheld_in_countries': None,
'withheld_scope': None}
for (param, default) in param_defaults.iteritems():
setattr(self, param, kwargs.get(param, default))
# Properties that you should be able to set yourself.
@property
def Text(self):
"""Get the text of this status message.
Returns:
The text of this status message.
"""
return self._text
@Text.setter
def Text(self, text):
self._text = text
@property
def InReplyToStatusId(self):
return self._in_reply_to_status_id
@InReplyToStatusId.setter
def InReplyToStatusId(self, in_reply_to_status_id):
self._in_reply_to_status_id = in_reply_to_status_id
@property
def Possibly_sensitive(self):
return self._possibly_sensitive
@Possibly_sensitive.setter
def Possibly_sensitive(self, possibly_sensitive):
self._possibly_sensitive = possibly_sensitive
@property
def Place(self):
return self._place
@Place.setter
def Place(self, place):
self._place = place
@property
def Coordinates(self):
return self._coordinates
@Coordinates.setter
def Coordinates(self, coordinates):
self._coordinates = coordinates
# Missing the following, media_ids, trim_user, display_coordinates,
# lat and long
@property
def CreatedAt(self):
"""Get the time this status message was posted.
Returns:
The time this status message was posted
"""
return self._created_at
@property
def CreatedAtInSeconds(self):
"""Get the time this status message was posted, in seconds since the epoch.
Returns:
The time this status message was posted, in seconds since the epoch.
"""
return timegm(rfc822.parsedate(self.created_at))
@property
def RelativeCreatedAt(self):
"""Get a human readable string representing the posting time
Returns:
A human readable string representing the posting time
"""
fudge = 1.25
delta = long(self.now) - long(self.CreatedAtInSeconds)
if delta < (1 * fudge):
return 'about a second ago'
elif delta < (60 * (1 / fudge)):
return 'about %d seconds ago' % (delta)
elif delta < (60 * fudge):
return 'about a minute ago'
elif delta < (60 * 60 * (1 / fudge)):
return 'about %d minutes ago' % (delta / 60)
elif delta < (60 * 60 * fudge) or delta / (60 * 60) == 1:
return 'about an hour ago'
elif delta < (60 * 60 * 24 * (1 / fudge)):
return 'about %d hours ago' % (delta / (60 * 60))
elif delta < (60 * 60 * 24 * fudge) or delta / (60 * 60 * 24) == 1:
return 'about a day ago'
else:
return 'about %d days ago' % (delta / (60 * 60 * 24))
@property
def Favorited(self):
"""Get the favorited setting of this status message.
Returns:
True if this status message is favorited; False otherwise
"""
return self._favorited
@property
def FavoriteCount(self):
"""Get the favorite count of this status message.
Returns:
number of times this status message has been favorited
"""
return self._favorite_count
@property
def Id(self):
"""Get the unique id of this status message.
Returns:
The unique id of this status message
"""
return self._id
@property
def IdStr(self):
"""Get the unique id_str of this status message.
Returns:
The unique id_str of this status message
"""
return self._id_str
@property
def InReplyToScreenName(self):
return self._in_reply_to_screen_name
@property
def InReplyToUserId(self):
return self._in_reply_to_user_id
@property
def Truncated(self):
return self._truncated
@property
def Retweeted(self):
return self._retweeted
@property
def Source(self):
return self._source
@property
def Lang(self):
"""Get the machine-detected language of this status message
Returns:
The machine-detected language code of this status message.
"""
return self._lang
@property
def Location(self):
"""Get the geolocation associated with this status message
Returns:
The geolocation string of this status message.
"""
return self._location
@property
def User(self):
"""Get a twitter.User representing the entity posting this status message.
Returns:
A twitter.User representing the entity posting this status message
"""
return self._user
@property
def Now(self):
"""Get the wallclock time for this status message.
Used to calculate relative_created_at. Defaults to the time
the object was instantiated.
Returns:
Whatever the status instance believes the current time to be,
in seconds since the epoch.
"""
if self._now is None:
self._now = time.time()
return self._now
@Now.setter
def Now(self, now):
self._now = now
@property
def Geo(self):
return self._geo
@property
def Contributors(self):
return self._contributors
@property
def Retweeted_status(self):
return self._retweeted_status
@property
def RetweetCount(self):
return self._retweet_count
@property
def Current_user_retweet(self):
return self._current_user_retweet
@property
def Scopes(self):
return self._scopes
@property
def Withheld_copyright(self):
return self._withheld_copyright
@property
def Withheld_in_countries(self):
return self._withheld_in_countries
@property
def Withheld_scope(self):
return self._withheld_scope
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.created_at == other.created_at and \
self.id == other.id and \
self.text == other.text and \
self.location == other.location and \
self.user == other.user and \
self.in_reply_to_screen_name == other.in_reply_to_screen_name and \
self.in_reply_to_user_id == other.in_reply_to_user_id and \
self.in_reply_to_status_id == other.in_reply_to_status_id and \
self.truncated == other.truncated and \
self.retweeted == other.retweeted and \
self.favorited == other.favorited and \
self.favorite_count == other.favorite_count and \
self.source == other.source and \
self.geo == other.geo and \
self.place == other.place and \
self.coordinates == other.coordinates and \
self.contributors == other.contributors and \
self.retweeted_status == other.retweeted_status and \
self.retweet_count == other.retweet_count and \
self.current_user_retweet == other.current_user_retweet and \
self.possibly_sensitive == other.possibly_sensitive and \
self.scopes == other.scopes and \
self.withheld_copyright == other.withheld_copyright and \
self.withheld_in_countries == other.withheld_in_countries and \
self.withheld_scope == other.withheld_scope
except AttributeError:
return False
def __str__(self):
"""A string representation of this twitter.Status instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.Status instance.
"""
return self.AsJsonString()
def __repr__(self):
"""A string representation of this twitter.Status instance.
The return value is the ID of status, username and datetime.
Returns:
A string representation of this twitter.Status instance with
the ID of status, username and datetime.
"""
if self.user:
representation = "Status(ID=%s, screen_name='%s', created_at='%s')" % (
self.id, self.user.screen_name, self.created_at)
else:
representation = "Status(ID=%s, created_at='%s')" % (
self.id, self.created_at)
return representation
def AsJsonString(self, allow_non_ascii=False):
"""A JSON string representation of this twitter.Status instance.
To output non-ascii, set keyword allow_non_ascii=True.
Returns:
A JSON string representation of this twitter.Status instance
"""
return json.dumps(self.AsDict(), sort_keys=True,
ensure_ascii=not allow_non_ascii)
def AsDict(self):
"""A dict representation of this twitter.Status instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.Status instance
"""
data = {}
if self.created_at:
data['created_at'] = self.created_at
if self.favorited:
data['favorited'] = self.favorited
if self.favorite_count:
data['favorite_count'] = self.favorite_count
if self.id:
data['id'] = self.id
if self.text:
data['text'] = self.text
if self.lang:
data['lang'] = self.lang
if self.location:
data['location'] = self.location
if self.user:
data['user'] = self.user.AsDict()
if self.in_reply_to_screen_name:
data['in_reply_to_screen_name'] = self.in_reply_to_screen_name
if self.in_reply_to_user_id:
data['in_reply_to_user_id'] = self.in_reply_to_user_id
if self.in_reply_to_status_id:
data['in_reply_to_status_id'] = self.in_reply_to_status_id
if self.truncated is not None:
data['truncated'] = self.truncated
if self.retweeted is not None:
data['retweeted'] = self.retweeted
if self.favorited is not None:
data['favorited'] = self.favorited
if self.source:
data['source'] = self.source
if self.geo:
data['geo'] = self.geo
if self.place:
data['place'] = self.place
if self.coordinates:
data['coordinates'] = self.coordinates
if self.contributors:
data['contributors'] = self.contributors
if self.hashtags:
data['hashtags'] = [h.text for h in self.hashtags]
if self.media:
data['media'] = [m for m in self.media]
if self.retweeted_status:
data['retweeted_status'] = self.retweeted_status.AsDict()
if self.retweet_count:
data['retweet_count'] = self.retweet_count
if self.urls:
data['urls'] = dict([(url.url, url.expanded_url) for url in self.urls])
if self.user_mentions:
data['user_mentions'] = [um.AsDict() for um in self.user_mentions]
if self.current_user_retweet:
data['current_user_retweet'] = self.current_user_retweet
if self.possibly_sensitive:
data['possibly_sensitive'] = self.possibly_sensitive
if self.scopes:
data['scopes'] = self.scopes
if self.withheld_copyright:
data['withheld_copyright'] = self.withheld_copyright
if self.withheld_in_countries:
data['withheld_in_countries'] = self.withheld_in_countries
if self.withheld_scope:
data['withheld_scope'] = self.withheld_scope
return data
@staticmethod
def NewFromJsonDict(data):
"""Create a new instance based on a JSON dict.
Args:
data: A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Status instance
"""
if 'user' in data:
from twitter import User
# Have to do the import here to prevent cyclic imports in the __init__.py
# file
user = User.NewFromJsonDict(data['user'])
else:
user = None
if 'retweeted_status' in data:
retweeted_status = Status.NewFromJsonDict(data['retweeted_status'])
else:
retweeted_status = None
if 'current_user_retweet' in data:
current_user_retweet = data['current_user_retweet']['id']
else:
current_user_retweet = None
urls = None
user_mentions = None
hashtags = None
media = None
if 'entities' in data:
if 'urls' in data['entities']:
urls = [Url.NewFromJsonDict(u) for u in data['entities']['urls']]
if 'user_mentions' in data['entities']:
from twitter import User
user_mentions = [User.NewFromJsonDict(u) for u in data['entities']['user_mentions']]
if 'hashtags' in data['entities']:
hashtags = [Hashtag.NewFromJsonDict(h) for h in data['entities']['hashtags']]
if 'media' in data['entities']:
media = data['entities']['media']
else:
media = []
# the new extended entities
if 'extended_entities' in data:
if 'media' in data['extended_entities']:
media = [m for m in data['extended_entities']['media']]
return Status(created_at=data.get('created_at', None),
favorited=data.get('favorited', None),
favorite_count=data.get('favorite_count', None),
id=data.get('id', None),
text=data.get('text', None),
location=data.get('location', None),
lang=data.get('lang', None),
in_reply_to_screen_name=data.get('in_reply_to_screen_name', None),
in_reply_to_user_id=data.get('in_reply_to_user_id', None),
in_reply_to_status_id=data.get('in_reply_to_status_id', None),
truncated=data.get('truncated', None),
retweeted=data.get('retweeted', None),
source=data.get('source', None),
user=user,
urls=urls,
user_mentions=user_mentions,
hashtags=hashtags,
media=media,
geo=data.get('geo', None),
place=data.get('place', None),
coordinates=data.get('coordinates', None),
contributors=data.get('contributors', None),
retweeted_status=retweeted_status,
current_user_retweet=current_user_retweet,
retweet_count=data.get('retweet_count', None),
possibly_sensitive=data.get('possibly_sensitive', None),
scopes=data.get('scopes', None),
withheld_copyright=data.get('withheld_copyright', None),
withheld_in_countries=data.get('withheld_in_countries', None),
withheld_scope=data.get('withheld_scope', None))
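# --- Hedged usage sketch ---
# A minimal round-trip through NewFromJsonDict and AsJsonString; the sample
# dict below is illustrative only and uses just a few of the keys the parser
# understands.
if __name__ == '__main__':
    _sample = {'id': 1,
               'text': 'hello world',
               'created_at': 'Sat Jan 27 04:17:38 +0000 2007',
               'retweet_count': 2}
    _status = Status.NewFromJsonDict(_sample)
    print _status.AsJsonString()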
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the VMware ESX/vCenter platform.
"""
import re
import time
from eventlet import event
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.virt import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import host
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops
LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('host_ip',
deprecated_name='vmwareapi_host_ip',
deprecated_group='DEFAULT',
help='URL for connection to VMware ESX/VC host. Required if '
'compute_driver is vmwareapi.VMwareESXDriver or '
'vmwareapi.VMwareVCDriver.'),
cfg.StrOpt('host_username',
deprecated_name='vmwareapi_host_username',
deprecated_group='DEFAULT',
help='Username for connection to VMware ESX/VC host. '
'Used only if compute_driver is '
'vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.'),
cfg.StrOpt('host_password',
deprecated_name='vmwareapi_host_password',
deprecated_group='DEFAULT',
help='Password for connection to VMware ESX/VC host. '
'Used only if compute_driver is '
'vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.',
secret=True),
cfg.MultiStrOpt('cluster_name',
deprecated_name='vmwareapi_cluster_name',
deprecated_group='DEFAULT',
help='Name of a VMware Cluster ComputeResource. Used only if '
'compute_driver is vmwareapi.VMwareVCDriver.'),
cfg.StrOpt('datastore_regex',
help='Regex to match the name of a datastore. '
'Used only if compute_driver is '
'vmwareapi.VMwareVCDriver.'),
cfg.FloatOpt('task_poll_interval',
default=5.0,
deprecated_name='vmwareapi_task_poll_interval',
deprecated_group='DEFAULT',
help='The interval used for polling of remote tasks. '
'Used only if compute_driver is '
'vmwareapi.VMwareESXDriver or '
'vmwareapi.VMwareVCDriver.'),
cfg.IntOpt('api_retry_count',
default=10,
deprecated_name='vmwareapi_api_retry_count',
deprecated_group='DEFAULT',
help='The number of times we retry on failures, e.g., '
'socket error, etc. '
'Used only if compute_driver is '
'vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.'),
cfg.IntOpt('vnc_port',
default=5900,
deprecated_name='vnc_port',
deprecated_group='DEFAULT',
help='VNC starting port'),
cfg.IntOpt('vnc_port_total',
default=10000,
deprecated_name='vnc_port_total',
deprecated_group='DEFAULT',
help='Total number of VNC ports'),
# Deprecated, remove in Icehouse
cfg.StrOpt('vnc_password',
deprecated_name='vnc_password',
deprecated_group='DEFAULT',
help='DEPRECATED. VNC password. The password-based access to '
'VNC consoles will be removed in the next release. The '
'default value will disable password protection on the '
'VNC console.',
secret=True),
cfg.BoolOpt('use_linked_clone',
default=True,
deprecated_name='use_linked_clone',
deprecated_group='DEFAULT',
help='Whether to use linked clone'),
]
CONF = cfg.CONF
CONF.register_opts(vmwareapi_opts, 'vmware')
TIME_BETWEEN_API_CALL_RETRIES = 2.0
class Failure(Exception):
"""Base Exception class for handling task failures."""
def __init__(self, details):
self.details = details
def __str__(self):
return str(self.details)
class VMwareESXDriver(driver.ComputeDriver):
"""The ESX host connection object."""
# VMwareAPI has both ESXi and vCenter API sets.
# The ESXi API are a proper sub-set of the vCenter API.
# That is to say, nearly all valid ESXi calls are
# valid vCenter calls. There are some small edge-case
# exceptions regarding VNC, CIM, User management & SSO.
def __init__(self, virtapi, read_only=False, scheme="https"):
super(VMwareESXDriver, self).__init__(virtapi)
self._host_ip = CONF.vmware.host_ip
if not (self._host_ip and CONF.vmware.host_username and
CONF.vmware.host_password):
raise Exception(_("Must specify host_ip, "
"host_username "
"and host_password to use "
"compute_driver=vmwareapi.VMwareESXDriver or "
"vmwareapi.VMwareVCDriver"))
self._session = VMwareAPISession(scheme=scheme)
self._volumeops = volumeops.VMwareVolumeOps(self._session)
self._vmops = vmops.VMwareVMOps(self._session, self.virtapi,
self._volumeops)
self._host = host.Host(self._session)
self._host_state = None
#TODO(hartsocks): back-off into a configuration test module.
if CONF.vmware.use_linked_clone is None:
raise error_util.UseLinkedCloneConfigurationFault()
@property
def host_state(self):
if not self._host_state:
self._host_state = host.HostState(self._session,
self._host_ip)
return self._host_state
def init_host(self, host):
"""Do the initialization that needs to be done."""
# FIXME(sateesh): implement this
pass
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def snapshot(self, context, instance, name, update_task_state):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, name, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True, context=None):
"""Destroy VM instance."""
self._vmops.destroy(instance, network_info, destroy_disks)
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def suspend(self, instance):
"""Suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, instance, network_info, block_device_info=None):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta)
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance):
"""Power off the specified instance."""
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._vmops._power_on(instance)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
instances = self.list_instances()
if instance['uuid'] not in instances:
LOG.warn(_('Instance cannot be found in host, or in an unknown '
'state.'), instance=instance)
else:
state = vm_util.get_vm_state_from_name(self._session,
instance['uuid'])
ignored_states = ['poweredon', 'suspended']
if state.lower() in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self.reboot(context, instance, network_info, 'hard',
block_device_info)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def get_info(self, instance):
"""Return info about the VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_info(instance)
def get_console_output(self, instance):
"""Return snapshot of console."""
# The method self._vmops.get_console_output(instance) returns
# a PNG format. The vCenter and ESX do not provide a way
# to get the text based console format.
return _("Currently there is no log available for "
"instance %s") % instance['uuid']
def get_vnc_console(self, instance):
"""Return link to instance's VNC console."""
return self._vmops.get_vnc_console(instance)
def get_volume_connector(self, instance):
"""Return volume connector information."""
return self._volumeops.get_volume_connector(instance)
def get_host_ip_addr(self):
"""Retrieves the IP address of the ESX host."""
return self._host_ip
def attach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
"""Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
instance,
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
instance,
mountpoint)
def get_console_pool_info(self, console_type):
"""Get info about the host on which the VM resides."""
return {'address': CONF.vmware.host_ip,
'username': CONF.vmware.host_username,
'password': CONF.vmware.host_password}
def _get_available_resources(self, host_stats):
return {'vcpus': host_stats['vcpus'],
'memory_mb': host_stats['host_memory_total'],
'local_gb': host_stats['disk_total'],
'vcpus_used': 0,
'memory_mb_used': host_stats['host_memory_total'] -
host_stats['host_memory_free'],
'local_gb_used': host_stats['disk_used'],
'hypervisor_type': host_stats['hypervisor_type'],
'hypervisor_version': host_stats['hypervisor_version'],
'hypervisor_hostname': host_stats['hypervisor_hostname'],
'cpu_info': jsonutils.dumps(host_stats['cpu_info']),
'supported_instances': jsonutils.dumps(
host_stats['supported_instances']),
}
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:returns: dictionary describing resources
"""
host_stats = self.get_host_stats(refresh=True)
# Updating host information
return self._get_available_resources(host_stats)
def update_host_status(self):
"""Update the status info of the host, and return those values
to the calling program.
"""
return self.host_state.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, run the update first.
"""
return self.host_state.get_host_stats(refresh=refresh)
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
return self._host.host_power_action(host, action)
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self._host.host_maintenance_mode(host, mode)
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
return self._host.set_host_enabled(host, enabled)
def get_host_uptime(self, host):
return 'Please refer to %s for the uptime' % CONF.vmware.host_ip
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
self._vmops.inject_network_info(instance, network_info)
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
self._vmops.plug_vifs(instance, network_info)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
self._vmops.unplug_vifs(instance, network_info)
class VMwareVCDriver(VMwareESXDriver):
"""The ESX host connection object."""
# The vCenter driver includes several additional VMware vSphere
# capabilities that include API that act on hosts or groups of
# hosts in clusters or non-cluster logical-groupings.
#
# vCenter is not a hypervisor itself, it works with multiple
# hypervisor host machines and their guests. This fact can
# subtly alter how vSphere and OpenStack interoperate.
def __init__(self, virtapi, read_only=False, scheme="https"):
super(VMwareVCDriver, self).__init__(virtapi)
# Get the list of clusters to be used
self._cluster_names = CONF.vmware.cluster_name
self.dict_mors = vm_util.get_all_cluster_refs_by_name(self._session,
self._cluster_names)
if not self.dict_mors:
raise exception.NotFound(_("All clusters specified %s were not"
" found in the vCenter")
% self._cluster_names)
# Check if there are any clusters that were specified in the nova.conf
# but are not in the vCenter, for missing clusters log a warning.
clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()]
missing_clusters = set(self._cluster_names) - set(clusters_found)
if missing_clusters:
LOG.warn(_("The following clusters could not be found in the"
" vCenter %s") % list(missing_clusters))
self._datastore_regex = None
if CONF.vmware.datastore_regex:
try:
self._datastore_regex = re.compile(CONF.vmware.datastore_regex)
except re.error:
raise exception.InvalidInput(reason=
_("Invalid Regular Expression %s")
% CONF.vmware.datastore_regex)
# The _resources is used to maintain the vmops, volumeops and vcstate
# objects per cluster
self._resources = {}
self._resource_keys = set()
self._virtapi = virtapi
self._update_resources()
# The following initialization is necessary since the base class does
# not use VC state.
first_cluster = self._resources.keys()[0]
self._vmops = self._resources.get(first_cluster).get('vmops')
self._volumeops = self._resources.get(first_cluster).get('volumeops')
self._vc_state = self._resources.get(first_cluster).get('vcstate')
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
"""
Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
return self._vmops.migrate_disk_and_power_off(context, instance,
dest, instance_type)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize, powering back on the instance."""
self._vmops.finish_revert_migration(instance, network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Live migration of an instance to another host."""
self._vmops.live_migration(context, instance_ref, dest,
post_method, recover_method,
block_migration)
def get_vnc_console(self, instance):
"""Return link to instance's VNC console using vCenter logic."""
# In this situation, ESXi and vCenter require different
# API logic to create a valid VNC console connection object.
# Specifically, vCenter does not actually run the VNC service
# itself. You must talk to the VNC host underneath vCenter.
_vmops = self._get_vmops_for_compute_node(instance['node'])
return _vmops.get_vnc_console_vcenter(instance)
def _update_resources(self):
"""This method creates a dictionary of VMOps, VolumeOps and VCState.
A VMwareVMOps, VMwareVolumeOps and VCState object is created for each
cluster/resource pool. The dictionary is of the form
{
domain-1000 : {'vmops': vmops_obj,
'volumeops': volumeops_obj,
'vcstate': vcstate_obj,
'name': MyCluster},
resgroup-1000 : {'vmops': vmops_obj,
'volumeops': volumeops_obj,
'vcstate': vcstate_obj,
'name': MyRP},
}
"""
added_nodes = set(self.dict_mors.keys()) - set(self._resource_keys)
for node in added_nodes:
_volumeops = volumeops.VMwareVolumeOps(self._session,
self.dict_mors[node]['cluster_mor'],
vc_support=True)
_vmops = vmops.VMwareVCVMOps(self._session, self._virtapi,
_volumeops,
self.dict_mors[node]['cluster_mor'],
datastore_regex=self._datastore_regex)
name = self.dict_mors.get(node)['name']
nodename = self._create_nodename(node, name)
_vc_state = host.VCState(self._session, nodename,
self.dict_mors.get(node)['cluster_mor'])
self._resources[nodename] = {'vmops': _vmops,
'volumeops': _volumeops,
'vcstate': _vc_state,
'name': name,
}
self._resource_keys.add(node)
deleted_nodes = (set(self._resource_keys) -
set(self.dict_mors.keys()))
for node in deleted_nodes:
name = self.dict_mors.get(node)['name']
nodename = self._create_nodename(node, name)
del self._resources[nodename]
self._resource_keys.discard(node)
def _create_nodename(self, mo_id, display_name):
"""Creates the name that is stored in hypervisor_hostname column.
The name will be of the form similar to
domain-1000(MyCluster)
resgroup-1000(MyResourcePool)
"""
return mo_id + '(' + display_name + ')'
def _get_resource_for_node(self, nodename):
"""Gets the resource information for the specific node."""
resource = self._resources.get(nodename)
if not resource:
msg = _("The resource %s does not exist") % nodename
raise exception.NotFound(msg)
return resource
def _get_vmops_for_compute_node(self, nodename):
"""Retrieve vmops object from mo_id stored in the node name.
Node name is of the form domain-1000(MyCluster)
"""
resource = self._get_resource_for_node(nodename)
return resource['vmops']
def _get_volumeops_for_compute_node(self, nodename):
"""Retrieve vmops object from mo_id stored in the node name.
Node name is of the form domain-1000(MyCluster)
"""
resource = self._get_resource_for_node(nodename)
return resource['volumeops']
def _get_vc_state_for_compute_node(self, nodename):
"""Retrieve VCState object from mo_id stored in the node name.
Node name is of the form domain-1000(MyCluster)
"""
resource = self._get_resource_for_node(nodename)
return resource['vcstate']
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
:returns: dictionary describing resources
"""
stats_dict = {}
vc_state = self._get_vc_state_for_compute_node(nodename)
if vc_state:
host_stats = vc_state.get_host_stats(refresh=True)
# Updating host information
stats_dict = self._get_available_resources(host_stats)
else:
LOG.info(_("Invalid cluster or resource pool"
" name : %s") % nodename)
return stats_dict
def get_available_nodes(self, refresh=False):
"""Returns nodenames of all nodes managed by the compute service.
This method is for multi compute-nodes support. If a driver supports
multi compute-nodes, this method returns a list of nodenames managed
by the service. Otherwise, this method should return
[hypervisor_hostname].
"""
self.dict_mors = vm_util.get_all_cluster_refs_by_name(
self._session,
CONF.vmware.cluster_name)
nodes = self.dict_mors.keys()
node_list = []
self._update_resources()
for node in self.dict_mors.keys():
nodename = self._create_nodename(node,
self.dict_mors.get(node)['name'])
node_list.append(nodename)
LOG.debug(_("The available nodes are: %s") % node_list)
return node_list
def get_host_stats(self, refresh=True):
"""Return currently known host stats."""
stats_list = []
nodes = self.get_available_nodes()
for node in nodes:
stats_list.append(self.get_available_resource(node))
return stats_list
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def attach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
"""Attach volume storage to VM instance."""
_volumeops = self._get_volumeops_for_compute_node(instance['node'])
return _volumeops.attach_volume(connection_info,
instance,
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach volume storage to VM instance."""
_volumeops = self._get_volumeops_for_compute_node(instance['node'])
return _volumeops.detach_volume(connection_info,
instance,
mountpoint)
def get_volume_connector(self, instance):
"""Return volume connector information."""
_volumeops = self._get_volumeops_for_compute_node(instance['node'])
return _volumeops.get_volume_connector(instance)
def snapshot(self, context, instance, name, update_task_state):
"""Create snapshot from a running VM instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.snapshot(context, instance, name, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.reboot(instance, network_info)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True, context=None):
"""Destroy VM instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.destroy(instance, network_info, destroy_disks)
def pause(self, instance):
"""Pause VM instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.unpause(instance)
def suspend(self, instance):
"""Suspend the specified instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.suspend(instance)
def resume(self, instance, network_info, block_device_info=None):
"""Resume the suspended VM instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.rescue(context, instance, network_info, image_meta)
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.unrescue(instance)
def power_off(self, instance):
"""Power off the specified instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.power_off(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops._power_on(instance)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
for instance in instances:
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.poll_rebooting_instances(timeout, [instance])
def get_info(self, instance):
"""Return info about the VM instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
return _vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
return _vmops.get_info(instance)
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.inject_network_info(instance, network_info)
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.plug_vifs(instance, network_info)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.unplug_vifs(instance, network_info)
class VMwareAPISession(object):
"""
Sets up a session with the VC/ESX host and handles all
the calls made to the host.
"""
def __init__(self, host_ip=CONF.vmware.host_ip,
username=CONF.vmware.host_username,
password=CONF.vmware.host_password,
retry_count=CONF.vmware.api_retry_count,
scheme="https"):
self._host_ip = host_ip
self._host_username = username
self._host_password = password
self._api_retry_count = retry_count
self._scheme = scheme
self._session_id = None
self.vim = None
self._create_session()
def _get_vim_object(self):
"""Create the VIM Object instance."""
return vim.Vim(protocol=self._scheme, host=self._host_ip)
def _create_session(self):
"""Creates a session with the VC/ESX host."""
delay = 1
while True:
try:
# Login and setup the session with the host for making
# API calls
self.vim = self._get_vim_object()
session = self.vim.Login(
self.vim.get_service_content().sessionManager,
userName=self._host_username,
password=self._host_password)
# Terminate the earlier session, if possible ( For the sake of
# preserving sessions as there is a limit to the number of
# sessions we can have )
if self._session_id:
try:
self.vim.TerminateSession(
self.vim.get_service_content().sessionManager,
sessionId=[self._session_id])
except Exception as excep:
# This exception is something we can live with. It is
# just an extra caution on our side. The session may
# have been cleared. We could have made a call to
# SessionIsActive, but that is an overhead because we
# anyway would have to call TerminateSession.
LOG.debug(excep)
self._session_id = session.key
return
except Exception as excep:
LOG.critical(_("Unable to connect to server at %(server)s, "
"sleeping for %(seconds)s seconds"),
{'server': self._host_ip, 'seconds': delay})
time.sleep(delay)
delay = min(2 * delay, 60)
def __del__(self):
"""Logs-out the session."""
# Logout to avoid un-necessary increase in session count at the
# ESX host
try:
# May not have been able to connect to VC, so vim is still None
if self.vim:
self.vim.Logout(self.vim.get_service_content().sessionManager)
except Exception as excep:
# It is just cautionary on our part to do a logout in del just
# to ensure that the session is not left active.
LOG.debug(excep)
def _is_vim_object(self, module):
"""Check if the module is a VIM Object instance."""
return isinstance(module, vim.Vim)
def _call_method(self, module, method, *args, **kwargs):
"""
Calls a method within the module specified with
args provided.
"""
args = list(args)
retry_count = 0
exc = None
last_fault_list = []
while True:
try:
if not self._is_vim_object(module):
# If it is not the first try, then get the latest
# vim object
if retry_count > 0:
args = args[1:]
args = [self.vim] + args
retry_count += 1
temp_module = module
for method_elem in method.split("."):
temp_module = getattr(temp_module, method_elem)
return temp_module(*args, **kwargs)
except error_util.VimFaultException as excep:
# If it is a Session Fault Exception, it may point
# to a session gone bad. So we try re-creating a session
# and then proceeding ahead with the call.
exc = excep
if error_util.FAULT_NOT_AUTHENTICATED in excep.fault_list:
# Because of the idle session returning an empty
# RetrievePropertiesResponse and also the same is returned
# when there is, say, an empty answer to the query for
# VMs on the host ( as in no VMs on the host), we have no
# way to differentiate.
# So if the previous response was also an empty response
# and after creating a new session, we get the same empty
# response, then we are sure of the response being supposed
# to be empty.
if error_util.FAULT_NOT_AUTHENTICATED in last_fault_list:
return []
last_fault_list = excep.fault_list
self._create_session()
else:
# No retrying for errors where the API call has gone through
# and the failure is the caller's fault. The caller should handle
# these errors, e.g. an InvalidArgument fault.
break
except error_util.SessionOverLoadException as excep:
# For exceptions which may come because of session overload,
# we retry
exc = excep
except Exception as excep:
# If it is a proper exception, say not having furnished
# proper data in the SOAP call or the retry limit having
# exceeded, we raise the exception
exc = excep
break
# If retry count has been reached then break and
# raise the exception
if retry_count > self._api_retry_count:
break
time.sleep(TIME_BETWEEN_API_CALL_RETRIES)
LOG.critical(_("In vmwareapi:_call_method, "
"got this exception: %s") % exc)
raise
def _get_vim(self):
"""Gets the VIM object reference."""
if self.vim is None:
self._create_session()
return self.vim
def _wait_for_task(self, instance_uuid, task_ref):
"""
Return a Deferred that will give the result of the given task.
The task is polled until it completes.
"""
done = event.Event()
loop = loopingcall.FixedIntervalLoopingCall(self._poll_task,
instance_uuid,
task_ref, done)
loop.start(CONF.vmware.task_poll_interval)
ret_val = done.wait()
loop.stop()
return ret_val
def _poll_task(self, instance_uuid, task_ref, done):
"""
Poll the given task and fire the given Deferred when we
get a result.
"""
try:
task_info = self._call_method(vim_util, "get_dynamic_property",
task_ref, "Task", "info")
task_name = task_info.name
if task_info.state in ['queued', 'running']:
return
elif task_info.state == 'success':
LOG.debug(_("Task [%(task_name)s] %(task_ref)s "
"status: success"),
{'task_name': task_name, 'task_ref': task_ref})
done.send("success")
else:
error_info = str(task_info.error.localizedMessage)
LOG.warn(_("Task [%(task_name)s] %(task_ref)s "
"status: error %(error_info)s"),
{'task_name': task_name, 'task_ref': task_ref,
'error_info': error_info})
done.send_exception(exception.NovaException(error_info))
except Exception as excep:
LOG.warn(_("In vmwareapi:_poll_task, Got this error %s") % excep)
done.send_exception(excep)
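# --- Hedged illustration (not part of Nova) ---
# VMwareVCDriver._create_nodename stores nodes as '<mo_id>(<display name>)',
# e.g. 'domain-1000(MyCluster)'. The standalone helper below sketches how such
# a nodename could be split back into its parts; it is illustration only and
# is not used by the driver code above.
def _split_nodename(nodename):
    """Split 'domain-1000(MyCluster)' into ('domain-1000', 'MyCluster')."""
    mo_id, _sep, rest = nodename.partition('(')
    return mo_id, rest.rstrip(')')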
|
|
# -*- coding: utf-8 -*-
# File: nvml.py
import threading
from ctypes import (
CDLL, POINTER, Structure, byref, c_uint,
c_ulonglong, create_string_buffer)
__all__ = ['NVMLContext']
NVML_ERROR_FUNCTION_NOT_FOUND = 13
NvmlErrorCodes = {"0": "NVML_SUCCESS",
"1": "NVML_ERROR_UNINITIALIZED",
"2": "NVML_ERROR_INVALID_ARGUMENT",
"3": "NVML_ERROR_NOT_SUPPORTED",
"4": "NVML_ERROR_NO_PERMISSION",
"5": "NVML_ERROR_ALREADY_INITIALIZED",
"6": "NVML_ERROR_NOT_FOUND",
"7": "NVML_ERROR_INSUFFICIENT_SIZE",
"8": "NVML_ERROR_INSUFFICIENT_POWER",
"9": "NVML_ERROR_DRIVER_NOT_LOADED",
"10": "NVML_ERROR_TIMEOUT",
"11": "NVML_ERROR_IRQ_ISSUE",
"12": "NVML_ERROR_LIBRARY_NOT_FOUND",
"13": "NVML_ERROR_FUNCTION_NOT_FOUND",
"14": "NVML_ERROR_CORRUPTED_INFOROM",
"15": "NVML_ERROR_GPU_IS_LOST",
"16": "NVML_ERROR_RESET_REQUIRED",
"17": "NVML_ERROR_OPERATING_SYSTEM",
"18": "NVML_ERROR_LIB_RM_VERSION_MISMATCH",
"999": "NVML_ERROR_UNKNOWN"}
class NvmlException(Exception):
def __init__(self, error_code):
super(NvmlException, self).__init__(error_code)
self.error_code = error_code
def __str__(self):
return NvmlErrorCodes[str(self.error_code)]
def _check_return(ret):
if (ret != 0):
raise NvmlException(ret)
return ret
class NVML(object):
"""
Loader for libnvidia-ml.so
"""
_nvmlLib = None
_lib_lock = threading.Lock()
def load(self):
with self._lib_lock:
if self._nvmlLib is None:
self._nvmlLib = CDLL("libnvidia-ml.so.1")
function_pointers = ["nvmlDeviceGetName", "nvmlDeviceGetUUID", "nvmlDeviceGetMemoryInfo",
"nvmlDeviceGetUtilizationRates", "nvmlInit_v2", "nvmlShutdown",
"nvmlDeviceGetCount_v2", "nvmlDeviceGetHandleByIndex_v2"]
self.func_ptr = {n: self._function_pointer(n) for n in function_pointers}
def _function_pointer(self, name):
try:
return getattr(self._nvmlLib, name)
except AttributeError:
raise NvmlException(NVML_ERROR_FUNCTION_NOT_FOUND)
def get_function(self, name):
if name in self.func_ptr:
return self.func_ptr[name]
_NVML = NVML()
class NvidiaDevice(object):
"""Represent a single GPUDevice"""
def __init__(self, hnd):
super(NvidiaDevice, self).__init__()
self.hnd = hnd
def memory(self):
"""Memory information in bytes
Example:
>>> print(ctx.device(0).memory())
{'total': 4238016512L, 'used': 434831360L, 'free': 3803185152L}
Returns:
total/used/free memory in bytes
"""
class GpuMemoryInfo(Structure):
_fields_ = [
('total', c_ulonglong),
('free', c_ulonglong),
('used', c_ulonglong),
]
c_memory = GpuMemoryInfo()
_check_return(_NVML.get_function(
"nvmlDeviceGetMemoryInfo")(self.hnd, byref(c_memory)))
return {'total': c_memory.total, 'free': c_memory.free, 'used': c_memory.used}
def utilization(self):
"""Percent of time over the past second was utilized.
Details:
Percent of time over the past second during which one or more kernels was executing on the GPU.
Percent of time over the past second during which global (device) memory was being read or written
Example:
>>> print(ctx.device(0).utilization())
{'gpu': 4L, 'memory': 6L}
"""
class GpuUtilizationInfo(Structure):
_fields_ = [
('gpu', c_uint),
('memory', c_uint),
]
c_util = GpuUtilizationInfo()
_check_return(_NVML.get_function(
"nvmlDeviceGetUtilizationRates")(self.hnd, byref(c_util)))
return {'gpu': c_util.gpu, 'memory': c_util.memory}
def name(self):
buflen = 1024
buf = create_string_buffer(buflen)
fn = _NVML.get_function("nvmlDeviceGetName")
ret = fn(self.hnd, buf, c_uint(1024))
_check_return(ret)
return buf.value.decode('utf-8')
class NVMLContext(object):
"""Creates a context to query information
Example:
with NVMLContext() as ctx:
num_gpus = ctx.num_devices()
for device in ctx.devices():
print(device.memory())
print(device.utilization())
"""
def __enter__(self):
"""Create a new context """
_NVML.load()
_check_return(_NVML.get_function("nvmlInit_v2")())
return self
def __exit__(self, type, value, tb):
"""Destroy current context"""
_check_return(_NVML.get_function("nvmlShutdown")())
def num_devices(self):
"""Get number of devices """
c_count = c_uint()
_check_return(_NVML.get_function(
"nvmlDeviceGetCount_v2")(byref(c_count)))
return c_count.value
def devices(self):
"""
Returns:
[NvidiaDevice]: a list of devices
"""
return [self.device(i) for i in range(self.num_devices())]
def device(self, idx):
"""Get a specific GPU device
Args:
idx: index of device
Returns:
NvidiaDevice: single GPU device
"""
class GpuDevice(Structure):
pass
c_nvmlDevice_t = POINTER(GpuDevice)
c_index = c_uint(idx)
device = c_nvmlDevice_t()
_check_return(_NVML.get_function(
"nvmlDeviceGetHandleByIndex_v2")(c_index, byref(device)))
return NvidiaDevice(device)
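# --- Hedged usage sketch ---
# A small helper (assumes libnvidia-ml.so.1 is installed) showing how the
# NVMLContext above can be used to poll utilization a few times; it is not
# part of the original API.
def _poll_utilization(interval=1.0, iterations=3):
    import time
    samples = []
    with NVMLContext() as ctx:
        for _ in range(iterations):
            samples.append([dev.utilization() for dev in ctx.devices()])
            time.sleep(interval)
    return samples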
if __name__ == '__main__':
with NVMLContext() as ctx:
for idx, dev in enumerate(ctx.devices()):
print(idx, dev.name())
with NVMLContext() as ctx:
print(ctx.devices())
print(ctx.devices()[0].utilization())
|
|
#!/usr/bin/arch -32 /System/Library/Frameworks/Python.framework/Versions/2.7/bin/python
#!/usr/bin/env python
#a Imports
import pygame
import sys, os
sys.path.insert(0, os.path.abspath('../python'))
import gjslib.math.bezier as bezier
import gjslib.graphics.font as font
import gjslib.math.mesh as mesh
#a Polygon classes
#c c_polygon_point
class c_polygon_point( object ):
#f __init__
def __init__( self, pt ):
self.pt = pt
self.triangles = []
self.line_segments = []
self.entry_number = -1
pass
#f check_consistent
def check_consistent( self ):
for l in self.line_segments:
if self not in l[0].get_points():
raise Exception("Failed to find point %s in line %s"%(str(self),str(l)))
if self.line_segments.count(l)>1:
raise Exception("Duplicate line in point %s: line %s"%(str(self),str(l)))
pass
for t in self.triangles:
if t is not None:
if self not in t.pts:
raise Exception("Failed to find point %s in triangle %s"%(str(self),str(t)))
pass
if self.triangles.count(t)>1:
raise Exception("Duplicate triangle in point %s: triangle %s"%(str(self),str(t)))
pass
pass
#f coords
def coords( self ):
return self.pt.coords
#f add_to_line
def add_to_line( self, line, other, line_pt_num ):
#print "adding point to line as point",self, line, line_pt_num
self.line_segments.append( (line, other, line_pt_num) )
#print self.line_segments
pass
#f remove_from_line
def remove_from_line( self, line ):
#print "removing point from line",self, line, self.line_segments
for i in range(len(self.line_segments)):
if self.line_segments[i][0]==line:
self.line_segments.pop(i)
return
pass
die_horribly
return
#f is_on_line
def is_on_line( self, line ):
for (l,pt,x) in self.line_segments:
if l == line: return True
pass
return False
#f find_line_segment_to
def find_line_segment_to( self, other, mesh_only=False ):
for (l,pt,x) in self.line_segments:
if mesh_only and (l.triangles[0] is None): continue
if pt==other: return l
pass
return None
#f used_in_lines
def used_in_lines( self ):
return len(self.line_segments)>0
#f add_to_triangle
def add_to_triangle( self, triangle ):
self.triangles.append(triangle)
pass
#f remove_from_triangle
def remove_from_triangle( self, triangle ):
self.triangles.remove(triangle)
pass
#f compare_with
def compare_with( self, other ):
for i in range(len(self.pt.coords)):
if self.pt.coords[i]<other.coords[i]: return -1
if self.pt.coords[i]>other.coords[i]: return 1
return 0
#f __repr__
def __repr__( self ):
lines = ""
for l in self.line_segments: lines += "%d,"%l[0].line_num
tris = ""
for t in self.triangles: tris += "%d,"%t.triangle_num
return "pt(%d:%s:(%s):(%s))"%(self.entry_number,str(self.pt),lines,tris)
#c c_polygon_line
class c_polygon_line( object ):
"""
A line in a polygon
It has two points leading in the 'direction' of the line
It has two triangles (either of which may be None)
"""
line_log = []
#f __init__
def __init__( self, pt0, pt1, on_contour=True ):
self.line_num = len(self.line_log)
self.line_log.append(self)
self.pts = (pt0, pt1)
self.direction = pt1.pt.add(pt0.pt,factor=-1)
self.triangles = (None, None)
self.on_contour = on_contour
pt0.add_to_line( self, pt1, 0 )
pt1.add_to_line( self, pt0, 1 )
self.in_use = True
pass
#f check_consistent
def check_consistent( self ):
for pt in self.pts:
if not pt.is_on_line(self):
raise Exception("Failed to find %s in %s"%(str(self),str(pt)))
if self.pts.count(pt)>1:
raise Exception("Duplicate point in %s: %s"%(str(self),str(pt)))
pass
if (self.triangles[0] is None) and (self.triangles[1] is not None):
raise Exception("Badly formed triangle tuple in line %s"%(str(self)))
for t in self.triangles:
if t is None: continue
t_pts = t.get_points()
for pt in self.pts:
if pt not in t_pts:
raise Exception("Point %s in %s not in line's triangle %s"%(str(pt),str(self),str(t)))
pass
if self not in t.find_lines(mesh_only=True):
raise Exception("%s not found in %s"%(str(self),str(t)))
pass
pass
#f get_points
def get_points( self ):
return self.pts
#f num_triangles
def num_triangles( self ):
"""Return number of triangles the line is part of (0, 1, or 2)."""
if self.triangles[0] is None: return 0
if self.triangles[1] is None: return 1
return 2
#f is_parallel_to
def is_parallel_to( self, other ):
return self.direction.is_parallel_to(other.direction)
#f add_triangle
def add_triangle( self, tri ):
if self.triangles[0] is None:
self.triangles=(tri,None)
pass
elif self.triangles[1] is None:
self.triangles=(self.triangles[0],tri)
pass
else:
die_horribly
return tri
#f swap_triangle
def swap_triangle( self, tri_from, tri_to ):
if self.triangles[0]==tri_from:
self.triangles = (tri_to,self.triangles[1])
pass
elif self.triangles[1]==tri_from:
self.triangles = (self.triangles[0],tri_to)
pass
return
#f merge
def merge( self, other ):
"""
Either self.pts[1]==other.pts[0] or self.pts[0]==other.pts[1]
Either way we are basically removing the lines 'self' and 'other'
"""
self_index = 0
if self.pts[1]==other.pts[0]:
self_index = 1
pass
self.pts[0].remove_from_line( self )
self.pts[1].remove_from_line( self )
other.pts[0].remove_from_line( other )
other.pts[1].remove_from_line( other )
return c_polygon_line( self.pts[1-self_index], other.pts[self_index] )
#f can_shorten_diagonal
def can_shorten_diagonal( self ):
"""
If the line has two triangles (one on each side) then see if the quad formed is convex and the shorter diagonal could be taken
"""
if self.triangles[1] is None: return False
quad = [self.pts[0].coords(), None, self.pts[1].coords(), None]
quad[1] = self.triangles[0].get_other_point( self.pts ).coords()
quad[3] = self.triangles[1].get_other_point( self.pts ).coords()
(dx0, dy0) = ( quad[2][0]-quad[0][0], quad[2][1]-quad[0][1] )
(dx1, dy1) = ( quad[3][0]-quad[1][0], quad[3][1]-quad[1][1] )
if (dx0*dx0+dy0*dy0 <= dx1*dx1+dy1*dy1): return False
# Are both quad[0] and quad[2] on the same side of the line quad[3]-quad[1]? If so, concave :-(
if ( ( dy1*(quad[0][0]-quad[1][0])-dx1*(quad[0][1]-quad[1][1]) ) *
( dy1*(quad[2][0]-quad[1][0])-dx1*(quad[2][1]-quad[1][1]) ) ) > 0: return False
return True
#f swap_diagonal
def swap_diagonal( self, p, verbose=False ):
"""
Trusting that the line has two triangles (ABX and ABY) move it to be XY and the triangles to be AYX and XBY
For consistency, the line has to be removed from points A and B
Point B has to be removed from the first triangle
Point A has to be removed from the second triangle
Point Y has to be added to the first triangle
Point X has to be added to the second triangle
The line is added to points X and Y (line 'XY')
The line is changed to be X and Y
        The first triangle changes its points from ABX to AYX
The second triangle changes its points from ABY to XBY
"""
if self.triangles[1] is None: die_horribly
A = self.pts[0]
B = self.pts[1]
X = self.triangles[0].get_other_point( self.pts )
Y = self.triangles[1].get_other_point( self.pts )
if verbose:
p.check_consistent()
print "********************************************************************************"
print p.__repr__(verbose=True)
print "********************************************************************************"
print "Swapping diagonal (A,B) with (X,Y) for line",A,B,X,Y,self
print "Triangles ",self.triangles[0], self.triangles[1]
self.pts = ( X, Y )
self.triangles[0].change_point( B, Y, self.triangles[1] )
self.triangles[1].change_point( A, X, self.triangles[0] )
#print "Now", A,B,X,Y,self
#print "Triangles ",self.triangles[0], self.triangles[1]
A.remove_from_line( self )
B.remove_from_line( self )
X.add_to_line( self, Y, 0 )
Y.add_to_line( self, X, 1 )
#print "********************************************************************************"
#print p.__repr__(verbose=True)
#print "********************************************************************************"
#p.check_consistent()
pass
    #f __repr__
    def __repr__( self ):
tris = "(X,X)"
if self.triangles[1] is not None: tris="(%d,%d)"%(self.triangles[0].triangle_num,self.triangles[1].triangle_num)
elif self.triangles[0] is not None: tris="(%d,X)"%(self.triangles[0].triangle_num)
return "line(%d:%d:%d:%d:%s)"%(self.line_num,self.on_contour,self.pts[0].entry_number,self.pts[1].entry_number,tris)
#c c_polygon_triangle
class c_polygon_triangle( object ):
triangle_log = []
#f __init__
def __init__( self, pts ):
"""
To create a triangle, the lines between the points must have already been created so that the points are connected
"""
self.triangle_num = len(self.triangle_log)
self.triangle_log.append(self)
self.pts = list(pts)
pts[0].add_to_triangle( self )
pts[1].add_to_triangle( self )
pts[2].add_to_triangle( self )
pts[0].find_line_segment_to( pts[1] ).add_triangle( self )
pts[1].find_line_segment_to( pts[2] ).add_triangle( self )
pts[2].find_line_segment_to( pts[0] ).add_triangle( self )
pass
#f check_consistent
def check_consistent( self ):
for l in self.find_lines(mesh_only=True):
if self not in l.triangles:
raise Exception("Failed to find %s in %s"%(str(self),str(l)))
pass
pass
#f find_lines
def find_lines( self, mesh_only=False ):
lines = []
for i in range(3):
pt = self.pts[i]
for j in range(1,3):
l = pt.find_line_segment_to(self.pts[(i+j)%3], mesh_only)
if l not in lines:
lines.append(l)
pass
pass
pass
return lines
#f change_point
def change_point( self, pt_from, pt_to, other_triangle ):
for i in range(3):
if self.pts[i]==pt_from:
for j in range(1,3):
l = pt_from.find_line_segment_to( self.pts[(i+j)%3], mesh_only=True )
if (self in l.triangles) and not (other_triangle in l.triangles):
l.swap_triangle( self, other_triangle)
pass
pass
pt_from.remove_from_triangle( self )
pt_to.add_to_triangle( self )
self.pts[i] = pt_to
return
pass
die_horribly
pass
#f get_other_point
def get_other_point( self, pts ):
for p in self.pts:
if p not in pts: return p
pass
return None
#f get_points
def get_points( self ):
return self.pts
#f __repr__
def __repr__( self ):
return "tri(%d:%d,%d,%d)"%(self.triangle_num,self.pts[0].entry_number,self.pts[1].entry_number,self.pts[2].entry_number)
#c c_polygon
class c_polygon( object ):
"""
A polygon is a set of line segments, each of which has two points
"""
#f __init__
def __init__( self ):
self.reset()
pass
#f reset
def reset( self ):
self.line_segments = []
self.triangles = []
self.point_set = []
self.contour = []
pass
#f check_consistent
def check_consistent( self ):
for pt in self.point_set:
pt.check_consistent()
pass
for l in self.line_segments:
l.check_consistent()
pass
for t in self.triangles:
t.check_consistent()
pass
pass
#f find_insertion_index
def find_insertion_index( self, set, element, compare_fn ):
"""
Find where to insert an element in a set, using a specified compare_fn
compare_fn takes (set_element, element) and returns -1 if set_element<element, 0 if set_element==element, 1 if set_element>element
        Return (index, match); if match is True then set[index] compares equal to element (compare_fn returned 0)
index is the index of the entry in 'set' to insert 'element' before to maintain an ordered list (i.e. set[index-1]<element<set[index])
Use a binary search
"""
l = len(set)
if l==0: return (0, False)
(i0, i1) = (-1,l)
while i0<i1-1:
im = (i0+i1)/2
cmp = compare_fn( set[im], element )
if cmp==0:
return (im, True)
if cmp<0: # i.e. im is 'less' than element
i0 = im
pass
else:
i1 = im
pass
pass
return (i1, False)
#f add_point
def add_point( self, point ):
"""
        Add an external representation of a point to our set, wrapping it as a c_polygon_point, and return the wrapper
        If the point is already in our set, return the existing entry
        Keep the point set ordered by coords
"""
(ins, match) = self.find_insertion_index( self.point_set, point, lambda s,e:s.compare_with(e) )
if match: return self.point_set[ins]
new_point = c_polygon_point( point )
self.point_set.insert( ins, new_point )
return new_point
#f add_line
def add_line( self, pt0, pt1, on_contour=False ):
line = c_polygon_line(pt0,pt1,on_contour)
self.line_segments.append( line )
return line
#f find_or_create_line
def find_or_create_line( self, pt0, pt1, on_contour=False ):
line = pt0.find_line_segment_to(pt1)
if line is not None: return line
return self.add_line( pt0, pt1, on_contour )
#f add_triangle_from_points
def add_triangle_from_points( self, pts ):
"""
Must add a triangle from points to ensure that the winding order is passed in cleanly
"""
self.find_or_create_line( pts[0], pts[1] )
self.find_or_create_line( pts[1], pts[2] )
self.find_or_create_line( pts[2], pts[0] )
triangle = c_polygon_triangle( pts )
self.triangles.append(triangle)
return triangle
#f from_points
def from_points( self, points ):
"""
points is a list of c_points
Must create the set of c_polygon_point, and the line segments that use them
The points list can then be sorted to start with the smallest X point (with smallest Y on ties)
Once sorted, one can reorder the line segments to start at that smallest X point.
This is a normalized polygon
"""
self.line_segments = []
p0 = points[-1]
for i in range(len(points)):
p1 = points[i]
self.add_line( p0, p1, True )
p0 = p1
pass
pass
#f from_bezier_list
def from_bezier_list( self, bezier_list, straightness=1000 ):
epsilon = 1e-9
self.reset()
points = []
i = 0
for b in bezier_list:
subbeziers = b.break_into_segments(straightness)
for s in subbeziers:
s.pts[0].perturb(i*epsilon)
points.append(self.add_point(s.pts[0]) )
i += 1
pass
pass
self.from_points( points )
return self
#f number_points
def number_points( self ):
for i in range(len(self.point_set)):
self.point_set[i].entry_number = i
pass
pass
#f find_first_segment
    def find_first_segment( self ):
        for i in range(len(self.line_segments)):
            if self.line_segments[i].pts[0].entry_number==0:
                return i
            pass
        return None
#f normalize
def normalize( self ):
self.number_points()
self.remove_empty_triangles()
pass
#f remove_empty_triangles
def remove_empty_triangles( self ):
"""
        Find all pairs of consecutive line segments that make a 'zero area' triangle, and merge each pair into a single line segment
        If the line segments are (a, b) and (b,c), and b-a and c-b are parallel, then change the first line segment to be (a,c) and remove the second
"""
l = len(self.line_segments)
i = l-1
while i>0:
if self.line_segments[i].is_parallel_to(self.line_segments[(i+1)%l]):
self.line_segments[i] = self.line_segments[i].merge(self.line_segments[(i+1)%l])
self.line_segments.pop((i+1)%l)
if (i+1==l): i-= 2
l -= 1
pass
else:
i -= 1
pass
pass
pass
#f fill_with_triangles
def fill_with_triangles( self ):
"""
Starting with a normalized polygon, we can generate filled triangles
A normalized polygon has a sorted set of points and a list of line segments starting at any point without any consecutive parallel line segments
Since we can guarantee that no consecutive line segments are parallel, two consecutive line segments must form a non-zero area triangle
So, if we find the top-most of the left-most points, and the line segments starting there, we can guarantee that the triangle with those line segments is in the convex hull.
Note that it _MAY_ overlap with another line segment (i.e. some other point in the point set may lie within this triangle)
... argh
Find all points within the triangle
"""
first_segment = self.find_first_segment()
if first_segment is None:
die_horribly
pass
lines_to_do = self.line_segments[first_segment:]
lines_to_do.extend(self.line_segments[:first_segment])
pass
#f fill_convex_hull_with_triangles
def fill_convex_hull_with_triangles( self ):
"""
Starting with a normalized polygon, we can generate filled triangles
A normalized polygon has a sorted set of points and a list of line segments starting at any point without any consecutive parallel line segments
Since we can guarantee that no consecutive line segments are parallel, two consecutive line segments must form a non-zero area triangle
        Sort all points after the 'first point (x0,y0)' (left-most, top-most) by angle to the line y=y0 (-90,+90).
        Then sweep all points, creating triangles from (x0,y0) to (xn,yn), (xn+1,yn+1) in the swept order
"""
radial_order = []
(x0,y0) = self.point_set[0].coords()
for pt in self.point_set[1:]:
if not pt.used_in_lines(): continue
(x,y) = pt.coords()
(dx,dy) = (x-x0,y-y0)
(ins, match) = self.find_insertion_index( radial_order, (dx,dy), lambda s,e:s[0]*e[1]-s[1]*e[0] )
radial_order.insert( ins, (dx, dy, pt) )
pass
radial_order.insert(0,(0,0,self.point_set[0]))
num_points_used = len(radial_order)
# First fill from the 'starter point'
for i in range(2,num_points_used):
self.add_triangle_from_points( (radial_order[0][2],radial_order[i-1][2], radial_order[i][2]) )
pass
# Now fill in the perimeter (Graham Scan)
hull_pts = [ radial_order[0][2], radial_order[1][2], radial_order[2][2] ]
next_point = 3
while next_point<num_points_used:
# Check for concavity from (hull_pts[-2], hull_pts[-1]) and (hull_pts[-1],next_point)
# If not, can add next_point to the hull
# If there is, then:
# add a triangle to fill in the concavity
# pop hull_pts[-1] from the points on the hull
is_concavity = True
if (len(hull_pts)<2):
is_concavity = False
pass
else:
(x0,y0) = hull_pts[-2].coords()
(x1,y1) = hull_pts[-1].coords()
(x2,y2) = radial_order[next_point][2].coords()
(dx1,dy1) = (x1-x0,y1-y0)
(dx2,dy2) = (x2-x0,y2-y0)
is_concavity = ((dx1*dy2-dy1*dx2)>0)
pass
if is_concavity:
self.add_triangle_from_points( (hull_pts[-2], hull_pts[-1], radial_order[next_point][2] ) )
hull_pts.pop()
pass
else:
hull_pts.append( radial_order[next_point][2] )
next_point = next_point+1
pass
pass
        # Remove any zero-area triangles which may have been created (not yet implemented):
#for l in self.line_segments:
# tris = l.get_triangles()
# for t in tris:
# if t.is_degenerate_with_longest_side(l):
# l.swap_as_diagonals()
# break
# pass
# pass
# Now ensure that the actual lines forming the polygon are in the set of lines
# tricky
pass
#f shorten_quad_diagonals
def shorten_quad_diagonals( self, verbose=False ):
"""
Return number of triangles changed
"""
work_done = 0
for i in range(len(self.line_segments)):
l = self.line_segments[i]
if l.can_shorten_diagonal():
l.swap_diagonal( self, verbose=verbose )
work_done += 1
#self.check_consistent()
#return
pass
pass
return work_done
#f __repr__
def __repr__( self, verbose=False ):
result = "polygon\n"
result += " pts: %s\n"%(str(self.point_set))
result += " lns: %s\n"%(str(self.line_segments))
result += " tris: %s\n"%(str(self.triangles))
if verbose:
result = result.replace(", ",",\n ")
return result
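#f _example_triangulate_square
def _example_triangulate_square():
    """
    Illustrative sketch only (not used elsewhere): builds a small square polygon
    by hand and runs it through the normalize/fill/shorten flow, mirroring
    polygon_test_get_draw_fn below.  Assumes the 'bezier' module imported at the
    top of this file provides c_point; the coordinates are made-up examples.
    """
    poly = c_polygon()
    pts = []
    for coords in ( (0,0), (10,0), (10,10), (0,10) ):
        pts.append( poly.add_point( bezier.c_point(coords=coords) ) )
        pass
    poly.from_points( pts )
    poly.normalize()
    poly.fill_convex_hull_with_triangles()
    poly.shorten_quad_diagonals()
    poly.check_consistent()
    return poly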
#c Polygon pygame test
def polygon_test_get_draw_fn():
x = c_polygon()
bezier_list = []
for (p0,c0,p1) in [ ( (100,100), (110,90), (150, 50) ),
( (150, 50), (150,150), (200,100) ),
( (200,100), (150,150), (200,200) ),
( (200,200), (175,175), (150,150) ),
( (150,150), (125,125), (100,100) ),
]:
p0 = bezier.c_point(coords=p0)
c0 = bezier.c_point(coords=c0)
p1 = bezier.c_point(coords=p1)
bezier_list.append(bezier.c_bezier2(pts=(p0,c0,p1)))
pass
f = font.c_font("benegraphic").load_from_ttx("/Users/gstark_old/a.ttx")
bezier_list = f.create_bezier_lists( "ampersand" )[0]
print bezier_list
x.from_bezier_list( bezier_list=bezier_list, straightness=10 )
#print x
print "Normalizing"
x.normalize()
print x.__repr__(verbose=True)
print "Filling"
x.fill_convex_hull_with_triangles()
print x.__repr__(verbose=True)
x.check_consistent()
print "Shorten"
i=0
while x.shorten_quad_diagonals(i>100)>0:
i+=1
if i>101:break
pass
print "broken out after",i
x.check_consistent()
def draw_fn( screen ):
draw_triangles( screen, x.triangles )
draw_contour( screen, x )
return draw_fn
#c mesh_test_get_draw_fn
def mesh_test_get_draw_fn():
x = mesh.c_mesh()
bezier_list = []
for (p0,c0,p1) in [ ( (100,100), (110,90), (150, 50) ),
( (150, 50), (150,150), (200,100) ),
( (200,100), (150,150), (200,200) ),
( (200,200), (175,175), (150,150) ),
( (150,150), (125,125), (100,100) ),
]:
p0 = bezier.c_point(coords=p0)
c0 = bezier.c_point(coords=c0)
p1 = bezier.c_point(coords=p1)
bezier_list.append(bezier.c_bezier2(pts=(p0,c0,p1)))
pass
font_dir = "../../fonts/"
fontname = "beneg___"
#fontname = "SF Old Republic SC Bold"
glyphname = "ampersand"
glyphname = "dollar"
glyphname = "A"
glyphname = "C"
glyphname = "m"
#glyphname = "A"
#glyphname = "N"
#glyphname = "F"
#glyphname = "P"
font.c_font.convert_ttf_to_ttx( font_dir+fontname+".ttf", font_dir+"a.ttx" )
f = font.c_font("benegraphic").load_from_ttx( font_dir+"a.ttx")
contours = f.create_bezier_lists( glyphname )
x = f.get_mesh( glyphname, straightness=50 )
bbox = f.get_bbox(glyphname )
pygame.font.init()
pyfont = pygame.font.SysFont(u'palatino',10)
#pyfont=None
def draw_fn( screen ):
size = 800.0
scale = size/bbox[2]
        if size/bbox[3]<scale: scale=size/bbox[3]
scale = ( scale, -scale )
offset=( 0, size)
draw_triangles( screen, x.triangles, scale=scale, offset=offset, font=pyfont )
draw_contour( screen, x, scale=scale, offset=offset )
return draw_fn
#a Bezier test
def bezier_test_get_draw_fn():
f = font.c_font("benegraphic").load_from_ttx("/Users/gstark_old/a.ttx")
print f.glyphs
beziers = []
def create_bezier_lists( glyph ):
bezier_lists = []
for c in glyph.glyph["contours"]:
beziers = []
i = 0
p0 = c0 = p1 = None
while i<len(c):
(p0,c0,p1) = (c0,p1,bezier.c_point(coords=c[i]))
if ((i&1)==0) and p0 is not None:
beziers.append( bezier.c_bezier2( pts=(p0,c0,p1) ) )
pass
i += 1
pass
bezier_lists.append(beziers)
pass
return bezier_lists
def create_polygons( bezier_lists, straightness=10 ):
polygons = []
for bl in bezier_lists:
line_segments = []
for b in bl:
subbeziers = b.break_into_segments(straightness)
for s in subbeziers:
line_segments.append( s.pts[0] )
pass
line_segments.append( s.pts[2] )
pass
polygons.append(line_segments)
pass
return polygons
bezier_lists = create_bezier_lists( f.glyphs["ampersand"] )
polygons = create_polygons( bezier_lists )
split_beziers = []
for bl in bezier_lists:
for b in bl:
split_beziers.extend(b.break_into_segments(100))
pass
pass
pass
def draw_fn( screen ):
draw_world( screen, bezier_lists )
#draw_world( screen, (split_beziers,) )
draw_polygons( screen, polygons )
return draw_fn
#a Drawing functions
#c c_screen
class c_screen( object ):
def __init__( self, screen ):
self.screen = screen
pass
def draw_line( self, x0, y0, x1, y1, color ):
draw_line( self.screen, x0, y0, x1, y1, color )
pass
def draw_dot( self, x, y, color ):
draw_dot( self.screen, x, y, color )
pass
def fill_polygon( self, points_list, color ):
pygame.draw.polygon(self.screen, color, points_list )
pass
def blit( self, surface, coords ):
self.screen.blit( surface, coords )
pass
#f draw_dot
def draw_dot( screen, x, y, color ):
screen.fill(color, rect=(x,y,1,1) )
#f draw_line
def draw_line( screen, x0, y0, x1, y1, color ):
if (x0>x1): return draw_line( screen, x1, y1, x0, y0, color )
dx = x1-x0
dy = y1-y0
if (dy>0) and (dx>=dy): return draw_line_by_dx( screen, x0, y0, dx, dy, +1, color )
if (dy<=0) and (dx>=-dy): return draw_line_by_dx( screen, x0, y0, dx, -dy, -1, color )
if (dy<0): (x0,y0,x1,y1) = (x1,y1,x0,y0)
dx = x1-x0
dy = y1-y0
if (dx>0):
return draw_line_by_dy( screen, x0, y0, dx, dy, +1, color )
return draw_line_by_dy( screen, x0, y0, -dx, dy, -1, color )
#f draw_line_by_dx
def draw_line_by_dx( screen, x, y, dx, dy, pdy, color ):
"""Called if dx>=dy"""
px = int(x)
py = int(y)
error = -(dx/2)
for i in range(int(dx)):
draw_dot(screen,px,py,color)
px = px+1
error = error + dy
if error>0:
py=py+pdy
error = error-dx
pass
pass
pass
#f draw_line_by_dy
def draw_line_by_dy( screen, x, y, dx, dy, pdx, color ):
"""Called if dy>dx"""
px = int(x)
py = int(y)
error = -(dy/2)
for i in range(int(dy)):
draw_dot(screen,px,py,color)
py = py+1
error = error + dx
if error>0:
px=px+pdx
error = error-dy
pass
pass
pass
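#f _example_rasterize_line
def _example_rasterize_line():
    """
    Illustrative sketch only: draw_line only needs an object with a pygame-style
    fill(color, rect=...) method, so it can be exercised without a display.
    The endpoints and color below are made-up example values.
    """
    class c_dot_collector( object ):
        def __init__( self ):
            self.dots = []
            pass
        def fill( self, color, rect=None ):
            self.dots.append( (rect[0], rect[1]) )
            pass
        pass
    collector = c_dot_collector()
    draw_line( collector, 0, 0, 5, 2, (255,255,255,255) )
    return collector.dots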
#f draw_world
def draw_world(screen, object_lists):
screen.draw_line( 0,0, 100,100, (128,128,128,255) )
screen.draw_line( 200,150, 100,100, (128,128,128,255) )
screen.draw_line( 200,150, 300,600, (128,128,128,255) )
for ol in object_lists:
for o in ol:
screen.draw_line( o.pts[0].coords[0],1000-o.pts[0].coords[1],
o.pts[2].coords[0], 1000-o.pts[2].coords[1], (o.s*50,255,255,255) )
pass
pass
pass
#f draw_polygons
def draw_polygons(screen, polygons):
for poly in polygons[0],:
last_pt = poly[-1]
last_pt = None
for pt in poly:
if last_pt is not None:
screen.draw_line( last_pt.coords[0],last_pt.coords[1],
pt.coords[0], pt.coords[1], (255,128,128,255) )
last_pt = pt
pass
points_list = []
for pt in poly:
points_list.append( (pt.coords[0], pt.coords[1]) )
pass
screen.fill_polygon( points_list, (128,255,128,255) )
pass
pass
#f draw_triangles
def draw_triangles(screen, triangles, scale=(1,1), offset=(0,0), font=None):
i = 0
num = len(triangles)
text_list = []
for t in triangles:
points_list = []
brightness = 100
if t.winding_order is not None and t.winding_order>0: brightness=255
centre = [0,0]
for pt in t.get_points():
c = pt.coords()
(x,y) = (c[0]*scale[0]+offset[0], c[1]*scale[1]+offset[1])
centre[0] += x/3.0
centre[1] += y/3.0
points_list.append( (x,y) )
pass
screen.fill_polygon( points_list, (brightness*i/num,brightness-brightness*i/num,brightness,255) )
if font is not None:
text_list.append( (centre, str(t.triangle_num)) )
pass
i+=1
pass
for (centre, text) in text_list:
p = font.render(text,False,(255,255,255))
screen.blit(p,centre)
pass
pass
#f draw_contour
def draw_contour(screen, polygon, scale=(1,1), offset=(0,0)):
i = 0
for l in polygon.line_segments:
#if not l.on_contour: continue
coords = []
for pt in l.get_points():
c = pt.coords()
coords.append( c[0]*scale[0]+offset[0] )
coords.append( c[1]*scale[1]+offset[1] )
pass
screen.draw_line( coords[0], coords[1], coords[2], coords[3], (64,255,64,255) )
pass
#a Toplevel
(w,h) = (1000,1000)
max_straightness = 2
screen = pygame.display.set_mode((w,h),pygame.DOUBLEBUF|pygame.HWSURFACE)
my_screen = c_screen(screen)
#draw_fn = bezier_test_get_draw_fn()
#draw_fn = polygon_test_get_draw_fn()
draw_fn = mesh_test_get_draw_fn()
def loop():
alive = True
while alive:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit(); sys.exit();
pass
pass
draw_fn( my_screen )
pygame.display.flip()
msElapsed = clock.tick(15)
pass
pass
clock = pygame.time.Clock()
loop()
|
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The pulse qobj models."""
from marshmallow.validate import Range, Regexp, Length, OneOf
from qiskit.qobj.utils import MeasReturnType
from qiskit.validation import BaseSchema, bind_schema, BaseModel
from qiskit.validation.fields import (Integer, String, Number, Float, Complex, List,
Nested, DictParameters, ByType)
from .base import (QobjInstructionSchema, QobjExperimentConfigSchema, QobjExperimentSchema,
QobjConfigSchema, QobjInstruction, QobjExperimentConfig,
QobjExperiment, QobjConfig)
class QobjMeasurementOptionSchema(BaseSchema):
"""Schema for QobjMeasOptiton."""
# Required properties.
name = String(required=True)
params = DictParameters(valid_value_types=(int, float, str, bool, type(None)),
required=True)
class PulseLibraryItemSchema(BaseSchema):
"""Schema for PulseLibraryItem."""
# Required properties.
name = String(required=True)
samples = List(Complex(), required=True, validate=Length(min=1))
class PulseQobjInstructionSchema(QobjInstructionSchema):
"""Schema for PulseQobjInstruction."""
# pylint: disable=invalid-name
# Required properties
t0 = Integer(required=True, validate=Range(min=0))
# Optional properties.
ch = String(validate=Regexp('[dum]([0-9])+'))
conditional = Integer(validate=Range(min=0))
val = ByType([Complex(), String()])
phase = ByType([Float(), String()])
duration = Integer(validate=Range(min=1))
qubits = List(Integer(validate=Range(min=0)), validate=Length(min=1))
memory_slot = List(Integer(validate=Range(min=0)), validate=Length(min=1))
register_slot = List(Integer(validate=Range(min=0)), validate=Length(min=1))
kernels = Nested(QobjMeasurementOptionSchema, many=True)
discriminators = Nested(QobjMeasurementOptionSchema, many=True)
label = String()
type = String()
class PulseQobjExperimentConfigSchema(QobjExperimentConfigSchema):
"""Schema for PulseQobjExperimentConfig."""
# Optional properties.
qubit_lo_freq = List(Number())
meas_lo_freq = List(Number())
class PulseQobjExperimentSchema(QobjExperimentSchema):
"""Schema for PulseQobjExperiment."""
# Required properties.
instructions = Nested(PulseQobjInstructionSchema, required=True, many=True,
validate=Length(min=1))
# Optional properties.
config = Nested(PulseQobjExperimentConfigSchema)
class PulseQobjConfigSchema(QobjConfigSchema):
"""Schema for PulseQobjConfig of device backend."""
# Required properties.
meas_level = Integer(required=True, validate=Range(min=0, max=2))
meas_return = String(required=True, validate=OneOf(choices=(MeasReturnType.AVERAGE,
MeasReturnType.SINGLE)))
pulse_library = Nested(PulseLibraryItemSchema, required=True, many=True)
qubit_lo_freq = List(Number(validate=Range(min=0)), required=True)
meas_lo_freq = List(Number(validate=Range(min=0)), required=True)
# Optional properties.
memory_slot_size = Integer(validate=Range(min=1))
rep_time = Integer(validate=Range(min=0))
@bind_schema(QobjMeasurementOptionSchema)
class QobjMeasurementOption(BaseModel):
"""Model for QobjMeasurementOption.
Please note that this class only describes the required fields. For the
full description of the model, please check ``QobjMeasurementOptionSchema``.
Attributes:
name (str): name of option specified in the backend
params (dict): measurement parameter
"""
def __init__(self, name, params, **kwargs):
self.name = name
self.params = params
super().__init__(**kwargs)
@bind_schema(PulseLibraryItemSchema)
class PulseLibraryItem(BaseModel):
"""Model for PulseLibraryItem.
Please note that this class only describes the required fields. For the
full description of the model, please check ``PulseLibraryItemSchema``.
Attributes:
name (str): name of pulse
        samples (list[complex]): list of complex values defining the pulse shape
"""
def __init__(self, name, samples, **kwargs):
self.name = name
self.samples = samples
super().__init__(**kwargs)
@bind_schema(PulseQobjInstructionSchema)
class PulseQobjInstruction(QobjInstruction):
"""Model for PulseQobjInstruction inherit from QobjInstruction.
Please note that this class only describes the required fields. For the
full description of the model, please check ``PulseQobjInstructionSchema``.
Attributes:
name (str): name of the instruction
        t0 (int): starting time of the instruction
"""
def __init__(self, name, t0, **kwargs):
# pylint: disable=invalid-name
self.t0 = t0
super().__init__(name=name,
t0=t0,
**kwargs)
@bind_schema(PulseQobjExperimentConfigSchema)
class PulseQobjExperimentConfig(QobjExperimentConfig):
"""Model for PulseQobjExperimentConfig inherit from QobjExperimentConfig.
Please note that this class only describes the required fields. For the
full description of the model, please check ``PulseQobjExperimentConfigSchema``.
"""
pass
@bind_schema(PulseQobjExperimentSchema)
class PulseQobjExperiment(QobjExperiment):
"""Model for PulseQobjExperiment inherit from QobjExperiment.
Please note that this class only describes the required fields. For the
full description of the model, please check ``PulseQobjExperimentSchema``.
Attributes:
instructions (list[PulseQobjInstruction]): list of instructions.
"""
def __init__(self, instructions, **kwargs):
super().__init__(instructions=instructions,
**kwargs)
@bind_schema(PulseQobjConfigSchema)
class PulseQobjConfig(QobjConfig):
"""Model for PulseQobjConfig inherit from QobjConfig.
Please note that this class only describes the required fields. For the
full description of the model, please check ``PulseQobjConfigSchema``.
Attributes:
        meas_level (int): the level of measurement output (0: raw, 1: kerneled, 2: discriminated).
        meas_lo_freq (list[float]): local oscillator frequencies of the measurement pulses.
        meas_return (str): whether measurement results are averaged over shots or returned per shot.
        pulse_library (list[qiskit.qobj.PulseLibraryItem]): a pulse library.
        qubit_lo_freq (list[float]): local oscillator frequencies of the qubit drive pulses.
"""
def __init__(self, meas_level, meas_return, pulse_library,
qubit_lo_freq, meas_lo_freq, **kwargs):
self.meas_level = meas_level
self.meas_return = meas_return
self.pulse_library = pulse_library
self.qubit_lo_freq = qubit_lo_freq
self.meas_lo_freq = meas_lo_freq
super().__init__(meas_level=meas_level,
meas_return=meas_return,
pulse_library=pulse_library,
qubit_lo_freq=qubit_lo_freq,
meas_lo_freq=meas_lo_freq,
**kwargs)
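# Illustrative sketch only (not part of the public Qiskit API): shows how the
# models defined above can be instantiated directly.  The channel name, phase,
# sample values and kernel parameters are made-up examples; whether they are
# validated at construction time or only on (de)serialization depends on the
# bound schemas.
def _example_pulse_qobj_models():
    kernel = QobjMeasurementOption(name='boxcar', params={'start_window': 0})
    pulse = PulseLibraryItem(name='example_pulse',
                             samples=[0.1 + 0.0j, 0.2 + 0.1j])
    instruction = PulseQobjInstruction(name='fc', t0=0, ch='d0', phase=0.0)
    return kernel, pulse, instruction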
|
|
from unittest import mock
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Prefetch, QuerySet
from django.db.models.query import get_prefetcher, prefetch_related_objects
from django.db.models.sql import Query
from django.test import TestCase, override_settings
from django.test.utils import CaptureQueriesContext
from .models import (
Article, Author, Author2, AuthorAddress, AuthorWithAge, Bio, Book,
Bookmark, BookReview, BookWithYear, Comment, Department, Employee,
FavoriteAuthors, House, LessonEntry, ModelIterableSubclass, Person,
Qualification, Reader, Room, TaggedItem, Teacher, WordEntry,
)
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Poems')
cls.book2 = Book.objects.create(title='Jane Eyre')
cls.book3 = Book.objects.create(title='Wuthering Heights')
cls.book4 = Book.objects.create(title='Sense and Sensibility')
cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name='Amy')
cls.reader2 = Reader.objects.create(name='Belinda')
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
class PrefetchRelatedTests(TestDataMixin, TestCase):
def assertWhereContains(self, sql, needle):
where_idx = sql.index('WHERE')
self.assertEqual(
sql.count(str(needle), where_idx), 1,
msg="WHERE clause doesn't contain %s, actual SQL: %s" % (needle, sql[where_idx:])
)
def test_m2m_forward(self):
with self.assertNumQueries(2):
lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]
normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
self.assertEqual(lists, normal_lists)
def test_m2m_reverse(self):
with self.assertNumQueries(2):
lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]
normal_lists = [list(a.books.all()) for a in Author.objects.all()]
self.assertEqual(lists, normal_lists)
def test_foreignkey_forward(self):
with self.assertNumQueries(2):
books = [a.first_book for a in Author.objects.prefetch_related('first_book')]
normal_books = [a.first_book for a in Author.objects.all()]
self.assertEqual(books, normal_books)
def test_foreignkey_reverse(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors')]
self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])
def test_onetoone_reverse_no_match(self):
# Regression for #17439
with self.assertNumQueries(2):
book = Book.objects.prefetch_related('bookwithyear').all()[0]
with self.assertNumQueries(0):
with self.assertRaises(BookWithYear.DoesNotExist):
book.bookwithyear
def test_onetoone_reverse_with_to_field_pk(self):
"""
A model (Bio) with a OneToOneField primary key (author) that references
a non-pk field (name) on the related model (Author) is prefetchable.
"""
Bio.objects.bulk_create([
Bio(author=self.author1),
Bio(author=self.author2),
Bio(author=self.author3),
])
authors = Author.objects.filter(
name__in=[self.author1, self.author2, self.author3],
).prefetch_related('bio')
with self.assertNumQueries(2):
for author in authors:
self.assertEqual(author.name, author.bio.author.name)
def test_survives_clone(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]
def test_len(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
len(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_bool(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
bool(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_count(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.count() for b in qs]
def test_exists(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related('first_time_authors')
self.assertIn(qs[0], qs)
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
def test_m2m_then_m2m(self):
"""A m2m can be followed through another m2m."""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_overriding_prefetch(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books', 'books__read_by')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by', 'books')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_get(self):
"""
Objects retrieved with .get() get the prefetch behavior.
"""
# Need a double
with self.assertNumQueries(3):
author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
lists = [[str(r) for r in b.read_by.all()] for b in author.books.all()]
self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
def test_foreign_key_then_m2m(self):
"""
A m2m relation can be followed after a relation like ForeignKey that
doesn't have many objects.
"""
with self.assertNumQueries(2):
qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
lists = [[str(r) for r in a.first_book.read_by.all()]
for a in qs]
self.assertEqual(lists, [["Amy"], ["Amy"], ["Amy"], ["Amy", "Belinda"]])
def test_reverse_one_to_one_then_m2m(self):
"""
A m2m relation can be followed after going through the select_related
reverse of an o2o.
"""
qs = Author.objects.prefetch_related('bio__books').select_related('bio')
with self.assertNumQueries(1):
list(qs.all())
Bio.objects.create(author=self.author1)
with self.assertNumQueries(2):
list(qs.all())
def test_attribute_error(self):
qs = Reader.objects.all().prefetch_related('books_read__xyz')
msg = (
"Cannot find 'xyz' on Book object, 'books_read__xyz' "
"is an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related('authors__name')
msg = (
"'authors__name' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to prefetch_related()."
)
with self.assertRaisesMessage(ValueError, msg) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
self.assertIn("name", str(cm.exception))
def test_prefetch_eq(self):
prefetch_1 = Prefetch('authors', queryset=Author.objects.all())
prefetch_2 = Prefetch('books', queryset=Book.objects.all())
self.assertEqual(prefetch_1, prefetch_1)
self.assertEqual(prefetch_1, mock.ANY)
self.assertNotEqual(prefetch_1, prefetch_2)
def test_forward_m2m_to_attr_conflict(self):
msg = 'to_attr=authors conflicts with a field on the Book model.'
authors = Author.objects.all()
with self.assertRaisesMessage(ValueError, msg):
list(Book.objects.prefetch_related(
Prefetch('authors', queryset=authors, to_attr='authors'),
))
# Without the ValueError, an author was deleted due to the implicit
# save of the relation assignment.
self.assertEqual(self.book1.authors.count(), 3)
def test_reverse_m2m_to_attr_conflict(self):
msg = 'to_attr=books conflicts with a field on the Author model.'
poems = Book.objects.filter(title='Poems')
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.prefetch_related(
Prefetch('books', queryset=poems, to_attr='books'),
))
# Without the ValueError, a book was deleted due to the implicit
# save of reverse relation assignment.
self.assertEqual(self.author1.books.count(), 2)
def test_m2m_then_reverse_fk_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__addresses'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_m2m_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__favorite_authors'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_reverse_one_to_one_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__authorwithage'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.id)
def test_filter_deferred(self):
"""
Related filtering of prefetched querysets is deferred until necessary.
"""
add_q = Query.add_q
with mock.patch.object(
Query,
'add_q',
autospec=True,
side_effect=lambda self, q: add_q(self, q),
) as add_q_mock:
list(Book.objects.prefetch_related('authors'))
self.assertEqual(add_q_mock.call_count, 1)
class RawQuerySetTests(TestDataMixin, TestCase):
def test_basic(self):
with self.assertNumQueries(2):
books = Book.objects.raw(
"SELECT * FROM prefetch_related_book WHERE id = %s",
(self.book1.id,)
).prefetch_related('authors')
book1 = list(books)[0]
with self.assertNumQueries(0):
self.assertCountEqual(book1.authors.all(), [self.author1, self.author2, self.author3])
def test_prefetch_before_raw(self):
with self.assertNumQueries(2):
books = Book.objects.prefetch_related('authors').raw(
"SELECT * FROM prefetch_related_book WHERE id = %s",
(self.book1.id,)
)
book1 = list(books)[0]
with self.assertNumQueries(0):
self.assertCountEqual(book1.authors.all(), [self.author1, self.author2, self.author3])
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.raw(
"SELECT * FROM prefetch_related_author"
).prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
class CustomPrefetchTests(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
        Helper method that returns a list of (obj, related_objects) tuples, one for
        each object in obj_iter. For each object, every attribute chain in 'path'
        is traversed recursively and the objects found along it are collected.
"""
ret_val = []
if hasattr(obj_iter, 'all'):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
try:
related = getattr(obj, part[0])
except ObjectDoesNotExist:
continue
if related is not None:
rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
@classmethod
def setUpTestData(cls):
cls.person1 = Person.objects.create(name='Joe')
cls.person2 = Person.objects.create(name='Mary')
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
cls.house1 = House.objects.create(name='House 1', address='123 Main St', owner=cls.person1)
cls.room1_1 = Room.objects.create(name='Dining room', house=cls.house1)
cls.room1_2 = Room.objects.create(name='Lounge', house=cls.house1)
cls.room1_3 = Room.objects.create(name='Kitchen', house=cls.house1)
cls.house1.main_room = cls.room1_1
cls.house1.save()
cls.person1.houses.add(cls.house1)
cls.house2 = House.objects.create(name='House 2', address='45 Side St', owner=cls.person1)
cls.room2_1 = Room.objects.create(name='Dining room', house=cls.house2)
cls.room2_2 = Room.objects.create(name='Lounge', house=cls.house2)
cls.room2_3 = Room.objects.create(name='Kitchen', house=cls.house2)
cls.house2.main_room = cls.room2_1
cls.house2.save()
cls.person1.houses.add(cls.house2)
cls.house3 = House.objects.create(name='House 3', address='6 Downing St', owner=cls.person2)
cls.room3_1 = Room.objects.create(name='Dining room', house=cls.house3)
cls.room3_2 = Room.objects.create(name='Lounge', house=cls.house3)
cls.room3_3 = Room.objects.create(name='Kitchen', house=cls.house3)
cls.house3.main_room = cls.room3_1
cls.house3.save()
cls.person2.houses.add(cls.house3)
cls.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=cls.person2)
cls.room4_1 = Room.objects.create(name='Dining room', house=cls.house4)
cls.room4_2 = Room.objects.create(name='Lounge', house=cls.house4)
cls.room4_3 = Room.objects.create(name='Kitchen', house=cls.house4)
cls.house4.main_room = cls.room4_1
cls.house4.save()
cls.person2.houses.add(cls.house4)
def test_traverse_qs(self):
qs = Person.objects.prefetch_related('houses')
related_objs_normal = [list(p.houses.all()) for p in qs],
related_objs_from_traverse = [[inner[0] for inner in o[1]]
for o in self.traverse_qs(qs, [['houses']])]
self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
def test_ambiguous(self):
# Ambiguous: Lookup was already seen with a different queryset.
msg = (
"'houses' lookup was already seen with a different queryset. You "
"may need to adjust the ordering of your lookups."
)
# lookup.queryset shouldn't be evaluated.
with self.assertNumQueries(3):
with self.assertRaisesMessage(ValueError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('houses', queryset=House.objects.all()),
),
[['houses', 'rooms']],
)
# Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms.
msg = (
"Cannot find 'houses_lst' on Person object, 'houses_lst__rooms' is "
"an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
'houses_lst__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', 'houses'),
[['houses', 'rooms']]
)
self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses'),
[['houses']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses')),
[['houses']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
[['houses_lst']]
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related('occupants'),
[['occupants']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants')),
[['occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
[['occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related('house__occupants'),
[['house', 'occupants']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants')),
[['house', 'occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
[['house', 'occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
[['content_object', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch('content_object'),
Prefetch('content_object__rooms', to_attr='rooms_lst')
),
[['content_object', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses', 'houses__rooms'),
[['houses', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
[['houses_lst', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch('houses', to_attr='houses_lst'),
Prefetch('houses_lst__rooms', to_attr='rooms_lst')
),
[['houses_lst', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
[['tags', 'content_object'], ['favorite_tags']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch('tags', to_attr='tags_lst'),
Prefetch('tags_lst__content_object'),
Prefetch('favorite_tags'),
),
[['tags_lst', 'content_object'], ['favorite_tags']]
)
self.assertEqual(lst1, lst2)
def test_traverse_single_item_property(self):
# Control lookups.
with self.assertNumQueries(5):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
'primary_house__occupants__houses',
),
[['primary_house', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(5):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('primary_house__occupants', to_attr='occupants_lst'),
'primary_house__occupants_lst__houses',
),
[['primary_house', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_traverse_multiple_items_property(self):
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
'all_houses__occupants__houses',
),
[['all_houses', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
Prefetch('all_houses__occupants', to_attr='occupants_lst'),
'all_houses__occupants_lst__houses',
),
[['all_houses', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related('houses'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses']]),
self.traverse_qs(lst2, [['houses_lst']])
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
'houses',
queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]),
to_attr='houses_lst',
)
)
)
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__rooms'))
with self.assertNumQueries(3):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'rooms']]),
self.traverse_qs(lst2, [['houses', 'rooms_lst']])
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__owner'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.select_related('owner'))))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'owner']]),
self.traverse_qs(lst2, [['houses', 'owner']])
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
with self.assertNumQueries(4):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst'),
Prefetch('houses_lst__rooms_lst__main_room_of')
))
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
self.assertEqual(len(lst2[1].houses_lst), 0)
# Test ForwardManyToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('house')
lst1 = self.traverse_qs(rooms, [['house', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['house', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
houses = House.objects.select_related('owner')
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr'))
lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'house')
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr')
).first()
self.assertIsNone(room.house_attr)
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name')))
with self.assertNumQueries(2):
getattr(rooms.first().house, 'name')
with self.assertNumQueries(3):
getattr(rooms.first().house, 'address')
# Test ReverseOneToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('main_room_of')
lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
rooms = list(
Room.objects.all().prefetch_related(
Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr')
)
)
lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'main_room_of')
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr')
).first()
self.assertIsNone(room.main_room_of_attr)
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
person = Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.filter(name='House 1')),
).get(pk=self.person1.pk)
self.assertEqual(
list(person.houses.all()),
list(person.houses.all().all()),
)
def test_nested_prefetch_related_are_not_overwritten(self):
# Regression test for #24873
houses_2 = House.objects.prefetch_related(Prefetch('rooms'))
persons = Person.objects.prefetch_related(Prefetch('houses', queryset=houses_2))
houses = House.objects.prefetch_related(Prefetch('occupants', queryset=persons))
list(houses) # queryset must be evaluated once to reproduce the bug.
self.assertEqual(
houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
self.room2_1
)
def test_nested_prefetch_related_with_duplicate_prefetcher(self):
"""
Nested prefetches whose name clashes with descriptor names
(Person.houses here) are allowed.
"""
occupants = Person.objects.prefetch_related(
Prefetch('houses', to_attr='some_attr_name'),
Prefetch('houses', queryset=House.objects.prefetch_related('main_room')),
)
houses = House.objects.prefetch_related(Prefetch('occupants', queryset=occupants))
with self.assertNumQueries(5):
self.traverse_qs(list(houses), [['occupants', 'houses', 'main_room']])
def test_values_queryset(self):
with self.assertRaisesMessage(ValueError, 'Prefetch querysets cannot use values().'):
Prefetch('houses', House.objects.values('pk'))
# That error doesn't affect managers with custom ModelIterable subclasses
self.assertIs(Teacher.objects_custom.all()._iterable_class, ModelIterableSubclass)
Prefetch('teachers', Teacher.objects_custom.all())
def test_to_attr_doesnt_cache_through_attr_as_list(self):
house = House.objects.prefetch_related(
Prefetch('rooms', queryset=Room.objects.all(), to_attr='to_rooms'),
).get(pk=self.house3.pk)
self.assertIsInstance(house.rooms.all(), QuerySet)
def test_to_attr_cached_property(self):
persons = Person.objects.prefetch_related(
Prefetch('houses', House.objects.all(), to_attr='cached_all_houses'),
)
for person in persons:
# To bypass caching at the related descriptor level, don't use
# person.houses.all() here.
all_houses = list(House.objects.filter(occupants=person))
with self.assertNumQueries(0):
self.assertEqual(person.cached_all_houses, all_houses)
def test_filter_deferred(self):
"""
Related filtering of prefetched querysets is deferred until necessary.
"""
add_q = Query.add_q
with mock.patch.object(
Query,
'add_q',
autospec=True,
side_effect=lambda self, q: add_q(self, q),
) as add_q_mock:
list(House.objects.prefetch_related(
Prefetch('occupants', queryset=Person.objects.all())
))
self.assertEqual(add_q_mock.call_count, 1)
class DefaultManagerTests(TestCase):
def setUp(self):
self.qual1 = Qualification.objects.create(name="BA")
self.qual2 = Qualification.objects.create(name="BSci")
self.qual3 = Qualification.objects.create(name="MA")
self.qual4 = Qualification.objects.create(name="PhD")
self.teacher1 = Teacher.objects.create(name="Mr Cleese")
self.teacher2 = Teacher.objects.create(name="Mr Idle")
self.teacher3 = Teacher.objects.create(name="Mr Chapman")
self.teacher1.qualifications.add(self.qual1, self.qual2, self.qual3, self.qual4)
self.teacher2.qualifications.add(self.qual1)
self.teacher3.qualifications.add(self.qual2)
self.dept1 = Department.objects.create(name="English")
self.dept2 = Department.objects.create(name="Physics")
self.dept1.teachers.add(self.teacher1, self.teacher2)
self.dept2.teachers.add(self.teacher1, self.teacher3)
def test_m2m_then_m2m(self):
with self.assertNumQueries(3):
# When we prefetch the teachers, and force the query, we don't want
# the default manager on teachers to immediately get all the related
# qualifications, since this will do one query per teacher.
qs = Department.objects.prefetch_related('teachers')
depts = "".join("%s department: %s\n" %
(dept.name, ", ".join(str(t) for t in dept.teachers.all()))
for dept in qs)
self.assertEqual(depts,
"English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
"Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
@classmethod
def setUpTestData(cls):
book1 = Book.objects.create(title="Winnie the Pooh")
book2 = Book.objects.create(title="Do you like green eggs and spam?")
book3 = Book.objects.create(title="Three Men In A Boat")
reader1 = Reader.objects.create(name="me")
reader2 = Reader.objects.create(name="you")
reader3 = Reader.objects.create(name="someone")
book1.read_by.add(reader1, reader2)
book2.read_by.add(reader2)
book3.read_by.add(reader3)
cls.book1, cls.book2, cls.book3 = book1, book2, book3
cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3
def test_prefetch_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="great", content_object=self.reader1)
TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
# 1 for TaggedItem table, 1 for Book table, 1 for Reader table
with self.assertNumQueries(3):
qs = TaggedItem.objects.prefetch_related('content_object')
list(qs)
def test_prefetch_GFK_nonint_pk(self):
Comment.objects.create(comment="awesome", content_object=self.book1)
# 1 for Comment table, 1 for Book table
with self.assertNumQueries(2):
qs = Comment.objects.prefetch_related('content_object')
[c.content_object for c in qs]
def test_prefetch_GFK_uuid_pk(self):
article = Article.objects.create(name='Django')
Comment.objects.create(comment='awesome', content_object_uuid=article)
qs = Comment.objects.prefetch_related('content_object_uuid')
self.assertEqual([c.content_object_uuid for c in qs], [article])
def test_prefetch_GFK_fk_pk(self):
book = Book.objects.create(title='Poems')
book_with_year = BookWithYear.objects.create(book=book, published_year=2019)
Comment.objects.create(comment='awesome', content_object=book_with_year)
qs = Comment.objects.prefetch_related('content_object')
self.assertEqual([c.content_object for c in qs], [book_with_year])
def test_traverse_GFK(self):
"""
A 'content_object' can be traversed with prefetch_related() and
get to related objects on the other side (assuming it is suitably
filtered)
"""
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
TaggedItem.objects.create(tag="awesome", content_object=self.book3)
TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
ct = ContentType.objects.get_for_model(Book)
# We get 3 queries - 1 for main query, 1 for content_objects since they
# all use the same table, and 1 for the 'read_by' relation.
with self.assertNumQueries(3):
# If we limit to books, we know that they will have 'read_by'
# attributes, so the following makes sense:
qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
readers_of_awesome_books = {r.name for tag in qs
for r in tag.content_object.read_by.all()}
self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
def test_nullable_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1,
created_by=self.reader1)
TaggedItem.objects.create(tag="great", content_object=self.book2)
TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
with self.assertNumQueries(2):
result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
self.assertEqual(result,
[t.created_by for t in TaggedItem.objects.all()])
def test_generic_relation(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
for t in b.tags.all()]
self.assertEqual(sorted(tags), ["django", "python"])
def test_charfield_GFK(self):
b = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=b, tag='django')
TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
with self.assertNumQueries(3):
bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
self.assertEqual(sorted(i.tag for i in bookmark.tags.all()), ["django", "python"])
self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
def test_custom_queryset(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
django_tag = TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
bookmark = Bookmark.objects.prefetch_related(
Prefetch('tags', TaggedItem.objects.filter(tag='django')),
).get()
with self.assertNumQueries(0):
self.assertEqual(list(bookmark.tags.all()), [django_tag])
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all()))
class MultiTableInheritanceTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = BookWithYear.objects.create(title='Poems', published_year=2010)
cls.book2 = BookWithYear.objects.create(title='More poems', published_year=2011)
cls.author1 = AuthorWithAge.objects.create(name='Jane', first_book=cls.book1, age=50)
cls.author2 = AuthorWithAge.objects.create(name='Tom', first_book=cls.book1, age=49)
cls.author3 = AuthorWithAge.objects.create(name='Robert', first_book=cls.book2, age=48)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
cls.book2.aged_authors.add(cls.author2, cls.author3)
cls.br1 = BookReview.objects.create(book=cls.book1, notes='review book1')
cls.br2 = BookReview.objects.create(book=cls.book2, notes='review book2')
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related('addresses')
addresses = [[str(address) for address in obj.addresses.all()] for obj in qs]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related('book')
titles = [obj.book.title for obj in qs]
self.assertEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related('books_with_year')
with self.assertNumQueries(2):
lst = [[str(book) for book in author.books_with_year.all()] for author in qs]
qs = AuthorWithAge.objects.all()
lst2 = [[str(book) for book in author.books_with_year.all()] for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related('aged_authors')
with self.assertNumQueries(2):
lst = [[str(author) for author in book.aged_authors.all()] for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[str(author) for author in book.aged_authors.all()] for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related('author')]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
authors = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
# Regression for #18090: the prefetching query must include an IN clause.
# Note that on Oracle the table name is upper case in the generated SQL,
# thus the .lower() call.
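            # connection.queries is normally populated only when DEBUG is True,
            # which is why this test overrides DEBUG above.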
self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
self.assertIn(' IN ', connection.queries[-1]['sql'])
self.assertEqual(authors, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book = Book.objects.create(title='Poems')
cls.author1 = Author.objects.create(name='Jane', first_book=cls.book)
cls.author2 = Author.objects.create(name='Tom', first_book=cls.book)
cls.author3 = Author.objects.create(name='Robert', first_book=cls.book)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
FavoriteAuthors.objects.create(author=cls.author2, likes_author=cls.author3)
FavoriteAuthors.objects.create(author=cls.author3, likes_author=cls.author1)
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = Author.objects.prefetch_related('addresses')
addresses = [[str(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_m2m(self):
with self.assertNumQueries(3):
qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
favorites = [(
[str(i_like) for i_like in author.favorite_authors.all()],
[str(likes_me) for likes_me in author.favors_me.all()]
) for author in qs]
self.assertEqual(
favorites,
[
([str(self.author2)], [str(self.author3)]),
([str(self.author3)], [str(self.author1)]),
([str(self.author1)], [str(self.author2)])
]
)
class LookupOrderingTest(TestCase):
"""
Test cases that demonstrate that ordering of lookups is important, and
ensure it is preserved.
"""
def setUp(self):
self.person1 = Person.objects.create(name="Joe")
self.person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
self.house1 = House.objects.create(address="123 Main St")
self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
self.house1.main_room = self.room1_1
self.house1.save()
self.person1.houses.add(self.house1)
self.house2 = House.objects.create(address="45 Side St")
self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
self.house2.main_room = self.room2_1
self.house2.save()
self.person1.houses.add(self.house2)
self.house3 = House.objects.create(address="6 Downing St")
self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
self.house3.main_room = self.room3_1
self.house3.save()
self.person2.houses.add(self.house3)
self.house4 = House.objects.create(address="7 Regents St")
self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
self.house4.main_room = self.room4_1
self.house4.save()
self.person2.houses.add(self.house4)
def test_order(self):
with self.assertNumQueries(4):
# The following two queries must be done in the same order as written,
# otherwise 'primary_house' will cause non-prefetched lookups
qs = Person.objects.prefetch_related('houses__rooms',
'primary_house__occupants')
[list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
@classmethod
def setUpTestData(cls):
boss = Employee.objects.create(name="Peter")
Employee.objects.create(name="Joe", boss=boss)
Employee.objects.create(name="Angela", boss=boss)
def test_traverse_nullable(self):
# Because we use select_related() for 'boss', it doesn't need to be
# prefetched, but we can still traverse it although it contains some nulls
with self.assertNumQueries(2):
qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.select_related('boss')
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_prefetch_nullable(self):
# One for main employee, one for boss, one for serfs
with self.assertNumQueries(3):
qs = Employee.objects.prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.all()
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_in_bulk(self):
"""
        in_bulk() prefetches related objects correctly because it doesn't
        call .iterator() directly.
"""
boss1 = Employee.objects.create(name="Peter")
boss2 = Employee.objects.create(name="Jack")
with self.assertNumQueries(2):
# Prefetch is done and it does not cause any errors.
bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
for b in bulk.values():
list(b.serfs.all())
class MultiDbTests(TestCase):
databases = {'default', 'other'}
def test_using_is_honored_m2m(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related('authors')
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1)
self.assertEqual(books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n")
# Reverse
qs2 = A.prefetch_related('books')
with self.assertNumQueries(2, using='other'):
authors = "".join("%s: %s\n" %
(author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2)
self.assertEqual(authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n")
def test_using_is_honored_fkey(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using='other'):
books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related('first_time_authors'))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using('other')
A = AuthorWithAge.objects.using('other')
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name='Jane', first_book=book1, age=50)
A.create(name='Tom', first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using='other'):
authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using='other'):
ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on the same db.
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on a different db.
with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems ()\n"
"Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
def setUp(self):
for id, name1, name2 in [
(1, 'einfach', 'simple'),
(2, 'schwierig', 'difficult'),
]:
LessonEntry.objects.create(id=id, name1=name1, name2=name2)
for id, lesson_entry_id, name in [
(1, 1, 'einfach'),
(2, 1, 'simple'),
(3, 2, 'schwierig'),
(4, 2, 'difficult'),
]:
WordEntry.objects.create(id=id, lesson_entry_id=lesson_entry_id, name=name)
def test_bug(self):
list(WordEntry.objects.prefetch_related('lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Poems")
self.book2 = Book.objects.create(title="Jane Eyre")
self.book3 = Book.objects.create(title="Wuthering Heights")
self.book4 = Book.objects.create(title="Sense and Sensibility")
self.author1 = Author2.objects.create(name="Charlotte", first_book=self.book1)
self.author2 = Author2.objects.create(name="Anne", first_book=self.book1)
self.author3 = Author2.objects.create(name="Emily", first_book=self.book1)
self.author4 = Author2.objects.create(name="Jane", first_book=self.book4)
self.author1.favorite_books.add(self.book1, self.book2, self.book3)
self.author2.favorite_books.add(self.book1)
self.author3.favorite_books.add(self.book2)
self.author4.favorite_books.add(self.book3)
def test_bug(self):
list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
class Ticket21760Tests(TestCase):
def setUp(self):
self.rooms = []
for _ in range(3):
house = House.objects.create()
for _ in range(3):
self.rooms.append(Room.objects.create(house=house))
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house.main_room = self.rooms[-3]
house.save()
def test_bug(self):
prefetcher = get_prefetcher(self.rooms[0], 'house', 'house')[0]
queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
self.assertNotIn(' JOIN ', str(queryset.query))
class DirectPrefetchedObjectCacheReuseTests(TestCase):
"""
prefetch_related() reuses objects fetched in _prefetched_objects_cache.
When objects are prefetched and not stored as an instance attribute (often
intermediary relationships), they are saved to the
_prefetched_objects_cache attribute. prefetch_related() takes
_prefetched_objects_cache into account when determining whether an object
    has been fetched [1] and retrieves results from it when it is populated [2].
[1]: #25546 (duplicate queries on nested Prefetch)
[2]: #27554 (queryset evaluation fails with a mix of nested and flattened
prefetches)
"""
@classmethod
def setUpTestData(cls):
cls.book1, cls.book2 = [
Book.objects.create(title='book1'),
Book.objects.create(title='book2'),
]
cls.author11, cls.author12, cls.author21 = [
Author.objects.create(first_book=cls.book1, name='Author11'),
Author.objects.create(first_book=cls.book1, name='Author12'),
Author.objects.create(first_book=cls.book2, name='Author21'),
]
cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
AuthorAddress.objects.create(author=cls.author11, address='Happy place'),
AuthorAddress.objects.create(author=cls.author12, address='Haunted house'),
AuthorAddress.objects.create(author=cls.author21, address='Happy place'),
]
cls.bookwithyear1 = BookWithYear.objects.create(title='Poems', published_year=2010)
cls.bookreview1 = BookReview.objects.create(book=cls.bookwithyear1)
def test_detect_is_fetched(self):
"""
Nested prefetch_related() shouldn't trigger duplicate queries for the same
lookup.
"""
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
)
),
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertSequenceEqual(book1.first_time_authors.all(), [self.author11, self.author12])
self.assertSequenceEqual(book2.first_time_authors.all(), [self.author21])
self.assertSequenceEqual(book1.first_time_authors.all()[0].addresses.all(), [self.author1_address1])
self.assertSequenceEqual(book1.first_time_authors.all()[1].addresses.all(), [])
self.assertSequenceEqual(book2.first_time_authors.all()[0].addresses.all(), [self.author2_address1])
self.assertEqual(
list(book1.first_time_authors.all()), list(book1.first_time_authors.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()), list(book2.first_time_authors.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[0].addresses.all()),
list(book1.first_time_authors.all()[0].addresses.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[1].addresses.all()),
list(book1.first_time_authors.all()[1].addresses.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()[0].addresses.all()),
list(book2.first_time_authors.all()[0].addresses.all().all())
)
def test_detect_is_fetched_with_to_attr(self):
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
to_attr='happy_place',
)
),
to_attr='first_authors',
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertEqual(book1.first_authors, [self.author11, self.author12])
self.assertEqual(book2.first_authors, [self.author21])
self.assertEqual(book1.first_authors[0].happy_place, [self.author1_address1])
self.assertEqual(book1.first_authors[1].happy_place, [])
self.assertEqual(book2.first_authors[0].happy_place, [self.author2_address1])
def test_prefetch_reverse_foreign_key(self):
with self.assertNumQueries(2):
bookwithyear1, = BookWithYear.objects.prefetch_related('bookreview_set')
with self.assertNumQueries(0):
self.assertCountEqual(bookwithyear1.bookreview_set.all(), [self.bookreview1])
with self.assertNumQueries(0):
prefetch_related_objects([bookwithyear1], 'bookreview_set')
def test_add_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], 'bookreview_set')
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
new_review = BookReview.objects.create()
bookwithyear.bookreview_set.add(new_review)
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1, new_review])
def test_remove_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], 'bookreview_set')
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
bookwithyear.bookreview_set.remove(self.bookreview1)
self.assertCountEqual(bookwithyear.bookreview_set.all(), [])
class ReadPrefetchedObjectsCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Les confessions Volume I')
cls.book2 = Book.objects.create(title='Candide')
cls.author1 = AuthorWithAge.objects.create(name='Rousseau', first_book=cls.book1, age=70)
cls.author2 = AuthorWithAge.objects.create(name='Voltaire', first_book=cls.book2, age=65)
cls.book1.authors.add(cls.author1)
cls.book2.authors.add(cls.author2)
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
def test_retrieves_results_from_prefetched_objects_cache(self):
"""
When intermediary results are prefetched without a destination
attribute, they are saved in the RelatedManager's cache
(_prefetched_objects_cache). prefetch_related() uses this cache
(#27554).
"""
authors = AuthorWithAge.objects.prefetch_related(
Prefetch(
'author',
queryset=Author.objects.prefetch_related(
# Results are saved in the RelatedManager's cache
# (_prefetched_objects_cache) and do not replace the
# RelatedManager on Author instances (favorite_authors)
Prefetch('favorite_authors__first_book'),
),
),
)
with self.assertNumQueries(4):
# AuthorWithAge -> Author -> FavoriteAuthors, Book
self.assertQuerysetEqual(authors, ['<AuthorWithAge: Rousseau>', '<AuthorWithAge: Voltaire>'])
|
|
from __future__ import unicode_literals
import argparse
import re
import sys
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
from six.moves import range
from rbtools import commands
from rbtools.commands import OptionGroup, Option
class CommandClassNotFound(Exception):
def __init__(self, directive, classname):
self.classname = classname
self.error_node = [
directive.state_machine.reporter.error(
str(self),
line=directive.lineno)
]
def __str__(self):
return ('Unable to import the RBTools command class "%s"'
% self.classname)
class CommandDirective(Directive):
"""Sets up a doc page for an RBTools command.
This will load the given command class, store some state on it for
other directives to use, and then set up both targets and program
domains. This must be used at the top of the page before any other
RBTools command-related directive is used.
"""
required_arguments = 1
def run(self):
doc = self.state.document
env = doc.settings.env
class_name = self.arguments[0].strip()
try:
cmd_class = self.get_command_class(class_name)
except CommandClassNotFound as e:
return e.error_node
# Add the class's file, this extension, and the file containing the
# global list of common arguments to the dependencies.
for dep in (__file__, commands.__file__,
sys.modules[cmd_class.__module__].__file__):
env.note_dependency(dep)
name = 'rbt-%s' % cmd_class.name
env.temp_data.update({
'rbt-command:class': cmd_class,
'rbt-command:doc-prefix': name,
})
target_node = nodes.target('', '', ids=[name], names=[name])
doc.note_explicit_target(target_node)
program_node = parse_text(
self,
'.. rbtcommand:: rbt %(command_name)s\n'
'.. program:: rbt %(command_name)s'
% {
'command_name': cmd_class.name,
})
return [program_node, target_node]
def get_command_class(self, class_name):
try:
return self.get_from_module(class_name)
except ImportError:
raise CommandClassNotFound(self, class_name)
def get_from_module(self, name):
i = name.rfind('.')
module, attr = name[:i], name[i + 1:]
try:
mod = __import__(module, {}, {}, [attr])
return getattr(mod, attr)
except AttributeError:
raise ImportError
except ImportError:
raise
class CommandUsageDirective(Directive):
"""Outputs usage information for a command.
This outputs a section containing the usage information for a command.
It's similar to what's shown on the command line when using --help.
"""
def run(self):
env = self.state.document.settings.env
cmd_class = env.temp_data['rbt-command:class']
cmd = cmd_class()
parser = cmd.create_parser({})
formatter = argparse.HelpFormatter(prog='rbt')
formatter.add_usage(parser.usage, parser._actions,
parser._mutually_exclusive_groups,
prefix='')
usage = '$ %s' % formatter.format_help().splitlines()[0]
section = nodes.section(ids=[
'%s-usage' % env.temp_data['rbt-command:doc-prefix'],
])
section += nodes.title(text='Usage')
section += nodes.literal_block(usage, usage, classes=['cmdline'])
# Insert this as a main section under the document section.
add_at_section_level(self, 1, [section])
return []
class CommandOptionsDirective(Directive):
"""Outputs the list of options, grouped by section, for a command.
This goes through all the options and option groups for a command,
outputting option documentation for them. This includes any meta
variables, information on the defaults, and how to override the
defaults.
The option information is taken from the metadata passed to Option()
instances.
"""
CMD_REF_RE = re.compile(r'`rbt ([a-z-]+)`')
OPT_REF_RE = re.compile(r'(--[a-z-]+(="[^"]+")?)')
BACKTICK_RE = re.compile(r'(?<![:`])`([^`:]+)`')
def run(self):
doc = self.state.document
env = doc.settings.env
self.cmd_class = env.temp_data['rbt-command:class']
options, option_groups = self.get_options_and_groups()
name = '%s-options' % env.temp_data['rbt-command:doc-prefix']
target_node = nodes.target('', '', ids=[name], names=[name])
doc.note_explicit_target(target_node)
section = nodes.section(ids=[name])
section += nodes.title(text='Options')
section += self.output_option_list(options)
section += self.output_option_list(self.cmd_class._global_options)
for option_group in option_groups:
section += self.output_opt_group(option_group)
# Insert this as a main section under the document section.
add_at_section_level(self, 1, [target_node, section])
return []
def get_options_and_groups(self):
options = []
option_groups = []
for i in self.cmd_class.option_list:
if isinstance(i, Option):
options.append(i)
elif isinstance(i, OptionGroup):
option_groups.append(i)
else:
raise ValueError('Invalid item %r found in %r option list'
% (i, self.cmd_class))
return options, option_groups
def output_option_list(self, option_list):
result = []
for i in option_list:
result += self.output_option(i)
return result
def output_opt_section(self, name):
env = self.state.document.settings.env
section = nodes.section(ids=[
'%s-%s'
% (env.temp_data['rbt-command:doc-prefix'],
name.lower().replace(' ', '-')),
])
section += nodes.title(text=name)
return section
def output_opt_group(self, opt_group):
section = self.output_opt_section(opt_group.name)
if opt_group.description:
section += nodes.paragraph(text=opt_group.description)
section += self.output_option_list(opt_group.option_list)
return [section]
def output_option(self, option):
default_text = ''
content = [self.format_content(option.attrs['help'])]
if 'extended_help' in option.attrs:
content.append(self.format_content(option.attrs['extended_help']))
if 'default' in option.attrs:
action = option.attrs.get('action', 'store')
default = option.attrs.get('default')
if action == 'store_true' and default is True:
default_text = 'This option is set by default.'
elif action == 'store_false' and default is False:
default_text = 'This option is set by default.'
elif action == 'store' and default is not None:
default_text = ('If not specified, ``%s`` is used by default.'
% default)
if 'config_key' in option.attrs:
if default_text:
default_text += (
' The default can be changed by setting :rbtconfig:`%s` '
'in :ref:`rbtools-reviewboardrc`.'
% option.attrs['config_key']
)
else:
default_text = (
'The default can be set in :rbtconfig:`%s` in '
':ref:`rbtools-reviewboardrc`.'
% option.attrs['config_key']
)
if default_text:
content.append(default_text)
if 'metavar' in option.attrs:
norm_metavar = option.attrs['metavar'].lower().replace('_', ' ')
option_args = ', '.join([
'%s <%s>' % (option_name, norm_metavar)
for option_name in option.opts
])
elif 'choices' in option.attrs:
norm_choices = '|'.join(option.attrs['choices'])
if option.attrs.get('default') is not None:
norm_choices = '[%s]' % norm_choices
else:
norm_choices = '<%s>' % norm_choices
option_args = ', '.join([
'%s %s' % (option_name, norm_choices)
for option_name in option.opts
])
else:
option_args = ', '.join(option.opts)
if 'deprecated_in' in option.attrs:
content.append('.. deprecated:: %s'
% option.attrs['deprecated_in'])
if 'added_in' in option.attrs:
content.append('.. versionadded:: %s' % option.attrs['added_in'])
if 'versions_changed' in option.attrs:
versions_changed = option.attrs['versions_changed']
for version in sorted(versions_changed, reverse=True):
content.append(
'.. versionchanged:: %s\n%s' % (
version,
self.indent_content(
self.format_content(versions_changed[version]),
indent_level=2)))
node = parse_text(
self,
'.. cmdoption:: %s\n\n%s'
% (option_args, self.indent_content('\n\n'.join(content))))
return [node]
def indent_content(self, content, indent_level=1):
indent_str = ' ' * indent_level
return '\n'.join([
'%s%s' % (indent_str, line)
for line in content.splitlines()
])
def format_content(self, content):
content = content.replace('\n', '\n\n')
content = content.replace('*', '\\*')
content = content.replace('.reviewboardrc', ':file:`.reviewboardrc`')
content = self.CMD_REF_RE.sub(r':ref:`rbt \1 <rbt-\1>`', content)
content = self.OPT_REF_RE.sub(r':option:`\1`', content)
content = self.BACKTICK_RE.sub(r'``\1``', content)
return content
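        # Illustrative sketch of the substitutions above (inputs are assumed,
        # not taken from real option help text):
        #   '`rbt post`'  ->  ':ref:`rbt post <rbt-post>`'
        #   '--debug'     ->  ':option:`--debug`'
        #   '`MYVAR`'     ->  '``MYVAR``'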
def parse_text(directive, text, node_type=nodes.paragraph, where=None):
"""Parses text in ReST format and returns a node with the content."""
assert text is not None, 'Missing text during parse_text in %s' % where
vl = ViewList()
for line in text.split('\n'):
vl.append(line, line)
node = node_type(rawsource=text)
directive.state.nested_parse(vl, 0, node)
return node
def add_at_section_level(node, level, new_nodes):
    """Adds a list of nodes at the specified section level."""
    # The parameter is named new_nodes so it doesn't shadow the docutils
    # nodes module imported above.
    parent = node.state.parent
    for _ in range(level, node.state.memo.section_level):
        parent = parent.parent
    parent += new_nodes
def setup(app):
app.add_directive('rbt-command', CommandDirective)
app.add_directive('rbt-command-usage', CommandUsageDirective)
app.add_directive('rbt-command-options', CommandOptionsDirective)
app.add_crossref_type(directivename=str('rbtcommand'),
rolename=str('rbtcommand'),
indextemplate=str('pair: %s; RBTools command'))
app.add_crossref_type(
directivename=str('rbtconfig'),
rolename=str('rbtconfig'),
indextemplate=str('pair: %s; .reviewboardrc setting'))
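# Hypothetical usage sketch for these directives in a docs page (the command
# class path is illustrative, not taken from this file):
#
#   .. rbt-command:: rbtools.commands.post.Post
#
#   .. rbt-command-usage::
#
#   .. rbt-command-options::
#
# rbt-command must come first so the usage/options directives can read the
# class stashed in env.temp_data.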
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as signature hash functions.
This file is modified from python-syscoinlib.
"""
import hashlib
import struct
from .messages import (
CTransaction,
CTxOut,
hash256,
ser_string,
ser_uint256,
sha256,
uint256_from_str,
)
MAX_SCRIPT_ELEMENT_SIZE = 520
OPCODE_NAMES = {}
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
def bn2vch(v):
"""Convert number to bitcoin-specific little endian format."""
# We need v.bit_length() bits, plus a sign bit for every nonzero number.
n_bits = v.bit_length() + (v != 0)
# The number of bytes for that is:
n_bytes = (n_bits + 7) // 8
# Convert number to absolute value + sign in top bit.
encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1))
# Serialize to bytes
return encoded_v.to_bytes(n_bytes, 'little')
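# Illustrative sketch (not part of the original file): a few expected bn2vch()
# encodings. The sign lives in the top bit of the last byte, so an extra byte
# is appended when the magnitude already uses that bit. The helper below is
# never called by this module.
def _bn2vch_examples():
    assert bn2vch(0) == b''
    assert bn2vch(1) == b'\x01'
    assert bn2vch(-1) == b'\x81'
    assert bn2vch(127) == b'\x7f'
    assert bn2vch(128) == b'\x80\x00'
    assert bn2vch(-255) == b'\xff\x80'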
_opcode_instances = []
class CScriptOp(int):
"""A single script opcode"""
__slots__ = ()
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + bytes([len(d)]) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + bytes([len(d)]) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n - 1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1 + 1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
# Populate opcode instance table
for n in range(0xff + 1):
CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE = OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0: 'OP_0',
OP_PUSHDATA1: 'OP_PUSHDATA1',
OP_PUSHDATA2: 'OP_PUSHDATA2',
OP_PUSHDATA4: 'OP_PUSHDATA4',
OP_1NEGATE: 'OP_1NEGATE',
OP_RESERVED: 'OP_RESERVED',
OP_1: 'OP_1',
OP_2: 'OP_2',
OP_3: 'OP_3',
OP_4: 'OP_4',
OP_5: 'OP_5',
OP_6: 'OP_6',
OP_7: 'OP_7',
OP_8: 'OP_8',
OP_9: 'OP_9',
OP_10: 'OP_10',
OP_11: 'OP_11',
OP_12: 'OP_12',
OP_13: 'OP_13',
OP_14: 'OP_14',
OP_15: 'OP_15',
OP_16: 'OP_16',
OP_NOP: 'OP_NOP',
OP_VER: 'OP_VER',
OP_IF: 'OP_IF',
OP_NOTIF: 'OP_NOTIF',
OP_VERIF: 'OP_VERIF',
OP_VERNOTIF: 'OP_VERNOTIF',
OP_ELSE: 'OP_ELSE',
OP_ENDIF: 'OP_ENDIF',
OP_VERIFY: 'OP_VERIFY',
OP_RETURN: 'OP_RETURN',
OP_TOALTSTACK: 'OP_TOALTSTACK',
OP_FROMALTSTACK: 'OP_FROMALTSTACK',
OP_2DROP: 'OP_2DROP',
OP_2DUP: 'OP_2DUP',
OP_3DUP: 'OP_3DUP',
OP_2OVER: 'OP_2OVER',
OP_2ROT: 'OP_2ROT',
OP_2SWAP: 'OP_2SWAP',
OP_IFDUP: 'OP_IFDUP',
OP_DEPTH: 'OP_DEPTH',
OP_DROP: 'OP_DROP',
OP_DUP: 'OP_DUP',
OP_NIP: 'OP_NIP',
OP_OVER: 'OP_OVER',
OP_PICK: 'OP_PICK',
OP_ROLL: 'OP_ROLL',
OP_ROT: 'OP_ROT',
OP_SWAP: 'OP_SWAP',
OP_TUCK: 'OP_TUCK',
OP_CAT: 'OP_CAT',
OP_SUBSTR: 'OP_SUBSTR',
OP_LEFT: 'OP_LEFT',
OP_RIGHT: 'OP_RIGHT',
OP_SIZE: 'OP_SIZE',
OP_INVERT: 'OP_INVERT',
OP_AND: 'OP_AND',
OP_OR: 'OP_OR',
OP_XOR: 'OP_XOR',
OP_EQUAL: 'OP_EQUAL',
OP_EQUALVERIFY: 'OP_EQUALVERIFY',
OP_RESERVED1: 'OP_RESERVED1',
OP_RESERVED2: 'OP_RESERVED2',
OP_1ADD: 'OP_1ADD',
OP_1SUB: 'OP_1SUB',
OP_2MUL: 'OP_2MUL',
OP_2DIV: 'OP_2DIV',
OP_NEGATE: 'OP_NEGATE',
OP_ABS: 'OP_ABS',
OP_NOT: 'OP_NOT',
OP_0NOTEQUAL: 'OP_0NOTEQUAL',
OP_ADD: 'OP_ADD',
OP_SUB: 'OP_SUB',
OP_MUL: 'OP_MUL',
OP_DIV: 'OP_DIV',
OP_MOD: 'OP_MOD',
OP_LSHIFT: 'OP_LSHIFT',
OP_RSHIFT: 'OP_RSHIFT',
OP_BOOLAND: 'OP_BOOLAND',
OP_BOOLOR: 'OP_BOOLOR',
OP_NUMEQUAL: 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',
OP_LESSTHAN: 'OP_LESSTHAN',
OP_GREATERTHAN: 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',
OP_MIN: 'OP_MIN',
OP_MAX: 'OP_MAX',
OP_WITHIN: 'OP_WITHIN',
OP_RIPEMD160: 'OP_RIPEMD160',
OP_SHA1: 'OP_SHA1',
OP_SHA256: 'OP_SHA256',
OP_HASH160: 'OP_HASH160',
OP_HASH256: 'OP_HASH256',
OP_CODESEPARATOR: 'OP_CODESEPARATOR',
OP_CHECKSIG: 'OP_CHECKSIG',
OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',
OP_NOP1: 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY: 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4: 'OP_NOP4',
OP_NOP5: 'OP_NOP5',
OP_NOP6: 'OP_NOP6',
OP_NOP7: 'OP_NOP7',
OP_NOP8: 'OP_NOP8',
OP_NOP9: 'OP_NOP9',
OP_NOP10: 'OP_NOP10',
OP_SMALLINTEGER: 'OP_SMALLINTEGER',
OP_PUBKEYS: 'OP_PUBKEYS',
OP_PUBKEYHASH: 'OP_PUBKEYHASH',
OP_PUBKEY: 'OP_PUBKEY',
OP_INVALIDOPCODE: 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
"""Base class for CScript exceptions"""
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
"""Invalid pushdata due to truncation"""
def __init__(self, msg, data):
self.data = data
super(CScriptTruncatedPushDataError, self).__init__(msg)
# This is used, e.g., for blockchain heights in coinbase scripts (BIP 34).
class CScriptNum:
__slots__ = ("value",)
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
r.append(absvalue & 0xff)
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes([len(r)]) + r
@staticmethod
def decode(vch):
result = 0
# We assume valid push_size and minimal encoding
value = vch[1:]
if len(value) == 0:
return result
for i, byte in enumerate(value):
result |= int(byte) << 8 * i
if value[-1] >= 0x80:
# Mask for all but the highest result bit
num_mask = (2**(len(value) * 8) - 1) >> 1
result &= num_mask
result *= -1
return result
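# Illustrative sketch (not part of the original file): CScriptNum.encode()
# prepends a push length byte and decode() expects it, so the two round-trip.
# The helper below is never called by this module.
def _cscriptnum_examples():
    assert CScriptNum.encode(CScriptNum(0)) == b''
    assert CScriptNum.encode(CScriptNum(1)) == b'\x01\x01'
    assert CScriptNum.encode(CScriptNum(-1)) == b'\x01\x81'
    assert CScriptNum.encode(CScriptNum(128)) == b'\x02\x80\x00'
    for v in (0, 1, -1, 127, 128, -255, 0x7fffffff):
        assert CScriptNum.decode(CScriptNum.encode(CScriptNum(v))) == v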
class CScript(bytes):
"""Serialized script
A bytes subclass, so you can use this directly whenever bytes are accepted.
Note that this means that indexing does *not* work - you'll get an index by
byte rather than opcode. This format was chosen for efficiency so that the
general case would not require creating a lot of little CScriptOP objects.
iter(script) however does iterate by opcode.
"""
__slots__ = ()
@classmethod
def __coerce_instance(cls, other):
# Coerce other into bytes
if isinstance(other, CScriptOp):
other = bytes([other])
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bytes([CScriptOp(OP_0)])
else:
other = CScriptNum.encode(other)
elif isinstance(other, int):
if 0 <= other <= 16:
other = bytes([CScriptOp.encode_op_n(other)])
elif other == -1:
other = bytes([OP_1NEGATE])
else:
other = CScriptOp.encode_op_pushdata(bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
# Do the coercion outside of the try block so that errors in it are
# noticed.
other = self.__coerce_instance(other)
try:
# bytes.__add__ always returns bytes instances unfortunately
return CScript(super(CScript, self).__add__(other))
except TypeError:
raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
def join(self, iterable):
# join makes no sense for a CScript()
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super(CScript, cls).__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
        Yields tuples of (opcode, data, sop_idx) so that the different possible
        PUSHDATA encodings can be accurately distinguished, and so that the
        exact byte index at which each opcode starts (sop_idx) is available.
"""
i = 0
while i < len(self):
sop_idx = i
opcode = self[i]
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = self[i]
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = self[i] + (self[i + 1] << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = self[i] + (self[i + 1] << 8) + (self[i + 2] << 16) + (self[i + 3] << 24)
i += 4
else:
assert False # shouldn't happen
data = bytes(self[i:i + datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
"""'Cooked' iteration
Returns either a CScriptOP instance, an integer, or bytes, as
appropriate.
See raw_iter() if you need to distinguish the different possible
PUSHDATA encodings.
"""
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
def _repr(o):
if isinstance(o, bytes):
return "x('%s')" % o.hex()
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
"""Get the SigOp count.
fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
Note that this is consensus-critical.
"""
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
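# Illustrative sketch (not part of the original file): CScript() coerces
# opcodes, small integers and byte strings into serialized script bytes, and
# iteration walks the result opcode by opcode. Never called by this module.
def _cscript_examples():
    p2pkh_like = CScript([OP_DUP, OP_HASH160, b'\x00' * 20, OP_EQUALVERIFY, OP_CHECKSIG])
    assert len(p2pkh_like) == 25  # 2 opcodes + 21-byte push + 2 opcodes
    assert p2pkh_like.GetSigOpCount(True) == 1
    assert list(CScript([1, 2, b'\x99'])) == [1, 2, b'\x99']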
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
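# The low five bits select the base hash type; SIGHASH_ANYONECANPAY is OR'd on
# top as a modifier (e.g. SIGHASH_SINGLE | SIGHASH_ANYONECANPAY == 0x83). The
# signature hash functions below mask with 0x1f to recover the base type.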
def FindAndDelete(script, sig):
"""Consensus critical, see FindAndDelete() in Satoshi codebase"""
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
def LegacySignatureHash(script, txTo, inIdx, hashtype):
"""Consensus-correct SignatureHash
Returns (hash, err) to precisely match the consensus-critical behavior of
the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
"""
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize_without_witness()
s += struct.pack(b"<I", hashtype)
hash = hash256(s)
return (hash, None)
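# Hedged usage sketch (assumes `tx` is a CTransaction spending an output locked
# by `prev_script`):
#   sighash, err = LegacySignatureHash(prev_script, tx, 0, SIGHASH_ALL)
#   assert err is None
# The returned 32-byte digest is what a legacy (pre-segwit) input signs.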
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitV0SignatureHash(script, txTo, inIdx, hashtype, amount):
hashPrevouts = 0
hashSequence = 0
hashOutputs = 0
if not (hashtype & SIGHASH_ANYONECANPAY):
serialize_prevouts = bytes()
for i in txTo.vin:
serialize_prevouts += i.prevout.serialize()
hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = uint256_from_str(hash256(serialize_sequence))
if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
serialize_outputs = txTo.vout[inIdx].serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
ss = bytes()
ss += struct.pack("<i", txTo.nVersion)
ss += ser_uint256(hashPrevouts)
ss += ser_uint256(hashSequence)
ss += txTo.vin[inIdx].prevout.serialize()
ss += ser_string(script)
ss += struct.pack("<q", amount)
ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
ss += ser_uint256(hashOutputs)
ss += struct.pack("<i", txTo.nLockTime)
ss += struct.pack("<I", hashtype)
return hash256(ss)
|
|
import numpy as np
import pickle
from malpi import optim
class Solver(object):
"""
A Solver encapsulates all the logic necessary for training classification
models. The Solver performs stochastic gradient descent using different
update rules defined in optim.py.
    The solver accepts both training and validation data and labels so it can
periodically check classification accuracy on both training and validation
data to watch out for overfitting.
To train a model, you will first construct a Solver instance, passing the
    model, dataset, and various options (learning rate, batch size, etc.) to the
constructor. You will then call the train() method to run the optimization
procedure and train the model.
After the train() method returns, model.params will contain the parameters
that performed best on the validation set over the course of training.
In addition, the instance variable solver.loss_history will contain a list
of all losses encountered during training and the instance variables
solver.train_acc_history and solver.val_acc_history will be lists containing
the accuracies of the model on the training and validation set at each epoch.
Example usage might look something like this:
data = {
'X_train': # training data
'y_train': # training labels
'X_val': # validation data
      'y_val': # validation labels
}
model = MyAwesomeModel(hidden_size=100, reg=10)
solver = Solver(model, data,
update_rule='sgd',
optim_config={
'learning_rate': 1e-3,
},
lr_decay=0.95,
num_epochs=10, batch_size=100,
print_every=100)
solver.train()
A Solver works on a model object that must conform to the following API:
- model.params must be a dictionary mapping string parameter names to numpy
arrays containing parameter values.
- model.loss(X, y) must be a function that computes training-time loss and
gradients, and test-time classification scores, with the following inputs
and outputs:
Inputs:
- X: Array giving a minibatch of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,) giving labels for X where y[i] is the
label for X[i].
Returns:
If y is None, run a test-time forward pass and return:
- scores: Array of shape (N, C) giving classification scores for X where
scores[i, c] gives the score of class c for X[i].
If y is not None, run a training time forward and backward pass and return
a tuple of:
- loss: Scalar giving the loss
- grads: Dictionary with the same keys as self.params mapping parameter
names to gradients of the loss with respect to those parameters.
"""
def __init__(self, model, optimizer, data, **kwargs):
"""
Construct a new Solver instance.
Required arguments:
        - model: A model object conforming to the API described above
        - optimizer: An optimizer object exposing update(grads) and
          decay_learning_rate(decay), used to apply parameter updates
- data: A dictionary of training and validation data with the following:
'X_train': Array of shape (N_train, d_1, ..., d_k) giving training images
'X_val': Array of shape (N_val, d_1, ..., d_k) giving validation images
'y_train': Array of shape (N_train,) giving labels for training images
'y_val': Array of shape (N_val,) giving labels for validation images
Optional arguments:
- update_rule: A string giving the name of an update rule in optim.py.
Default is 'sgd'.
- optim_config: A dictionary containing hyperparameters that will be
passed to the chosen update rule. Each update rule requires different
hyperparameters (see optim.py) but all update rules require a
'learning_rate' parameter so that should always be present.
- lr_decay: A scalar for learning rate decay; after each epoch the learning
rate is multiplied by this value.
- batch_size: Size of minibatches used to compute loss and gradient during
training.
- num_epochs: The number of epochs to run for during training.
- print_every: Integer; training losses will be printed every print_every
iterations.
- verbose: Boolean; if set to false then no output will be printed during
training.
"""
self.model = model
self.X_train = data['X_train']
self.y_train = data['y_train']
self.X_val = data['X_val']
self.y_val = data['y_val']
# Unpack keyword arguments
self.update_rule = kwargs.pop('update_rule', 'sgd')
self.optim_config = kwargs.pop('optim_config', {})
self.lr_decay = kwargs.pop('lr_decay', 1.0)
self.batch_size = kwargs.pop('batch_size', 100)
self.num_epochs = kwargs.pop('num_epochs', 10)
self.print_every = kwargs.pop('print_every', 10)
self.verbose = kwargs.pop('verbose', True)
# Throw an error if there are extra keyword arguments
if len(kwargs) > 0:
extra = ', '.join('"%s"' % k for k in kwargs.keys())
raise ValueError('Unrecognized arguments %s' % extra)
# Make sure the update rule exists, then replace the string
# name with the actual function
if not hasattr(optim, self.update_rule):
raise ValueError('Invalid update_rule "%s"' % self.update_rule)
self.update_rule = getattr(optim, self.update_rule)
self.optimizer = optimizer
self._reset()
def _reset(self):
"""
Set up some book-keeping variables for optimization. Don't call this
manually.
"""
# Set up some variables for book-keeping
self.epoch = 0
self.best_val_acc = 0
self.best_params = {}
self.loss_history = []
self.train_acc_history = []
self.val_acc_history = []
def _stats(self, arr, msg=""):
mi = np.min(arr)
ma = np.max(arr)
av = np.mean(arr)
std = np.std(arr)
arr_abs = np.abs(arr)
mi_abs = np.min(arr_abs)
ma_abs = np.max(arr_abs)
print "%sMin/Max/Mean/Stdev abs(Min/Max): %g/%g/%g/%g %g/%g" % (msg,mi,ma,av,std,mi_abs,ma_abs)
def _step(self):
"""
Make a single gradient update. This is called by train() and should not
be called manually.
"""
# Make a minibatch of training data
num_train = self.X_train.shape[0]
batch_mask = np.random.choice(num_train, self.batch_size)
X_batch = self.X_train[batch_mask]
y_batch = self.y_train[batch_mask]
# Compute loss and gradient
loss, grads = self.model.loss(X_batch, y_batch)
self.loss_history.append(loss)
# Perform a parameter update
self.optimizer.update(grads)
def check_accuracy(self, X, y, num_samples=None, batch_size=100):
"""
Check accuracy of the model on the provided data.
Inputs:
- X: Array of data, of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,)
- num_samples: If not None, subsample the data and only test the model
on num_samples datapoints.
- batch_size: Split X and y into batches of this size to avoid using too
much memory.
Returns:
- acc: Scalar giving the fraction of instances that were correctly
classified by the model.
"""
# Maybe subsample the data
N = X.shape[0]
if num_samples is not None and N > num_samples:
mask = np.random.choice(N, num_samples)
N = num_samples
X = X[mask]
y = y[mask]
# Compute predictions in batches
        num_batches = N // batch_size
if N % batch_size != 0:
num_batches += 1
y_pred = []
for i in xrange(num_batches):
start = i * batch_size
end = (i + 1) * batch_size
scores = self.model.loss(X[start:end])
y_pred.append(np.argmax(scores, axis=1))
y_pred = np.hstack(y_pred)
acc = np.mean(y_pred == y)
return acc
def train(self):
"""
Run optimization to train the model.
"""
num_train = self.X_train.shape[0]
        iterations_per_epoch = max(num_train // self.batch_size, 1)
num_iterations = self.num_epochs * iterations_per_epoch
for t in xrange(num_iterations):
self._step()
# Maybe print training loss
if self.verbose and t % self.print_every == 0:
print '(Iteration %d / %d) loss: %f' % (
t + 1, num_iterations, self.loss_history[-1])
# At the end of every epoch, increment the epoch counter and decay the
# learning rate.
epoch_end = (t + 1) % iterations_per_epoch == 0
if epoch_end:
self.epoch += 1
self.optimizer.decay_learning_rate(self.lr_decay)
# Check train and val accuracy on the first iteration, the last
# iteration, and at the end of each epoch.
first_it = (t == 0)
            last_it = (t == num_iterations - 1)
if first_it or last_it or epoch_end:
train_acc = self.check_accuracy(self.X_train, self.y_train,
num_samples=1000)
val_acc = self.check_accuracy(self.X_val, self.y_val)
self.train_acc_history.append(train_acc)
self.val_acc_history.append(val_acc)
if self.verbose:
print '(Epoch %d / %d) train acc: %f; val_acc: %f' % (
self.epoch, self.num_epochs, train_acc, val_acc)
# Keep track of the best model
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
self.best_params = {}
for k, v in self.model.params.iteritems():
self.best_params[k] = v.copy()
# At the end of training swap the best params into the model
self.model.params = self.best_params
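# A minimal usage sketch (not part of the original class): how a Solver might be
# driven end to end. `model`, `optimizer`, and `data` are assumed to already
# exist and to follow the API described in the constructor docstring; the
# hyperparameters below are illustrative, not tuned values.
def _example_solver_usage(model, optimizer, data):
    solver = Solver(model, optimizer, data,
                    update_rule='sgd',
                    optim_config={'learning_rate': 1e-3},
                    lr_decay=0.95,
                    batch_size=100,
                    num_epochs=10,
                    print_every=100)
    solver.train()
    # After training, the model holds the parameters with the best validation
    # accuracy, and the solver exposes loss/accuracy histories for inspection.
    return solver.best_val_acc, solver.loss_history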
|
|
from __future__ import annotations
import base64
import binascii
import ipaddress
import re
from typing import Callable, List, Optional, Sequence, Tuple, TypeVar, cast
from . import exceptions
from .typing import (
ConnectionOption,
ExtensionHeader,
ExtensionName,
ExtensionParameter,
Subprotocol,
UpgradeProtocol,
)
__all__ = [
"build_host",
"parse_connection",
"parse_upgrade",
"parse_extension",
"build_extension",
"parse_subprotocol",
"build_subprotocol",
"validate_subprotocols",
"build_www_authenticate_basic",
"parse_authorization_basic",
"build_authorization_basic",
]
T = TypeVar("T")
def build_host(host: str, port: int, secure: bool) -> str:
"""
Build a ``Host`` header.
"""
# https://www.rfc-editor.org/rfc/rfc3986.html#section-3.2.2
# IPv6 addresses must be enclosed in brackets.
try:
address = ipaddress.ip_address(host)
except ValueError:
# host is a hostname
pass
else:
# host is an IP address
if address.version == 6:
host = f"[{host}]"
if port != (443 if secure else 80):
host = f"{host}:{port}"
return host
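# Illustrative examples (not part of the original module):
#     build_host("www.example.com", 80, False)  -> "www.example.com"
#     build_host("::1", 8080, False)            -> "[::1]:8080"
# i.e. IPv6 literals are bracketed and the default port is omitted.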
# To avoid a dependency on a parsing library, we implement manually the ABNF
# described in https://www.rfc-editor.org/rfc/rfc6455.html#section-9.1 and
# https://www.rfc-editor.org/rfc/rfc7230.html#appendix-B.
def peek_ahead(header: str, pos: int) -> Optional[str]:
"""
Return the next character from ``header`` at the given position.
Return :obj:`None` at the end of ``header``.
We never need to peek more than one character ahead.
"""
return None if pos == len(header) else header[pos]
_OWS_re = re.compile(r"[\t ]*")
def parse_OWS(header: str, pos: int) -> int:
"""
Parse optional whitespace from ``header`` at the given position.
Return the new position.
The whitespace itself isn't returned because it isn't significant.
"""
# There's always a match, possibly empty, whose content doesn't matter.
match = _OWS_re.match(header, pos)
assert match is not None
return match.end()
_token_re = re.compile(r"[-!#$%&\'*+.^_`|~0-9a-zA-Z]+")
def parse_token(header: str, pos: int, header_name: str) -> Tuple[str, int]:
"""
Parse a token from ``header`` at the given position.
Return the token value and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
match = _token_re.match(header, pos)
if match is None:
raise exceptions.InvalidHeaderFormat(header_name, "expected token", header, pos)
return match.group(), match.end()
_quoted_string_re = re.compile(
r'"(?:[\x09\x20-\x21\x23-\x5b\x5d-\x7e]|\\[\x09\x20-\x7e\x80-\xff])*"'
)
_unquote_re = re.compile(r"\\([\x09\x20-\x7e\x80-\xff])")
def parse_quoted_string(header: str, pos: int, header_name: str) -> Tuple[str, int]:
"""
Parse a quoted string from ``header`` at the given position.
Return the unquoted value and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
match = _quoted_string_re.match(header, pos)
if match is None:
raise exceptions.InvalidHeaderFormat(
header_name, "expected quoted string", header, pos
)
return _unquote_re.sub(r"\1", match.group()[1:-1]), match.end()
_quotable_re = re.compile(r"[\x09\x20-\x7e\x80-\xff]*")
_quote_re = re.compile(r"([\x22\x5c])")
def build_quoted_string(value: str) -> str:
"""
Format ``value`` as a quoted string.
This is the reverse of :func:`parse_quoted_string`.
"""
match = _quotable_re.fullmatch(value)
if match is None:
raise ValueError("invalid characters for quoted-string encoding")
return '"' + _quote_re.sub(r"\\\1", value) + '"'
def parse_list(
parse_item: Callable[[str, int, str], Tuple[T, int]],
header: str,
pos: int,
header_name: str,
) -> List[T]:
"""
Parse a comma-separated list from ``header`` at the given position.
This is appropriate for parsing values with the following grammar:
1#item
``parse_item`` parses one item.
``header`` is assumed not to start or end with whitespace.
(This function is designed for parsing an entire header value and
:func:`~websockets.http.read_headers` strips whitespace from values.)
Return a list of items.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
# Per https://www.rfc-editor.org/rfc/rfc7230.html#section-7, "a recipient
# MUST parse and ignore a reasonable number of empty list elements";
# hence while loops that remove extra delimiters.
# Remove extra delimiters before the first item.
while peek_ahead(header, pos) == ",":
pos = parse_OWS(header, pos + 1)
items = []
while True:
        # Loop invariant: an item starts at pos in header.
item, pos = parse_item(header, pos, header_name)
items.append(item)
pos = parse_OWS(header, pos)
# We may have reached the end of the header.
if pos == len(header):
break
# There must be a delimiter after each element except the last one.
if peek_ahead(header, pos) == ",":
pos = parse_OWS(header, pos + 1)
else:
raise exceptions.InvalidHeaderFormat(
header_name, "expected comma", header, pos
)
# Remove extra delimiters before the next item.
while peek_ahead(header, pos) == ",":
pos = parse_OWS(header, pos + 1)
# We may have reached the end of the header.
if pos == len(header):
break
# Since we only advance in the header by one character with peek_ahead()
# or with the end position of a regex match, we can't overshoot the end.
assert pos == len(header)
return items
def parse_connection_option(
header: str, pos: int, header_name: str
) -> Tuple[ConnectionOption, int]:
"""
Parse a Connection option from ``header`` at the given position.
Return the protocol value and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
item, pos = parse_token(header, pos, header_name)
return cast(ConnectionOption, item), pos
def parse_connection(header: str) -> List[ConnectionOption]:
"""
Parse a ``Connection`` header.
Return a list of HTTP connection options.
    Args:
header: value of the ``Connection`` header.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
return parse_list(parse_connection_option, header, 0, "Connection")
_protocol_re = re.compile(
r"[-!#$%&\'*+.^_`|~0-9a-zA-Z]+(?:/[-!#$%&\'*+.^_`|~0-9a-zA-Z]+)?"
)
def parse_upgrade_protocol(
header: str, pos: int, header_name: str
) -> Tuple[UpgradeProtocol, int]:
"""
Parse an Upgrade protocol from ``header`` at the given position.
Return the protocol value and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
match = _protocol_re.match(header, pos)
if match is None:
raise exceptions.InvalidHeaderFormat(
header_name, "expected protocol", header, pos
)
return cast(UpgradeProtocol, match.group()), match.end()
def parse_upgrade(header: str) -> List[UpgradeProtocol]:
"""
Parse an ``Upgrade`` header.
Return a list of HTTP protocols.
Args:
header: value of the ``Upgrade`` header.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
return parse_list(parse_upgrade_protocol, header, 0, "Upgrade")
def parse_extension_item_param(
header: str, pos: int, header_name: str
) -> Tuple[ExtensionParameter, int]:
"""
Parse a single extension parameter from ``header`` at the given position.
Return a ``(name, value)`` pair and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
# Extract parameter name.
name, pos = parse_token(header, pos, header_name)
pos = parse_OWS(header, pos)
# Extract parameter value, if there is one.
value: Optional[str] = None
if peek_ahead(header, pos) == "=":
pos = parse_OWS(header, pos + 1)
if peek_ahead(header, pos) == '"':
pos_before = pos # for proper error reporting below
value, pos = parse_quoted_string(header, pos, header_name)
# https://www.rfc-editor.org/rfc/rfc6455.html#section-9.1 says:
# the value after quoted-string unescaping MUST conform to
# the 'token' ABNF.
if _token_re.fullmatch(value) is None:
raise exceptions.InvalidHeaderFormat(
header_name, "invalid quoted header content", header, pos_before
)
else:
value, pos = parse_token(header, pos, header_name)
pos = parse_OWS(header, pos)
return (name, value), pos
def parse_extension_item(
header: str, pos: int, header_name: str
) -> Tuple[ExtensionHeader, int]:
"""
Parse an extension definition from ``header`` at the given position.
Return an ``(extension name, parameters)`` pair, where ``parameters`` is a
list of ``(name, value)`` pairs, and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
# Extract extension name.
name, pos = parse_token(header, pos, header_name)
pos = parse_OWS(header, pos)
# Extract all parameters.
parameters = []
while peek_ahead(header, pos) == ";":
pos = parse_OWS(header, pos + 1)
parameter, pos = parse_extension_item_param(header, pos, header_name)
parameters.append(parameter)
return (cast(ExtensionName, name), parameters), pos
def parse_extension(header: str) -> List[ExtensionHeader]:
"""
Parse a ``Sec-WebSocket-Extensions`` header.
Return a list of WebSocket extensions and their parameters in this format::
[
(
'extension name',
[
('parameter name', 'parameter value'),
....
]
),
...
]
Parameter values are :obj:`None` when no value is provided.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
return parse_list(parse_extension_item, header, 0, "Sec-WebSocket-Extensions")
parse_extension_list = parse_extension # alias for backwards compatibility
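# Illustrative example (not part of the original module):
#     parse_extension("permessage-deflate; client_max_window_bits")
# returns [("permessage-deflate", [("client_max_window_bits", None)])],
# i.e. one extension with a single value-less parameter.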
def build_extension_item(
name: ExtensionName, parameters: List[ExtensionParameter]
) -> str:
"""
Build an extension definition.
This is the reverse of :func:`parse_extension_item`.
"""
return "; ".join(
[cast(str, name)]
+ [
# Quoted strings aren't necessary because values are always tokens.
name if value is None else f"{name}={value}"
for name, value in parameters
]
)
def build_extension(extensions: Sequence[ExtensionHeader]) -> str:
"""
Build a ``Sec-WebSocket-Extensions`` header.
This is the reverse of :func:`parse_extension`.
"""
return ", ".join(
build_extension_item(name, parameters) for name, parameters in extensions
)
build_extension_list = build_extension # alias for backwards compatibility
def parse_subprotocol_item(
header: str, pos: int, header_name: str
) -> Tuple[Subprotocol, int]:
"""
Parse a subprotocol from ``header`` at the given position.
Return the subprotocol value and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
item, pos = parse_token(header, pos, header_name)
return cast(Subprotocol, item), pos
def parse_subprotocol(header: str) -> List[Subprotocol]:
"""
Parse a ``Sec-WebSocket-Protocol`` header.
Return a list of WebSocket subprotocols.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
return parse_list(parse_subprotocol_item, header, 0, "Sec-WebSocket-Protocol")
parse_subprotocol_list = parse_subprotocol # alias for backwards compatibility
def build_subprotocol(subprotocols: Sequence[Subprotocol]) -> str:
"""
Build a ``Sec-WebSocket-Protocol`` header.
This is the reverse of :func:`parse_subprotocol`.
"""
return ", ".join(subprotocols)
build_subprotocol_list = build_subprotocol # alias for backwards compatibility
def validate_subprotocols(subprotocols: Sequence[Subprotocol]) -> None:
"""
Validate that ``subprotocols`` is suitable for :func:`build_subprotocol`.
"""
if not isinstance(subprotocols, Sequence):
raise TypeError("subprotocols must be a list")
if isinstance(subprotocols, str):
raise TypeError("subprotocols must be a list, not a str")
for subprotocol in subprotocols:
if not _token_re.fullmatch(subprotocol):
raise ValueError(f"invalid subprotocol: {subprotocol}")
def build_www_authenticate_basic(realm: str) -> str:
"""
Build a ``WWW-Authenticate`` header for HTTP Basic Auth.
Args:
realm: identifier of the protection space.
"""
# https://www.rfc-editor.org/rfc/rfc7617.html#section-2
realm = build_quoted_string(realm)
charset = build_quoted_string("UTF-8")
return f"Basic realm={realm}, charset={charset}"
_token68_re = re.compile(r"[A-Za-z0-9-._~+/]+=*")
def parse_token68(header: str, pos: int, header_name: str) -> Tuple[str, int]:
"""
Parse a token68 from ``header`` at the given position.
Return the token value and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
match = _token68_re.match(header, pos)
if match is None:
raise exceptions.InvalidHeaderFormat(
header_name, "expected token68", header, pos
)
return match.group(), match.end()
def parse_end(header: str, pos: int, header_name: str) -> None:
"""
Check that parsing reached the end of header.
"""
if pos < len(header):
raise exceptions.InvalidHeaderFormat(header_name, "trailing data", header, pos)
def parse_authorization_basic(header: str) -> Tuple[str, str]:
"""
Parse an ``Authorization`` header for HTTP Basic Auth.
Return a ``(username, password)`` tuple.
Args:
header: value of the ``Authorization`` header.
Raises:
InvalidHeaderFormat: on invalid inputs.
InvalidHeaderValue: on unsupported inputs.
"""
# https://www.rfc-editor.org/rfc/rfc7235.html#section-2.1
# https://www.rfc-editor.org/rfc/rfc7617.html#section-2
scheme, pos = parse_token(header, 0, "Authorization")
if scheme.lower() != "basic":
raise exceptions.InvalidHeaderValue(
"Authorization",
f"unsupported scheme: {scheme}",
)
if peek_ahead(header, pos) != " ":
raise exceptions.InvalidHeaderFormat(
"Authorization", "expected space after scheme", header, pos
)
pos += 1
basic_credentials, pos = parse_token68(header, pos, "Authorization")
parse_end(header, pos, "Authorization")
try:
user_pass = base64.b64decode(basic_credentials.encode()).decode()
except binascii.Error:
raise exceptions.InvalidHeaderValue(
"Authorization",
"expected base64-encoded credentials",
) from None
try:
username, password = user_pass.split(":", 1)
except ValueError:
raise exceptions.InvalidHeaderValue(
"Authorization",
"expected username:password credentials",
) from None
return username, password
def build_authorization_basic(username: str, password: str) -> str:
"""
Build an ``Authorization`` header for HTTP Basic Auth.
This is the reverse of :func:`parse_authorization_basic`.
"""
# https://www.rfc-editor.org/rfc/rfc7617.html#section-2
assert ":" not in username
user_pass = f"{username}:{password}"
basic_credentials = base64.b64encode(user_pass.encode()).decode()
return "Basic " + basic_credentials
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities for generating and updating index.yaml."""
__all__ = ['GenerateIndexFromHistory',
'IndexYamlUpdater',
]
import os
import logging
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import yaml_errors
from google.appengine.datastore import datastore_index
import yaml
AUTO_MARKER = '\n# AUTOGENERATED\n'
AUTO_COMMENT = '''
# This index.yaml is automatically updated whenever the dev_appserver
# detects that a new type of query is run. If you want to manage the
# index.yaml file manually, remove the above marker line (the line
# saying "# AUTOGENERATED"). If you want to manage some indexes
# manually, move them above the marker line. The index.yaml file is
# automatically uploaded to the admin console when you next deploy
# your application using appcfg.py.
'''
def GenerateIndexFromHistory(query_history,
all_indexes=None, manual_indexes=None):
"""Generate most of the text for index.yaml from the query history.
Args:
    query_history: Query history, a dict mapping each query to the number of
      times it was run.
all_indexes: Optional datastore_index.IndexDefinitions instance
representing all the indexes found in the input file. May be None.
manual_indexes: Optional datastore_index.IndexDefinitions instance
containing indexes for which we should not generate output. May be None.
Returns:
A string representation that can safely be appended to an existing
index.yaml file. Returns the empty string if it would generate no output.
"""
all_keys = datastore_index.IndexDefinitionsToKeys(all_indexes)
manual_keys = datastore_index.IndexDefinitionsToKeys(manual_indexes)
indexes = dict((key, 0) for key in all_keys - manual_keys)
for query, count in query_history.iteritems():
required, kind, ancestor, props = (
datastore_index.CompositeIndexForQuery(query))
if required:
props = datastore_index.GetRecommendedIndexProperties(props)
key = (kind, ancestor, props)
if key not in manual_keys:
if key in indexes:
indexes[key] += count
else:
indexes[key] = count
if not indexes:
return ''
res = []
for (kind, ancestor, props), count in sorted(indexes.iteritems()):
res.append('')
res.append(datastore_index.IndexYamlForQuery(kind, ancestor, props))
res.append('')
return '\n'.join(res)
class IndexYamlUpdater(object):
"""Helper class for updating index.yaml.
This class maintains some state about the query history and the
index.yaml file in order to minimize the number of times index.yaml
is actually overwritten.
"""
index_yaml_is_manual = False
index_yaml_mtime = None
last_history_size = 0
def __init__(self, root_path):
"""Constructor.
Args:
root_path: Path to the app's root directory.
"""
self.root_path = root_path
def UpdateIndexYaml(self, openfile=open):
"""Update index.yaml.
Args:
openfile: Used for dependency injection.
We only ever write to index.yaml if either:
- it doesn't exist yet; or
- it contains an 'AUTOGENERATED' comment.
All indexes *before* the AUTOGENERATED comment will be written
back unchanged. All indexes *after* the AUTOGENERATED comment
will be updated with the latest query counts (query counts are
reset by --clear_datastore). Indexes that aren't yet in the file
will be appended to the AUTOGENERATED section.
We keep track of some data in order to avoid doing repetitive work:
- if index.yaml is fully manual, we keep track of its mtime to
avoid parsing it over and over;
- we keep track of the number of keys in the history dict since
the last time we updated index.yaml (or decided there was
nothing to update).
"""
index_yaml_file = os.path.join(self.root_path, 'index.yaml')
try:
index_yaml_mtime = os.path.getmtime(index_yaml_file)
except os.error:
index_yaml_mtime = None
index_yaml_changed = (index_yaml_mtime != self.index_yaml_mtime)
self.index_yaml_mtime = index_yaml_mtime
datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
query_ci_history_len = datastore_stub._QueryCompositeIndexHistoryLength()
history_changed = (query_ci_history_len != self.last_history_size)
self.last_history_size = query_ci_history_len
if not (index_yaml_changed or history_changed):
logging.debug('No need to update index.yaml')
return
if self.index_yaml_is_manual and not index_yaml_changed:
logging.debug('Will not update manual index.yaml')
return
if index_yaml_mtime is None:
index_yaml_data = None
else:
try:
fh = openfile(index_yaml_file, 'rU')
except IOError:
index_yaml_data = None
else:
try:
index_yaml_data = fh.read()
finally:
fh.close()
self.index_yaml_is_manual = (index_yaml_data is not None and
AUTO_MARKER not in index_yaml_data)
if self.index_yaml_is_manual:
logging.info('Detected manual index.yaml, will not update')
return
if index_yaml_data is None:
all_indexes = None
else:
try:
all_indexes = datastore_index.ParseIndexDefinitions(index_yaml_data)
except yaml_errors.EventListenerError, e:
logging.error('Error parsing %s:\n%s', index_yaml_file, e)
return
except Exception, err:
logging.error('Error parsing %s:\n%s.%s: %s', index_yaml_file,
err.__class__.__module__, err.__class__.__name__, err)
return
if index_yaml_data is None:
manual_part, prev_automatic_part = 'indexes:\n', ''
manual_indexes = None
else:
manual_part, prev_automatic_part = index_yaml_data.split(AUTO_MARKER, 1)
if prev_automatic_part.startswith(AUTO_COMMENT):
prev_automatic_part = prev_automatic_part[len(AUTO_COMMENT):]
try:
manual_indexes = datastore_index.ParseIndexDefinitions(manual_part)
except Exception, err:
logging.error('Error parsing manual part of %s: %s',
index_yaml_file, err)
return
automatic_part = GenerateIndexFromHistory(datastore_stub.QueryHistory(),
all_indexes, manual_indexes)
if (index_yaml_mtime is None and automatic_part == '' or
automatic_part == prev_automatic_part):
logging.debug('No need to update index.yaml')
return
try:
fh = openfile(index_yaml_file, 'w')
except IOError, err:
logging.error('Can\'t write index.yaml: %s', err)
return
try:
logging.info('Updating %s', index_yaml_file)
fh.write(manual_part)
fh.write(AUTO_MARKER)
fh.write(AUTO_COMMENT)
fh.write(automatic_part)
finally:
fh.close()
try:
self.index_yaml_mtime = os.path.getmtime(index_yaml_file)
except os.error, err:
logging.error('Can\'t stat index.yaml we just wrote: %s', err)
self.index_yaml_mtime = None
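# A minimal usage sketch (not part of the original module): how the updater is
# typically driven. `app_root` is a hypothetical path; UpdateIndexYaml() only
# does useful work inside dev_appserver, where the 'datastore_v3' stub is
# registered and a query history is available.
def _ExampleUpdateIndexYaml(app_root):
  updater = IndexYamlUpdater(app_root)
  updater.UpdateIndexYaml()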
|
|
import time
import warnings
from datetime import datetime, timedelta
from StringIO import StringIO
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest, LimitedStream
from django.http import HttpRequest, HttpResponse, parse_cookie, build_request_repr, UnreadablePostError
from django.test.utils import get_warnings_state, restore_warnings_state
from django.utils import unittest
from django.utils.http import cookie_date
from django.utils.timezone import utc
class RequestsTests(unittest.TestCase):
def test_httprequest(self):
request = HttpRequest()
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(request.META.keys(), [])
def test_httprequest_repr(self):
request = HttpRequest()
request.path = u'/somepath/'
request.GET = {u'get-key': u'get-value'}
request.POST = {u'post-key': u'post-value'}
request.COOKIES = {u'post-key': u'post-value'}
request.META = {u'post-key': u'post-value'}
self.assertEqual(repr(request), u"<HttpRequest\npath:/somepath/,\nGET:{u'get-key': u'get-value'},\nPOST:{u'post-key': u'post-value'},\nCOOKIES:{u'post-key': u'post-value'},\nMETA:{u'post-key': u'post-value'}>")
self.assertEqual(build_request_repr(request), repr(request))
self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={u'a': u'b'}, POST_override={u'c': u'd'}, COOKIES_override={u'e': u'f'}, META_override={u'g': u'h'}),
u"<HttpRequest\npath:/otherpath/,\nGET:{u'a': u'b'},\nPOST:{u'c': u'd'},\nCOOKIES:{u'e': u'f'},\nMETA:{u'g': u'h'}>")
def test_wsgirequest(self):
request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus', 'wsgi.input': StringIO('')})
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(set(request.META.keys()), set(['PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'wsgi.input']))
self.assertEqual(request.META['PATH_INFO'], 'bogus')
self.assertEqual(request.META['REQUEST_METHOD'], 'bogus')
self.assertEqual(request.META['SCRIPT_NAME'], '')
def test_wsgirequest_repr(self):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': StringIO('')})
request.GET = {u'get-key': u'get-value'}
request.POST = {u'post-key': u'post-value'}
request.COOKIES = {u'post-key': u'post-value'}
request.META = {u'post-key': u'post-value'}
self.assertEqual(repr(request), u"<WSGIRequest\npath:/somepath/,\nGET:{u'get-key': u'get-value'},\nPOST:{u'post-key': u'post-value'},\nCOOKIES:{u'post-key': u'post-value'},\nMETA:{u'post-key': u'post-value'}>")
self.assertEqual(build_request_repr(request), repr(request))
self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={u'a': u'b'}, POST_override={u'c': u'd'}, COOKIES_override={u'e': u'f'}, META_override={u'g': u'h'}),
u"<WSGIRequest\npath:/otherpath/,\nGET:{u'a': u'b'},\nPOST:{u'c': u'd'},\nCOOKIES:{u'e': u'f'},\nMETA:{u'g': u'h'}>")
def test_parse_cookie(self):
self.assertEqual(parse_cookie('invalid:key=true'), {})
def test_httprequest_location(self):
request = HttpRequest()
self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"),
'https://www.example.com/asdf')
request.get_host = lambda: 'www.example.com'
request.path = ''
self.assertEqual(request.build_absolute_uri(location="/path/with:colons"),
'http://www.example.com/path/with:colons')
def test_http_get_host(self):
old_USE_X_FORWARDED_HOST = settings.USE_X_FORWARDED_HOST
try:
settings.USE_X_FORWARDED_HOST = False
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
u'HTTP_X_FORWARDED_HOST': u'forward.com',
u'HTTP_HOST': u'example.com',
u'SERVER_NAME': u'internal.com',
u'SERVER_PORT': 80,
}
# X_FORWARDED_HOST is ignored.
self.assertEqual(request.get_host(), 'example.com')
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
u'HTTP_HOST': u'example.com',
u'SERVER_NAME': u'internal.com',
u'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'example.com')
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
u'SERVER_NAME': u'internal.com',
u'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'internal.com')
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
u'SERVER_NAME': u'internal.com',
u'SERVER_PORT': 8042,
}
self.assertEqual(request.get_host(), 'internal.com:8042')
finally:
settings.USE_X_FORWARDED_HOST = old_USE_X_FORWARDED_HOST
def test_http_get_host_with_x_forwarded_host(self):
old_USE_X_FORWARDED_HOST = settings.USE_X_FORWARDED_HOST
try:
settings.USE_X_FORWARDED_HOST = True
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
u'HTTP_X_FORWARDED_HOST': u'forward.com',
u'HTTP_HOST': u'example.com',
u'SERVER_NAME': u'internal.com',
u'SERVER_PORT': 80,
}
# X_FORWARDED_HOST is obeyed.
self.assertEqual(request.get_host(), 'forward.com')
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
u'HTTP_HOST': u'example.com',
u'SERVER_NAME': u'internal.com',
u'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'example.com')
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
u'SERVER_NAME': u'internal.com',
u'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'internal.com')
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
u'SERVER_NAME': u'internal.com',
u'SERVER_PORT': 8042,
}
self.assertEqual(request.get_host(), 'internal.com:8042')
finally:
settings.USE_X_FORWARDED_HOST = old_USE_X_FORWARDED_HOST
def test_near_expiration(self):
"Cookie will expire when an near expiration time is provided"
response = HttpResponse()
# There is a timing weakness in this test; The
# expected result for max-age requires that there be
# a very slight difference between the evaluated expiration
# time, and the time evaluated in set_cookie(). If this
# difference doesn't exist, the cookie time will be
# 1 second larger. To avoid the problem, put in a quick sleep,
# which guarantees that there will be a time difference.
expires = datetime.utcnow() + timedelta(seconds=10)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_aware_expiration(self):
"Cookie accepts an aware datetime as expiration time"
response = HttpResponse()
expires = (datetime.utcnow() + timedelta(seconds=10)).replace(tzinfo=utc)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_far_expiration(self):
"Cookie will expire when an distant expiration time is provided"
response = HttpResponse()
response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6))
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['expires'], 'Sat, 01-Jan-2028 04:05:06 GMT')
def test_max_age_expiration(self):
"Cookie will expire if max_age is provided"
response = HttpResponse()
response.set_cookie('max_age', max_age=10)
max_age_cookie = response.cookies['max_age']
self.assertEqual(max_age_cookie['max-age'], 10)
self.assertEqual(max_age_cookie['expires'], cookie_date(time.time()+10))
def test_httponly_cookie(self):
response = HttpResponse()
response.set_cookie('example', httponly=True)
example_cookie = response.cookies['example']
# A compat cookie may be in use -- check that it has worked
# both as an output string, and using the cookie attributes
self.assertTrue('; httponly' in str(example_cookie))
self.assertTrue(example_cookie['httponly'])
def test_limited_stream(self):
# Read all of a limited stream
stream = LimitedStream(StringIO('test'), 2)
self.assertEqual(stream.read(), 'te')
# Reading again returns nothing.
self.assertEqual(stream.read(), '')
# Read a number of characters greater than the stream has to offer
stream = LimitedStream(StringIO('test'), 2)
self.assertEqual(stream.read(5), 'te')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), '')
# Read sequentially from a stream
stream = LimitedStream(StringIO('12345678'), 8)
self.assertEqual(stream.read(5), '12345')
self.assertEqual(stream.read(5), '678')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), '')
# Read lines from a stream
stream = LimitedStream(StringIO('1234\n5678\nabcd\nefgh\nijkl'), 24)
# Read a full line, unconditionally
self.assertEqual(stream.readline(), '1234\n')
# Read a number of characters less than a line
self.assertEqual(stream.readline(2), '56')
# Read the rest of the partial line
self.assertEqual(stream.readline(), '78\n')
# Read a full line, with a character limit greater than the line length
self.assertEqual(stream.readline(6), 'abcd\n')
# Read the next line, deliberately terminated at the line end
self.assertEqual(stream.readline(4), 'efgh')
# Read the next line... just the line end
self.assertEqual(stream.readline(), '\n')
# Read everything else.
self.assertEqual(stream.readline(), 'ijkl')
# Regression for #15018
# If a stream contains a newline, but the provided length
# is less than the number of provided characters, the newline
# doesn't reset the available character count
stream = LimitedStream(StringIO('1234\nabcdef'), 9)
self.assertEqual(stream.readline(10), '1234\n')
self.assertEqual(stream.readline(3), 'abc')
# Now expire the available characters
self.assertEqual(stream.readline(3), 'd')
# Reading again returns nothing.
self.assertEqual(stream.readline(2), '')
# Same test, but with read, not readline.
stream = LimitedStream(StringIO('1234\nabcdef'), 9)
self.assertEqual(stream.read(6), '1234\na')
self.assertEqual(stream.read(2), 'bc')
self.assertEqual(stream.read(2), 'd')
self.assertEqual(stream.read(2), '')
self.assertEqual(stream.read(), '')
def test_stream(self):
payload = 'name=value'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)})
self.assertEqual(request.read(), 'name=value')
def test_read_after_value(self):
"""
Reading from request is allowed after accessing request contents as
POST or body.
"""
payload = 'name=value'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)})
self.assertEqual(request.POST, {u'name': [u'value']})
self.assertEqual(request.body, 'name=value')
self.assertEqual(request.read(), 'name=value')
def test_value_after_read(self):
"""
Construction of POST or body is not allowed after reading
from request.
"""
payload = 'name=value'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)})
self.assertEqual(request.read(2), 'na')
self.assertRaises(Exception, lambda: request.body)
self.assertEqual(request.POST, {})
def test_body_after_POST_multipart(self):
"""
Reading body after parsing multipart is not allowed
"""
        # Because multipart is used for large amounts of data, i.e. file uploads,
# we don't want the data held in memory twice, and we don't want to
# silence the error by setting body = '' either.
payload = "\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
''])
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)})
self.assertEqual(request.POST, {u'name': [u'value']})
self.assertRaises(Exception, lambda: request.body)
def test_POST_multipart_with_content_length_zero(self):
"""
Multipart POST requests with Content-Length >= 0 are valid and need to be handled.
"""
# According to:
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
# Every request.POST with Content-Length >= 0 is a valid request,
# this test ensures that we handle Content-Length == 0.
payload = "\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
''])
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': 0,
'wsgi.input': StringIO(payload)})
self.assertEqual(request.POST, {})
def test_read_by_lines(self):
payload = 'name=value'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)})
self.assertEqual(list(request), ['name=value'])
def test_POST_after_body_read(self):
"""
POST should be populated even if body is read first
"""
payload = 'name=value'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)})
raw_data = request.body
self.assertEqual(request.POST, {u'name': [u'value']})
def test_POST_after_body_read_and_stream_read(self):
"""
POST should be populated even if body is read first, and then
the stream is read second.
"""
payload = 'name=value'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)})
raw_data = request.body
self.assertEqual(request.read(1), u'n')
self.assertEqual(request.POST, {u'name': [u'value']})
def test_POST_after_body_read_and_stream_read_multipart(self):
"""
POST should be populated even if body is read first, and then
the stream is read second. Using multipart/form-data instead of urlencoded.
"""
payload = "\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
''])
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)})
raw_data = request.body
# Consume enough data to mess up the parsing:
self.assertEqual(request.read(13), u'--boundary\r\nC')
self.assertEqual(request.POST, {u'name': [u'value']})
def test_raw_post_data_returns_body(self):
"""
        HttpRequest.raw_post_data should be the same as HttpRequest.body
"""
payload = 'Hello There!'
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)
})
warnings_state = get_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning, module='django.http')
try:
self.assertEqual(request.body, request.raw_post_data)
finally:
restore_warnings_state(warnings_state)
def test_POST_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
POST, the exception should be identifiable (not a generic IOError).
"""
class ExplodingStringIO(StringIO):
def read(self, len=0):
raise IOError("kaboom!")
payload = 'name=value'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'wsgi.input': ExplodingStringIO(payload)})
with self.assertRaises(UnreadablePostError):
request.raw_post_data
|
|
import importlib.util
import itertools
import os
import re
import shutil
from collections import defaultdict
from typing import Optional, IO, Dict, List
import pytest
import numpy as np
from numpy.typing.mypy_plugin import (
_PRECISION_DICT,
_EXTENDED_PRECISION_LIST,
_C_INTP,
)
try:
from mypy import api
except ImportError:
NO_MYPY = True
else:
NO_MYPY = False
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
PASS_DIR = os.path.join(DATA_DIR, "pass")
FAIL_DIR = os.path.join(DATA_DIR, "fail")
REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
MISC_DIR = os.path.join(DATA_DIR, "misc")
MYPY_INI = os.path.join(DATA_DIR, "mypy.ini")
CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
#: A dictionary with file names as keys and lists of the mypy stdout as values.
#: To-be populated by `run_mypy`.
OUTPUT_MYPY: Dict[str, List[str]] = {}
def _key_func(key: str) -> str:
"""Split at the first occurance of the ``:`` character.
Windows drive-letters (*e.g.* ``C:``) are ignored herein.
"""
drive, tail = os.path.splitdrive(key)
return os.path.join(drive, tail.split(":", 1)[0])
def _strip_filename(msg: str) -> str:
"""Strip the filename from a mypy message."""
_, tail = os.path.splitdrive(msg)
return tail.split(":", 1)[-1]
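# Illustrative example (not part of the original file): a mypy message such as
#     "numpy/typing/tests/data/fail/foo.py:42: error: ..."
# is reduced by _strip_filename to "42: error: ...".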
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.fixture(scope="module", autouse=True)
def run_mypy() -> None:
"""Clears the cache and run mypy before running any of the typing tests.
The mypy results are cached in `OUTPUT_MYPY` for further use.
The cache refresh can be skipped using
NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests
"""
    if os.path.isdir(CACHE_DIR) and os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", "1") != "0":
shutil.rmtree(CACHE_DIR)
for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR):
# Run mypy
stdout, stderr, exit_code = api.run([
"--config-file",
MYPY_INI,
"--cache-dir",
CACHE_DIR,
directory,
])
if stderr:
pytest.fail(f"Unexpected mypy standard error\n\n{stderr}")
elif exit_code not in {0, 1}:
pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}")
stdout = stdout.replace('*', '')
# Parse the output
iterator = itertools.groupby(stdout.split("\n"), key=_key_func)
OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k)
def get_test_cases(directory):
for root, _, files in os.walk(directory):
for fname in files:
if os.path.splitext(fname)[-1] == ".py":
fullpath = os.path.join(root, fname)
# Use relative path for nice py.test name
relpath = os.path.relpath(fullpath, start=directory)
yield pytest.param(
fullpath,
# Manually specify a name for the test
id=relpath,
)
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_success(path):
# Alias `OUTPUT_MYPY` so that it appears in the local namespace
output_mypy = OUTPUT_MYPY
if path in output_mypy:
msg = "Unexpected mypy output\n\n"
msg += "\n".join(_strip_filename(v) for v in output_mypy[path])
raise AssertionError(msg)
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
def test_fail(path):
__tracebackhide__ = True
with open(path) as fin:
lines = fin.readlines()
errors = defaultdict(lambda: "")
output_mypy = OUTPUT_MYPY
assert path in output_mypy
for error_line in output_mypy[path]:
error_line = _strip_filename(error_line)
match = re.match(
r"(?P<lineno>\d+): (error|note): .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected error line format: {error_line}")
lineno = int(match.group('lineno'))
errors[lineno] += f'{error_line}\n'
for i, line in enumerate(lines):
lineno = i + 1
if line.startswith('#') or (" E:" not in line and lineno not in errors):
continue
target_line = lines[lineno - 1]
if "# E:" in target_line:
marker = target_line.split("# E:")[-1].strip()
expected_error = errors.get(lineno)
_test_fail(path, marker, expected_error, lineno)
else:
pytest.fail(f"Unexpected mypy output\n\n{errors[lineno]}")
_FAIL_MSG1 = """Extra error at line {}
Extra error: {!r}
"""
_FAIL_MSG2 = """Error mismatch at line {}
Expected error: {!r}
Observed error: {!r}
"""
def _test_fail(path: str, error: str, expected_error: Optional[str], lineno: int) -> None:
if expected_error is None:
raise AssertionError(_FAIL_MSG1.format(lineno, error))
elif error not in expected_error:
raise AssertionError(_FAIL_MSG2.format(lineno, expected_error, error))
def _construct_format_dict():
dct = {k.split(".")[-1]: v.replace("numpy", "numpy.typing") for
k, v in _PRECISION_DICT.items()}
return {
"uint8": "numpy.unsignedinteger[numpy.typing._8Bit]",
"uint16": "numpy.unsignedinteger[numpy.typing._16Bit]",
"uint32": "numpy.unsignedinteger[numpy.typing._32Bit]",
"uint64": "numpy.unsignedinteger[numpy.typing._64Bit]",
"uint128": "numpy.unsignedinteger[numpy.typing._128Bit]",
"uint256": "numpy.unsignedinteger[numpy.typing._256Bit]",
"int8": "numpy.signedinteger[numpy.typing._8Bit]",
"int16": "numpy.signedinteger[numpy.typing._16Bit]",
"int32": "numpy.signedinteger[numpy.typing._32Bit]",
"int64": "numpy.signedinteger[numpy.typing._64Bit]",
"int128": "numpy.signedinteger[numpy.typing._128Bit]",
"int256": "numpy.signedinteger[numpy.typing._256Bit]",
"float16": "numpy.floating[numpy.typing._16Bit]",
"float32": "numpy.floating[numpy.typing._32Bit]",
"float64": "numpy.floating[numpy.typing._64Bit]",
"float80": "numpy.floating[numpy.typing._80Bit]",
"float96": "numpy.floating[numpy.typing._96Bit]",
"float128": "numpy.floating[numpy.typing._128Bit]",
"float256": "numpy.floating[numpy.typing._256Bit]",
"complex64": "numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit]",
"complex128": "numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit]",
"complex160": "numpy.complexfloating[numpy.typing._80Bit, numpy.typing._80Bit]",
"complex192": "numpy.complexfloating[numpy.typing._96Bit, numpy.typing._96Bit]",
"complex256": "numpy.complexfloating[numpy.typing._128Bit, numpy.typing._128Bit]",
"complex512": "numpy.complexfloating[numpy.typing._256Bit, numpy.typing._256Bit]",
"ubyte": f"numpy.unsignedinteger[{dct['_NBitByte']}]",
"ushort": f"numpy.unsignedinteger[{dct['_NBitShort']}]",
"uintc": f"numpy.unsignedinteger[{dct['_NBitIntC']}]",
"uintp": f"numpy.unsignedinteger[{dct['_NBitIntP']}]",
"uint": f"numpy.unsignedinteger[{dct['_NBitInt']}]",
"ulonglong": f"numpy.unsignedinteger[{dct['_NBitLongLong']}]",
"byte": f"numpy.signedinteger[{dct['_NBitByte']}]",
"short": f"numpy.signedinteger[{dct['_NBitShort']}]",
"intc": f"numpy.signedinteger[{dct['_NBitIntC']}]",
"intp": f"numpy.signedinteger[{dct['_NBitIntP']}]",
"int_": f"numpy.signedinteger[{dct['_NBitInt']}]",
"longlong": f"numpy.signedinteger[{dct['_NBitLongLong']}]",
"half": f"numpy.floating[{dct['_NBitHalf']}]",
"single": f"numpy.floating[{dct['_NBitSingle']}]",
"double": f"numpy.floating[{dct['_NBitDouble']}]",
"longdouble": f"numpy.floating[{dct['_NBitLongDouble']}]",
"csingle": f"numpy.complexfloating[{dct['_NBitSingle']}, {dct['_NBitSingle']}]",
"cdouble": f"numpy.complexfloating[{dct['_NBitDouble']}, {dct['_NBitDouble']}]",
"clongdouble": f"numpy.complexfloating[{dct['_NBitLongDouble']}, {dct['_NBitLongDouble']}]",
# numpy.typing
"_NBitInt": dct['_NBitInt'],
# numpy.ctypeslib
"c_intp": f"ctypes.{_C_INTP}"
}
#: A dictionary with all supported format keys (as keys)
#: and matching values
FORMAT_DICT: Dict[str, str] = _construct_format_dict()
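# Illustrative entries (not part of the original file):
#     FORMAT_DICT["float64"] == "numpy.floating[numpy.typing._64Bit]"
#     FORMAT_DICT["intp"] resolves via _PRECISION_DICT, so it is platform dependent.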
def _parse_reveals(file: IO[str]) -> List[str]:
"""Extract and parse all ``" # E: "`` comments from the passed file-like object.
All format keys will be substituted for their respective value from `FORMAT_DICT`,
*e.g.* ``"{float64}"`` becomes ``"numpy.floating[numpy.typing._64Bit]"``.
"""
string = file.read().replace("*", "")
# Grab all `# E:`-based comments
comments_array = np.char.partition(string.split("\n"), sep=" # E: ")[:, 2]
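    # Note: "/n" (slash-n, not "\n") is deliberately used as the join/split
    # sentinel below, since real newlines were already consumed by the split
    # above and the sentinel must not occur inside any comment.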
comments = "/n".join(comments_array)
# Only search for the `{*}` pattern within comments,
# otherwise there is the risk of accidently grabbing dictionaries and sets
key_set = set(re.findall(r"\{(.*?)\}", comments))
kwargs = {
k: FORMAT_DICT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for k in key_set
}
fmt_str = comments.format(**kwargs)
return fmt_str.split("/n")
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
def test_reveal(path):
__tracebackhide__ = True
with open(path) as fin:
lines = _parse_reveals(fin)
output_mypy = OUTPUT_MYPY
assert path in output_mypy
for error_line in output_mypy[path]:
error_line = _strip_filename(error_line)
match = re.match(
r"(?P<lineno>\d+): note: .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected reveal line format: {error_line}")
lineno = int(match.group('lineno')) - 1
assert "Revealed type is" in error_line
marker = lines[lineno]
_test_reveal(path, marker, error_line, 1 + lineno)
_REVEAL_MSG = """Reveal mismatch at line {}
Expected reveal: {!r}
Observed reveal: {!r}
"""
def _test_reveal(path: str, reveal: str, expected_reveal: str, lineno: int) -> None:
if reveal not in expected_reveal:
raise AssertionError(_REVEAL_MSG.format(lineno, expected_reveal, reveal))
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_code_runs(path):
path_without_extension, _ = os.path.splitext(path)
dirname, filename = path.split(os.sep)[-2:]
spec = importlib.util.spec_from_file_location(f"{dirname}.{filename}", path)
test_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(test_module)
LINENO_MAPPING = {
3: "uint128",
4: "uint256",
6: "int128",
7: "int256",
9: "float80",
10: "float96",
11: "float128",
12: "float256",
14: "complex160",
15: "complex192",
16: "complex256",
17: "complex512",
}
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
def test_extended_precision() -> None:
path = os.path.join(MISC_DIR, "extended_precision.py")
output_mypy = OUTPUT_MYPY
assert path in output_mypy
for _msg in output_mypy[path]:
*_, _lineno, msg_typ, msg = _msg.split(":")
msg = _strip_filename(msg)
lineno = int(_lineno)
msg_typ = msg_typ.strip()
assert msg_typ in {"error", "note"}
if LINENO_MAPPING[lineno] in _EXTENDED_PRECISION_LIST:
if msg_typ == "error":
raise ValueError(f"Unexpected reveal line format: {lineno}")
else:
marker = FORMAT_DICT[LINENO_MAPPING[lineno]]
_test_reveal(path, marker, msg, lineno)
else:
if msg_typ == "error":
marker = "Module has no attribute"
_test_fail(path, marker, msg, lineno)
|
|
# Canvas module for simple drawing and mouse handling
# Quintin Cutts
# Last modified 20 - 9 - 09
# Last modified 8/8/2015 Ross Meikleham for python 3 compatibility
### --------------------------------------------------------
### IF YOU JUST WANT TO KNOW WHAT FUNCTIONS ARE AVAILABLE
### THEN JUMP STRAIGHT TO THE END OF THIS CODE - YOU'LL
### FIND THE FULL LIST THERE.
### --------------------------------------------------------
try:
from Tkinter import *
except ImportError:
# for Python3
from tkinter import *
import math
import threading
import time
try:
from exceptions import *
except:
pass
class WindowGone(Exception):
def __init__(self, args=[]):
self.args = args
# These are all the inner Glasgow Canvas functions
class RawCanvas:
def __init__(self):
self.mainThread = threading.currentThread()
self._events = []
self.mainLoopRunning = False
self.no_current_keyhandler_call = True # Concurrency control - stops multiple simultaneous calls of the handler
self.no_current_mousehandler_call = True
# These are the main drawing functions - calling straight through to the
# underlying Tkinter Canvas functions
def create_rectangle( self, x1, y1, x2, y2, *kw ):
r = self._canvas.create_rectangle( x1, y1, x2, y2, kw )
self._canvas._root().update()
return r
def create_arc( self, x1, y1, x2, y2, *kw ):
r = self._canvas.create_arc( x1, y1, x2, y2, kw )
self._canvas._root().update()
return r
def create_line( self, x1, y1, x2, y2, *kw ):
r = self._canvas.create_line( x1, y1, x2, y2, kw)
self._canvas._root().update()
return r
def create_oval( self, x1, y1, x2, y2, *kw ):
r = self._canvas.create_oval( x1, y1, x2, y2, kw )
self._canvas._root().update()
return r
def create_text( self, x1, y1, *kw ):
r = self._canvas.create_text( x1, y1, kw )
self._canvas._root().update()
return r
def create_image( self, x1, y1, *kw ):
r = self._canvas.create_image( x1, y1, kw )
self._canvas._root().update()
return r
def move( self, tagOrId, xInc, yInc ):
self._canvas.move( tagOrId, xInc, yInc )
self._canvas._root().update()
def delete( self, tagOrId ):
self._canvas.delete( tagOrId )
self._canvas._root().update()
def add_to_group( self, groupName, tagOrId, moreTagsOrIds ):
for tOrId in (tagOrId,) + moreTagsOrIds:
self._canvas.addtag_withtag( groupName, tOrId )
def import_image( self, filename ):
return PhotoImage( file = filename )
def set_title( self, t ):
self._canvas._root().title( t )
self._canvas._root().update()
# These two both set the mainloop running
# For this one, it's just to set a button handler to kill the window when pressed
def _normal_complete( self, m = "Click mouse to end" ):
global _can
self._canvas.unbind("<Button-1>")
self._canvas.bind("<Button-1>", _can.destroy)
wait( 0.5 )
self._canvas._root().title( m )
self._canvas._root().update()
self.run()
# and for this one, it sets the mainloop running alone, trusting the user has
# set some callbacks already.
def run( self ):
if not self.mainLoopRunning:
self.mainLoopRunning = True
try:
self._canvas._root().mainloop()
except WindowGone:
pass
# These three functions all set flags in the _events list - and then are handled
# by the originating tkinter thread later. Required as if the code to execute
# these functions called by the non-Tkinter thread, then Tkinter hangs.
def set_size( self, x, y ):
self._events = self._events + [ ["ss",x,y] ]
def complete( self, a=None ):
if threading.currentThread() != self.mainThread:
if "c" not in self._events:
if a == None:
self._events = self._events + ["c"]
else:
self._events = self._events + ["c"+a]
else:
if a == None:
self._normal_complete()
else:
self._normal_complete( a )
def quitCanvas( self ):
if "q" not in self._events:
self._events = self._events + [ "q" ]
# Enables a separate thread to be run alongside the Tkinter thread.
# This is the unsafest part of the module, since separate threads shouldn't be allowed
# to call the Tkinter functions - but it seems to work for the Canvas functions
def runGraphicsFn( self,g ):
def gWrap():
try:
g()
except WindowGone: # Enables threads to die quietly if Tkinter closed by user
pass
newThread = threading.Thread( target = gWrap )
newThread.start()
# A range of event handler setting functions next
def set_keydown_handler( self, handler ):
def inner_handler( e ):
if self.no_current_keyhandler_call:
self.no_current_keyhandler_call = False
handler( e.keysym )
self.no_current_keyhandler_call = True
self._canvas._root().bind( "<Any-KeyPress>", inner_handler )
self._canvas._root().update()
def unset_keydown_handler( self ):
self._canvas._root().unbind( "<Any-KeyPress>" )
def set_mousedown_handler( self, handler ):
def inner_handler( e ):
if self.no_current_mousehandler_call:
self.no_current_mousehandler_call = False
handler( e.x, e.y, e.num )
self.no_current_mousehandler_call = True
self._canvas.bind( "<Any-Button>", inner_handler )
self._canvas._root().update()
def unset_mousedown_handler( self ):
self._canvas.unbind( "<Any-Button>" )
def set_mouseup_handler( self, handler ):
def inner_handler( e ):
if self.no_current_mousehandler_call:
self.no_current_mousehandler_call = False
handler( e.x, e.y, e.num )
self.no_current_mousehandler_call = True
self._canvas.bind( "<Any-ButtonRelease>", inner_handler )
self._canvas._root().update()
def unset_mouseup_handler( self ):
self._canvas.unbind( "<Any-ButtonRelease>" )
def set_mousemotion_handler( self, handler ):
def inner_handler( e ):
if self.no_current_mousehandler_call:
self.no_current_mousehandler_call = False
handler( e.x, e.y )
self.no_current_mousehandler_call = True
self._canvas.bind( "<Motion>", inner_handler )
self._canvas._root().update()
def unset_mousemotion_handler( self ):
self._canvas.unbind( "<Motion>" )
_can = None # This is the Glasgow canvas
_hadCan = False # Did we ever open a Canvas, even though it might now be dead?
_blockCalls = False # When True, don't try to execute Canvas ops, because Window has been closed
class Can( RawCanvas ):
def __init__( self ):
global _root, _canvas
self._root = Tk()
self._canvas = Canvas( self._root, background = "white" )
self._canvas.pack(expand=1, fill="both" )
RawCanvas.__init__( self )
self._root.iconify()
self._root.update()
self._root.deiconify()
self._root.update()
def destroy( event=None, extra=None ):
global _blockCalls, _root
_blockCalls = True
time.sleep( 0.5 )
self._root.destroy()
self.destroy = destroy
self._root.protocol("WM_DELETE_WINDOW",self.destroy )
# Finally, get the event checker running, to pick up events
# coming in from other threads that want to act on the tkinter thread
def update_tkinter():
if self._events != []:
for e in self._events:
if type( e ) == type( "" ):
if e[0] == "c":
if len( e ) == 1:
self._normal_complete()
else:
self._normal_complete( e[1:] )
elif e == "q":
self.destroy()
else: # must be ["ss", x, y] for a set screen
self._canvas.config( width = e[1], height = e[2] )
self._events = []
self._root.after( 10, update_tkinter )
update_tkinter()
def _getCanvas():
global _can, _hadCan, _blockCalls
if (_hadCan and not _can) or _blockCalls:
raise WindowGone
if not _can:
_can = Can()
_hadCan = True
return _can
##########################################################
# These are the only visible functions out of the module
#
# i.e. These are the functions that you can use
##########################################################
def create_rectangle( x1, y1, x2, y2, **kw ):
return _getCanvas().create_rectangle( x1, y1, x2, y2, kw )
def create_arc( x1, y1, x2, y2, **kw ):
return _getCanvas().create_arc( x1, y1, x2, y2, kw )
def create_line( x1, y1, x2, y2, **kw ):
return _getCanvas().create_line( x1, y1, x2, y2, kw )
def create_oval( x1, y1, x2, y2, **kw ):
return _getCanvas().create_oval( x1, y1, x2, y2, kw )
def create_text( x1, y1, **kw ):
return _getCanvas().create_text( x1, y1, kw )
def create_image( x1, y1, **kw ):
return _getCanvas().create_image( x1, y1, kw )
def move( tagOrId, xInc, yInc ):
_getCanvas().move( tagOrId, xInc, yInc )
def delete( tagOrId ):
_getCanvas().delete( tagOrId )
def addToGroup( groupName, tagOrId, *moreTagOrIds ):
    _getCanvas().add_to_group( groupName, tagOrId, moreTagOrIds )
def importImage( filename ):
    return _getCanvas().import_image( filename )
def wait( t1 ):
time.sleep( t1 )
def set_title( txt ):
_getCanvas().set_title( txt )
def set_size( x, y ):
_getCanvas().set_size( x, y )
def complete( a = None ):
_getCanvas().complete( a )
def run():
_getCanvas().run()
def quitCanvas():
_getCanvas().quitCanvas()
def runGraphicsFn( g ):
_getCanvas().runGraphicsFn( g )
def set_keydown_handler( handler ):
_getCanvas().set_keydown_handler( handler )
def unset_keydown_handler():
_getCanvas().unset_keydown_handler()
def set_mousedown_handler( handler ):
_getCanvas().set_mousedown_handler( handler )
def unset_mousedown_handler( handler=None ):
    # The handler argument is unused; it is kept (as optional) only so callers
    # that pass one keep working. The other unset_* functions take no argument.
    _getCanvas().unset_mousedown_handler()
def set_mouseup_handler( handler ):
_getCanvas().set_mouseup_handler( handler )
def unset_mouseup_handler():
_getCanvas().unset_mouseup_handler()
def set_mousemotion_handler( handler ):
_getCanvas().set_mousemotion_handler( handler )
def unset_mousemotion_handler():
_getCanvas().unset_mousemotion_handler()
def end_x( start_x, length, angle ):
return start_x + length * math.sin( math.radians( angle ) )
def end_y( start_y, length, angle ):
return start_y + length * math.cos( math.radians( angle ) )
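# Illustrative sketch (not part of the module): one way the public functions
# above might be combined, assuming this module is importable as `canvas` and
# a Tk display is available.
#
#   import canvas
#   canvas.set_title("Demo")
#   canvas.set_size(400, 300)
#   canvas.create_rectangle(50, 50, 150, 120, fill="yellow")
#   tip_x = canvas.end_x(100, 80, 30)   # end point of an 80-pixel segment at 30 degrees
#   tip_y = canvas.end_y(100, 80, 30)
#   canvas.create_line(100, 100, tip_x, tip_y)
#   canvas.run()                        # hand control to the tkinter event loop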
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# metadata
from __future__ import unicode_literals
import frappe, json
from frappe.utils import cstr, cint
from frappe.model import integer_docfield_properties, default_fields, no_value_fields, optional_fields
from frappe.model.document import Document
from frappe.model.base_document import BaseDocument
from frappe.model.db_schema import type_map
from frappe.modules import load_doctype_module
def get_meta(doctype, cached=True):
if cached:
return frappe.cache().hget("meta", doctype, lambda: Meta(doctype))
else:
return Meta(doctype)
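# Illustrative sketch (not part of the module): how get_meta is typically
# consumed. The DocType name "ToDo" is only an example.
#
#   meta = get_meta("ToDo")                 # cached Meta instance
#   df = meta.get_field("description")      # a DocField, or None if absent
#   meta = get_meta("ToDo", cached=False)   # bypass the cache and rebuild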
def get_table_columns(doctype):
return frappe.cache().hget("table_columns", doctype,
lambda: frappe.db.get_table_columns(doctype))
def load_doctype_from_file(doctype):
fname = frappe.scrub(doctype)
with open(frappe.get_app_path("frappe", "core", "doctype", fname, fname + ".json"), "r") as f:
txt = json.loads(f.read())
for d in txt.get("fields", []):
d["doctype"] = "DocField"
for d in txt.get("permissions", []):
d["doctype"] = "DocPerm"
txt["fields"] = [BaseDocument(d) for d in txt["fields"]]
if "permissions" in txt:
txt["permissions"] = [BaseDocument(d) for d in txt["permissions"]]
return txt
class Meta(Document):
_metaclass = True
default_fields = list(default_fields)[1:]
special_doctypes = ("DocField", "DocPerm", "Role", "DocType", "Module Def")
def __init__(self, doctype):
self._fields = {}
super(Meta, self).__init__("DocType", doctype)
self.process()
def load_from_db(self):
try:
super(Meta, self).load_from_db()
except frappe.DoesNotExistError:
if self.doctype=="DocType" and self.name in self.special_doctypes:
self.__dict__.update(load_doctype_from_file(self.name))
else:
raise
def get_link_fields(self):
return self.get("fields", {"fieldtype": "Link", "options":["!=", "[Select]"]})
def get_dynamic_link_fields(self):
if not hasattr(self, '_dynamic_link_fields'):
self._dynamic_link_fields = self.get("fields", {"fieldtype": "Dynamic Link"})
return self._dynamic_link_fields
def get_select_fields(self):
return self.get("fields", {"fieldtype": "Select", "options":["not in",
["[Select]", "Loading..."]]})
def get_table_fields(self):
if not hasattr(self, "_table_fields"):
if self.name!="DocType":
self._table_fields = self.get('fields', {"fieldtype":"Table"})
else:
self._table_fields = doctype_table_fields
return self._table_fields
def get_valid_columns(self):
if not hasattr(self, "_valid_columns"):
if self.name in ("DocType", "DocField", "DocPerm", "Property Setter"):
self._valid_columns = get_table_columns(self.name)
else:
self._valid_columns = self.default_fields + \
[df.fieldname for df in self.get("fields") if df.fieldtype in type_map]
return self._valid_columns
def get_table_field_doctype(self, fieldname):
return { "fields": "DocField", "permissions": "DocPerm"}.get(fieldname)
def get_field(self, fieldname):
'''Return docfield from meta'''
if not self._fields:
for f in self.get("fields"):
self._fields[f.fieldname] = f
return self._fields.get(fieldname)
def has_field(self, fieldname):
'''Returns True if fieldname exists'''
return True if self.get_field(fieldname) else False
def get_label(self, fieldname):
return self.get_field(fieldname).label
def get_options(self, fieldname):
return self.get_field(fieldname).options
def get_link_doctype(self, fieldname):
df = self.get_field(fieldname)
if df.fieldtype == "Link":
return df.options
elif df.fieldtype == "Dynamic Link":
return self.get_options(df.options)
else:
return None
def get_search_fields(self):
search_fields = self.search_fields or "name"
search_fields = [d.strip() for d in search_fields.split(",")]
if "name" not in search_fields:
search_fields.append("name")
return search_fields
def get_list_fields(self):
list_fields = ["name"] + [d.fieldname \
for d in self.fields if (d.in_list_view and d.fieldtype in type_map)]
if self.title_field and self.title_field not in list_fields:
list_fields.append(self.title_field)
return list_fields
def get_custom_fields(self):
return [d for d in self.fields if d.get('is_custom_field')]
def get_title_field(self):
return self.title_field or "name"
def process(self):
# don't process for special doctypes
		# prevents a circular dependency
if self.name in self.special_doctypes:
return
self.add_custom_fields()
self.apply_property_setters()
self.sort_fields()
self.get_valid_columns()
def add_custom_fields(self):
try:
self.extend("fields", frappe.db.sql("""SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", (self.name,), as_dict=1,
update={"is_custom_field": 1}))
except Exception, e:
			# MySQL error 1146: table doesn't exist (`tabCustom Field` not created yet)
			if e.args[0]==1146:
return
else:
raise
def apply_property_setters(self):
for ps in frappe.db.sql("""select * from `tabProperty Setter` where
doc_type=%s""", (self.name,), as_dict=1):
if ps.doctype_or_field=='DocType':
if ps.property_type in ('Int', 'Check'):
ps.value = cint(ps.value)
self.set(ps.property, ps.value)
else:
docfield = self.get("fields", {"fieldname":ps.field_name}, limit=1)
if docfield:
docfield = docfield[0]
else:
continue
if ps.property in integer_docfield_properties:
ps.value = cint(ps.value)
docfield.set(ps.property, ps.value)
def sort_fields(self):
"""sort on basis of insert_after"""
custom_fields = sorted(self.get_custom_fields(), key=lambda df: df.idx)
if custom_fields:
newlist = []
			# custom fields with no insert_after are placed at the top,
			# ahead of the standard fields
for c in list(custom_fields):
if not c.insert_after:
newlist.append(c)
custom_fields.pop(custom_fields.index(c))
# standard fields
newlist += [df for df in self.get('fields') if not df.get('is_custom_field')]
newlist_fieldnames = [df.fieldname for df in newlist]
for i in xrange(2):
for df in list(custom_fields):
if df.insert_after in newlist_fieldnames:
cf = custom_fields.pop(custom_fields.index(df))
idx = newlist_fieldnames.index(df.insert_after)
newlist.insert(idx + 1, cf)
newlist_fieldnames.insert(idx + 1, cf.fieldname)
if not custom_fields:
break
			# worst case: append any remaining custom fields at the end
if custom_fields:
newlist += custom_fields
# renum idx
for i, f in enumerate(newlist):
f.idx = i + 1
self.fields = newlist
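	# Illustrative note (not part of the module): given standard fields
	# [a, b, c] and custom fields y (insert_after=None) and x (insert_after="a"),
	# sort_fields produces [y, a, x, b, c] and renumbers idx to 1..5.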
def get_fields_to_check_permissions(self, user_permission_doctypes):
fields = self.get("fields", {
"fieldtype":"Link",
"parent": self.name,
"ignore_user_permissions":("!=", 1),
"options":("in", user_permission_doctypes)
})
if self.name in user_permission_doctypes:
fields.append(frappe._dict({
"label":"Name",
"fieldname":"name",
"options": self.name
}))
return fields
def get_high_permlevel_fields(self):
"""Build list of fields with high perm level and all the higher perm levels defined."""
if not hasattr(self, "high_permlevel_fields"):
self.high_permlevel_fields = []
for df in self.fields:
if df.permlevel > 0:
self.high_permlevel_fields.append(df)
return self.high_permlevel_fields
def get_links_setup(self):
		'''Return the setup for documents related to this doctype.
		This is the `links` dict defined in the `[doctype]_links.py`
		file in the doctype folder.'''
try:
module = load_doctype_module(self.name, suffix='_links')
return frappe._dict(module.links)
except ImportError:
return frappe._dict()
doctype_table_fields = [
frappe._dict({"fieldname": "fields", "options": "DocField"}),
frappe._dict({"fieldname": "permissions", "options": "DocPerm"})
]
#######
def is_single(doctype):
try:
return frappe.db.get_value("DocType", doctype, "issingle")
except IndexError:
raise Exception, 'Cannot determine whether %s is single' % doctype
def get_parent_dt(dt):
parent_dt = frappe.db.sql("""select parent from tabDocField
where fieldtype="Table" and options=%s and (parent not like "old_parent:%%")
limit 1""", dt)
return parent_dt and parent_dt[0][0] or ''
def set_fieldname(field_id, fieldname):
frappe.db.set_value('DocField', field_id, 'fieldname', fieldname)
def get_field_currency(df, doc=None):
"""get currency based on DocField options and fieldvalue in doc"""
currency = None
if not df.get("options"):
return None
if not doc:
return None
if not getattr(frappe.local, "field_currency", None):
frappe.local.field_currency = frappe._dict()
if not (frappe.local.field_currency.get((doc.doctype, doc.name), {}).get(df.fieldname) or
(doc.parent and frappe.local.field_currency.get((doc.doctype, doc.parent), {}).get(df.fieldname))):
ref_docname = doc.parent or doc.name
if ":" in cstr(df.get("options")):
split_opts = df.get("options").split(":")
if len(split_opts)==3:
currency = frappe.db.get_value(split_opts[0], doc.get(split_opts[1]), split_opts[2])
else:
currency = doc.get(df.get("options"))
if doc.parent:
if currency:
ref_docname = doc.name
else:
currency = frappe.db.get_value(doc.parenttype, doc.parent, df.get("options"))
if currency:
frappe.local.field_currency.setdefault((doc.doctype, ref_docname), frappe._dict())\
.setdefault(df.fieldname, currency)
return frappe.local.field_currency.get((doc.doctype, doc.name), {}).get(df.fieldname) or \
(doc.parent and frappe.local.field_currency.get((doc.doctype, doc.parent), {}).get(df.fieldname))
def get_field_precision(df, doc=None, currency=None):
"""get precision based on DocField options and fieldvalue in doc"""
from frappe.utils import get_number_format_info
if cint(df.precision):
precision = cint(df.precision)
elif df.fieldtype == "Currency":
number_format = None
if not currency and doc:
currency = get_field_currency(df, doc)
if not currency:
# use default currency
currency = frappe.db.get_default("currency")
if currency:
number_format = frappe.db.get_value("Currency", currency, "number_format", cache=True)
if not number_format:
number_format = frappe.db.get_default("number_format") or "#,###.##"
decimal_str, comma_str, precision = get_number_format_info(number_format)
else:
precision = cint(frappe.db.get_default("float_precision")) or 3
return precision
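# Illustrative note (not part of the module): for a Currency field with no
# explicit precision and number_format "#,###.##", get_number_format_info
# yields (".", ",", 2), so the precision resolves to 2. A non-Currency field
# without an explicit precision falls back to the "float_precision" default (or 3).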
def get_default_df(fieldname):
if fieldname in default_fields:
if fieldname in ("creation", "modified"):
return frappe._dict(
fieldname = fieldname,
fieldtype = "Datetime"
)
else:
return frappe._dict(
fieldname = fieldname,
fieldtype = "Data"
)
def trim_tables():
"""Use this to remove columns that don't exist in meta"""
ignore_fields = default_fields + optional_fields
for doctype in frappe.db.get_all("DocType", filters={"issingle": 0}):
doctype = doctype.name
columns = frappe.db.get_table_columns(doctype)
fields = [df.fieldname for df in frappe.get_meta(doctype).fields if df.fieldtype not in no_value_fields]
columns_to_remove = [f for f in list(set(columns) - set(fields)) if f not in ignore_fields
and not f.startswith("_")]
if columns_to_remove:
print doctype, "columns removed:", columns_to_remove
columns_to_remove = ", ".join(["drop `{0}`".format(c) for c in columns_to_remove])
query = """alter table `tab{doctype}` {columns}""".format(
doctype=doctype, columns=columns_to_remove)
frappe.db.sql_ddl(query)
def clear_cache(doctype=None):
cache = frappe.cache()
for key in ('is_table', 'doctype_modules'):
cache.delete_value(key)
groups = ["meta", "form_meta", "table_columns", "last_modified", "linked_doctypes"]
def clear_single(dt):
for name in groups:
cache.hdel(name, dt)
# also clear linked_with list cache
cache.delete_keys("user:*:linked_with:{doctype}:".format(doctype=doctype))
if doctype:
clear_single(doctype)
# clear all parent doctypes
for dt in frappe.db.sql("""select parent from tabDocField
where fieldtype="Table" and options=%s""", (doctype,)):
clear_single(dt[0])
# clear all notifications
from frappe.desk.notifications import delete_notification_count_for
delete_notification_count_for(doctype)
else:
# clear all
for name in groups:
cache.delete_value(name)
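# Illustrative sketch (not part of the module): after changing a DocType's
# fields (e.g. via a Property Setter or Custom Field), the cached meta should
# be invalidated. "Sales Invoice" is only an example doctype name.
#
#   clear_cache("Sales Invoice")   # clear cached meta, table columns, etc. for one doctype
#   clear_cache()                  # or flush every cached group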
|
|
import os
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
sys.path = [os.path.abspath(os.path.dirname(os.path.dirname(__file__)))] + sys.path
os.environ['is_test_suite'] = 'True'
from auto_ml import Predictor
import dill
from nose.tools import assert_equal, assert_not_equal, with_setup
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import utils_testing as utils
def test_predict_uncertainty_true():
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, predict_intervals=True)
intervals = ml_predictor.predict_intervals(df_boston_test)
assert isinstance(intervals, pd.DataFrame)
assert intervals.shape[0] == df_boston_test.shape[0]
result_list = ml_predictor.predict_intervals(df_boston_test, return_type='list')
assert isinstance(result_list, list)
assert len(result_list) == df_boston_test.shape[0]
for idx, row in enumerate(result_list):
assert isinstance(row, list)
assert len(row) == 3
singles = df_boston_test.head().to_dict('records')
for row in singles:
result = ml_predictor.predict_intervals(row)
assert isinstance(result, dict)
assert 'prediction' in result
assert 'interval_0.05' in result
assert 'interval_0.95' in result
for row in singles:
result = ml_predictor.predict_intervals(row, return_type='list')
assert isinstance(result, list)
assert len(result) == 3
df_intervals = ml_predictor.predict_intervals(df_boston_test, return_type='df')
assert isinstance(df_intervals, pd.DataFrame)
try:
ml_predictor.predict_intervals(df_boston_test, return_type='this will not work')
assert False
except ValueError:
assert True
def test_prediction_intervals_actually_work():
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, predict_intervals=[0.05, 0.95])
df_boston_test = df_boston_test.reset_index(drop=True)
intervals = ml_predictor.predict_intervals(df_boston_test)
actuals = df_boston_test.MEDV
count_under = 0
count_over = 0
# print(intervals)
for idx, row in intervals.iterrows():
actual = actuals.iloc[idx]
if actual < row['interval_0.05']:
count_under += 1
if actual > row['interval_0.95']:
count_over += 1
len_intervals = len(intervals)
pct_under = count_under * 1.0 / len_intervals
pct_over = count_over * 1.0 / len_intervals
# There's a decent bit of noise since this is such a small dataset
assert pct_under < 0.15
assert pct_over < 0.1
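# Note on the thresholds above: with [0.05, 0.95] intervals, roughly 5% of the
# actual values should fall below the lower bound and 5% above the upper bound;
# the looser 15%/10% limits leave headroom for noise on this small dataset.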
def test_prediction_intervals_lets_the_user_specify_number_of_intervals():
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, predict_intervals=True, prediction_intervals=[.2])
intervals = ml_predictor.predict_intervals(df_boston_test, return_type='list')
assert len(intervals[0]) == 2
def test_predict_intervals_should_fail_if_not_trained():
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train)
try:
intervals = ml_predictor.predict_intervals(df_boston_test)
assert False
except ValueError:
assert True
def test_predict_intervals_takes_in_custom_intervals():
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
# df_boston_train = pd.concat([df_boston_train, df_boston_train, df_boston_train])
ml_predictor.train(df_boston_train, predict_intervals=[0.4, 0.6])
custom_intervals = ml_predictor.predict_intervals(df_boston_test, return_type='list')
assert isinstance(custom_intervals, list)
singles = df_boston_test.head().to_dict('records')
acceptable_keys = set(['prediction', 'interval_0.4', 'interval_0.6'])
for row in singles:
result = ml_predictor.predict_intervals(row)
assert isinstance(result, dict)
assert 'prediction' in result
assert 'interval_0.4' in result
assert 'interval_0.6' in result
for key in result.keys():
assert key in acceptable_keys
for row in singles:
result = ml_predictor.predict_intervals(row, return_type='list')
assert isinstance(result, list)
assert len(result) == 3
df_intervals = ml_predictor.predict_intervals(df_boston_test, return_type='df')
assert df_intervals.shape[0] == df_boston_test.shape[0]
assert isinstance(df_intervals, pd.DataFrame)
# Now make sure that the interval values are actually different
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, predict_intervals=True)
default_intervals = ml_predictor.predict_intervals(df_boston_test, return_type='list')
    # This is a flaky test, because we've got such a small dataset, and we're trying to estimate distributions from it
len_intervals = len(custom_intervals)
num_failures = 0
for idx, custom_row in enumerate(custom_intervals):
default_row = default_intervals[idx]
if int(custom_row[1]) <= int(default_row[1]):
num_failures += 1
print('{} should be higher than {}'.format(custom_row[1], default_row[1]))
if int(custom_row[2]) >= int(default_row[2]):
            print('{} should be lower than {}'.format(custom_row[2], default_row[2]))
num_failures += 1
assert num_failures < 0.18 * len_intervals
|
|
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the conductor RPC API.
"""
import random
import oslo_messaging as messaging
from ironic.common import exception
from ironic.common import hash_ring
from ironic.common.i18n import _
from ironic.common import release_mappings as versions
from ironic.common import rpc
from ironic.conductor import manager
from ironic.conf import CONF
from ironic.objects import base as objects_base
class ConductorAPI(object):
"""Client side of the conductor RPC API.
API version history:
| 1.0 - Initial version.
| Included get_node_power_status
| 1.1 - Added update_node and start_power_state_change.
| 1.2 - Added vendor_passthru.
| 1.3 - Rename start_power_state_change to change_node_power_state.
| 1.4 - Added do_node_deploy and do_node_tear_down.
| 1.5 - Added validate_driver_interfaces.
| 1.6 - change_node_power_state, do_node_deploy and do_node_tear_down
| accept node id instead of node object.
| 1.7 - Added topic parameter to RPC methods.
| 1.8 - Added change_node_maintenance_mode.
| 1.9 - Added destroy_node.
| 1.10 - Remove get_node_power_state
| 1.11 - Added get_console_information, set_console_mode.
| 1.12 - validate_vendor_action, do_vendor_action replaced by single
| vendor_passthru method.
| 1.13 - Added update_port.
| 1.14 - Added driver_vendor_passthru.
| 1.15 - Added rebuild parameter to do_node_deploy.
| 1.16 - Added get_driver_properties.
| 1.17 - Added set_boot_device, get_boot_device and
| get_supported_boot_devices.
| 1.18 - Remove change_node_maintenance_mode.
| 1.19 - Change return value of vendor_passthru and
| driver_vendor_passthru
| 1.20 - Added http_method parameter to vendor_passthru and
| driver_vendor_passthru
| 1.21 - Added get_node_vendor_passthru_methods and
| get_driver_vendor_passthru_methods
| 1.22 - Added configdrive parameter to do_node_deploy.
| 1.23 - Added do_provisioning_action
| 1.24 - Added inspect_hardware method
| 1.25 - Added destroy_port
| 1.26 - Added continue_node_clean
| 1.27 - Convert continue_node_clean to cast
| 1.28 - Change exceptions raised by destroy_node
| 1.29 - Change return value of vendor_passthru and
| driver_vendor_passthru to a dictionary
| 1.30 - Added set_target_raid_config and
| get_raid_logical_disk_properties
| 1.31 - Added Versioned Objects indirection API methods:
| object_class_action_versions, object_action and
| object_backport_versions
| 1.32 - Add do_node_clean
| 1.33 - Added update and destroy portgroup.
| 1.34 - Added heartbeat
| 1.35 - Added destroy_volume_connector and update_volume_connector
| 1.36 - Added create_node
| 1.37 - Added destroy_volume_target and update_volume_target
| 1.38 - Added vif_attach, vif_detach, vif_list
| 1.39 - Added timeout optional parameter to change_node_power_state
| 1.40 - Added inject_nmi
| 1.41 - Added create_port
"""
# NOTE(rloo): This must be in sync with manager.ConductorManager's.
# NOTE(pas-ha): This also must be in sync with
# ironic.common.release_mappings.RELEASE_MAPPING['master']
RPC_API_VERSION = '1.41'
def __init__(self, topic=None):
super(ConductorAPI, self).__init__()
self.topic = topic
if self.topic is None:
self.topic = manager.MANAGER_TOPIC
target = messaging.Target(topic=self.topic,
version='1.0')
serializer = objects_base.IronicObjectSerializer()
release_ver = versions.RELEASE_MAPPING.get(CONF.pin_release_version)
version_cap = (release_ver['rpc'] if release_ver
else self.RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap=version_cap,
serializer=serializer)
# NOTE(deva): this is going to be buggy
self.ring_manager = hash_ring.HashRingManager()
def get_topic_for(self, node):
"""Get the RPC topic for the conductor service the node is mapped to.
:param node: a node object.
:returns: an RPC topic string.
:raises: NoValidHost
"""
self.ring_manager.reset()
try:
ring = self.ring_manager[node.driver]
dest = ring.get_nodes(node.uuid.encode('utf-8'),
replicas=CONF.hash_distribution_replicas)
return '%s.%s' % (self.topic, dest.pop())
except exception.DriverNotFound:
reason = (_('No conductor service registered which supports '
'driver %s.') % node.driver)
raise exception.NoValidHost(reason=reason)
def get_topic_for_driver(self, driver_name):
"""Get RPC topic name for a conductor supporting the given driver.
The topic is used to route messages to the conductor supporting
the specified driver. A conductor is selected at random from the
set of qualified conductors.
:param driver_name: the name of the driver to route to.
:returns: an RPC topic string.
:raises: DriverNotFound
"""
self.ring_manager.reset()
ring = self.ring_manager[driver_name]
host = random.choice(list(ring.nodes))
return self.topic + "." + host
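    # Illustrative note (not part of the module): both topic helpers above
    # return a string of the form "<self.topic>.<conductor host>", which
    # callers pass back in as the `topic` argument of the RPC methods below.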
def create_node(self, context, node_obj, topic=None):
"""Synchronously, have a conductor validate and create a node.
Create the node's information in the database and return a node object.
:param context: request context.
:param node_obj: a created (but not saved) node object.
:param topic: RPC topic. Defaults to self.topic.
:returns: created node object.
:raises: InterfaceNotFoundInEntrypoint if validation fails for any
dynamic interfaces (e.g. network_interface).
:raises: NoValidDefaultForInterface if no default can be calculated
for some interfaces, and explicit values must be provided.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.36')
return cctxt.call(context, 'create_node', node_obj=node_obj)
def update_node(self, context, node_obj, topic=None):
"""Synchronously, have a conductor update the node's information.
Update the node's information in the database and return a node object.
The conductor will lock the node while it validates the supplied
information. If driver_info is passed, it will be validated by
the core drivers. If instance_uuid is passed, it will be set or unset
only if the node is properly configured.
Note that power_state should not be passed via this method.
Use change_node_power_state for initiating driver actions.
:param context: request context.
:param node_obj: a changed (but not saved) node object.
:param topic: RPC topic. Defaults to self.topic.
:returns: updated node object, including all fields.
:raises: NoValidDefaultForInterface if no default can be calculated
for some interfaces, and explicit values must be provided.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.1')
return cctxt.call(context, 'update_node', node_obj=node_obj)
def change_node_power_state(self, context, node_id, new_state,
topic=None, timeout=None):
"""Change a node's power state.
Synchronously, acquire lock and start the conductor background task
to change power state of a node.
:param context: request context.
:param node_id: node id or uuid.
:param new_state: one of ironic.common.states power state values
:param timeout: timeout (in seconds) positive integer (> 0) for any
power state. ``None`` indicates to use default timeout.
:param topic: RPC topic. Defaults to self.topic.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.39')
return cctxt.call(context, 'change_node_power_state', node_id=node_id,
new_state=new_state, timeout=timeout)
def vendor_passthru(self, context, node_id, driver_method, http_method,
info, topic=None):
"""Receive requests for vendor-specific actions.
Synchronously validate driver specific info or get driver status,
and if successful invokes the vendor method. If the method mode
is async the conductor will start background worker to perform
vendor action.
:param context: request context.
:param node_id: node id or uuid.
:param driver_method: name of method for driver.
:param http_method: the HTTP method used for the request.
:param info: info for node driver.
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue if supplied info is not valid.
:raises: MissingParameterValue if a required parameter is missing
:raises: UnsupportedDriverExtension if current driver does not have
vendor interface.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
:raises: NodeLocked if node is locked by another conductor.
:returns: A dictionary containing:
:return: The response of the invoked vendor method
:async: Boolean value. Whether the method was invoked
asynchronously (True) or synchronously (False). When invoked
asynchronously the response will be always None.
:attach: Boolean value. Whether to attach the response of
the invoked vendor method to the HTTP response object (True)
or return it in the response body (False).
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.20')
return cctxt.call(context, 'vendor_passthru', node_id=node_id,
driver_method=driver_method,
http_method=http_method,
info=info)
def driver_vendor_passthru(self, context, driver_name, driver_method,
http_method, info, topic=None):
"""Pass vendor-specific calls which don't specify a node to a driver.
Handles driver-level vendor passthru calls. These calls don't
require a node UUID and are executed on a random conductor with
the specified driver. If the method mode is async the conductor
will start background worker to perform vendor action.
:param context: request context.
:param driver_name: name of the driver on which to call the method.
:param driver_method: name of the vendor method, for use by the driver.
:param http_method: the HTTP method used for the request.
:param info: data to pass through to the driver.
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue for parameter errors.
:raises: MissingParameterValue if a required parameter is missing
:raises: UnsupportedDriverExtension if the driver doesn't have a vendor
interface, or if the vendor interface does not support the
specified driver_method.
:raises: DriverNotFound if the supplied driver is not loaded.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
:raises: InterfaceNotFoundInEntrypoint if the default interface for a
hardware type is invalid.
:raises: NoValidDefaultForInterface if no default interface
implementation can be found for this driver's vendor
interface.
:returns: A dictionary containing:
:return: The response of the invoked vendor method
:async: Boolean value. Whether the method was invoked
asynchronously (True) or synchronously (False). When invoked
asynchronously the response will be always None.
:attach: Boolean value. Whether to attach the response of
the invoked vendor method to the HTTP response object (True)
or return it in the response body (False).
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.20')
return cctxt.call(context, 'driver_vendor_passthru',
driver_name=driver_name,
driver_method=driver_method,
http_method=http_method,
info=info)
def get_node_vendor_passthru_methods(self, context, node_id, topic=None):
"""Retrieve information about vendor methods of the given node.
:param context: an admin context.
:param node_id: the id or uuid of a node.
:param topic: RPC topic. Defaults to self.topic.
:returns: dictionary of <method name>:<method metadata> entries.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.21')
return cctxt.call(context, 'get_node_vendor_passthru_methods',
node_id=node_id)
def get_driver_vendor_passthru_methods(self, context, driver_name,
topic=None):
"""Retrieve information about vendor methods of the given driver.
:param context: an admin context.
:param driver_name: name of the driver.
:param topic: RPC topic. Defaults to self.topic.
:raises: UnsupportedDriverExtension if current driver does not have
vendor interface.
:raises: DriverNotFound if the supplied driver is not loaded.
:raises: InterfaceNotFoundInEntrypoint if the default interface for a
hardware type is invalid.
:raises: NoValidDefaultForInterface if no default interface
implementation can be found for this driver's vendor
interface.
:returns: dictionary of <method name>:<method metadata> entries.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.21')
return cctxt.call(context, 'get_driver_vendor_passthru_methods',
driver_name=driver_name)
def do_node_deploy(self, context, node_id, rebuild, configdrive,
topic=None):
"""Signal to conductor service to perform a deployment.
:param context: request context.
:param node_id: node id or uuid.
:param rebuild: True if this is a rebuild request.
:param configdrive: A gzipped and base64 encoded configdrive.
:param topic: RPC topic. Defaults to self.topic.
:raises: InstanceDeployFailure
:raises: InvalidParameterValue if validation fails
:raises: MissingParameterValue if a required parameter is missing
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
The node must already be configured and in the appropriate
undeployed state before this method is called.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.22')
return cctxt.call(context, 'do_node_deploy', node_id=node_id,
rebuild=rebuild, configdrive=configdrive)
def do_node_tear_down(self, context, node_id, topic=None):
"""Signal to conductor service to tear down a deployment.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: InstanceDeployFailure
:raises: InvalidParameterValue if validation fails
:raises: MissingParameterValue if a required parameter is missing
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
The node must already be configured and in the appropriate
deployed state before this method is called.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.6')
return cctxt.call(context, 'do_node_tear_down', node_id=node_id)
def do_provisioning_action(self, context, node_id, action, topic=None):
"""Signal to conductor service to perform the given action on a node.
:param context: request context.
:param node_id: node id or uuid.
:param action: an action. One of ironic.common.states.VERBS
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
:raises: InvalidStateRequested if the requested action can not
be performed.
This encapsulates some provisioning actions in a single call.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.23')
return cctxt.call(context, 'do_provisioning_action',
node_id=node_id, action=action)
def continue_node_clean(self, context, node_id, topic=None):
"""Signal to conductor service to start the next cleaning action.
NOTE(JoshNang) this is an RPC cast, there will be no response or
exception raised by the conductor for this RPC.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.27')
return cctxt.cast(context, 'continue_node_clean',
node_id=node_id)
def validate_driver_interfaces(self, context, node_id, topic=None):
"""Validate the `core` and `standardized` interfaces for drivers.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:returns: a dictionary containing the results of each
interface validation.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.5')
return cctxt.call(context, 'validate_driver_interfaces',
node_id=node_id)
def destroy_node(self, context, node_id, topic=None):
"""Delete a node.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: NodeAssociated if the node contains an instance
associated with it.
:raises: InvalidState if the node is in the wrong provision
state to perform deletion.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.9')
return cctxt.call(context, 'destroy_node', node_id=node_id)
def get_console_information(self, context, node_id, topic=None):
"""Get connection information about the console.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support console.
:raises: InvalidParameterValue when the wrong driver info is specified.
:raises: MissingParameterValue if a required parameter is missing
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.11')
return cctxt.call(context, 'get_console_information', node_id=node_id)
def set_console_mode(self, context, node_id, enabled, topic=None):
"""Enable/Disable the console.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:param enabled: Boolean value; whether the console is enabled or
disabled.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support console.
:raises: InvalidParameterValue when the wrong driver info is specified.
:raises: MissingParameterValue if a required parameter is missing
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.11')
return cctxt.call(context, 'set_console_mode', node_id=node_id,
enabled=enabled)
def create_port(self, context, port_obj, topic=None):
"""Synchronously, have a conductor validate and create a port.
Create the port's information in the database and return a port object.
The conductor will lock related node and trigger specific driver
actions if they are needed.
:param context: request context.
:param port_obj: a created (but not saved) port object.
:param topic: RPC topic. Defaults to self.topic.
:returns: created port object.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.41')
return cctxt.call(context, 'create_port', port_obj=port_obj)
def update_port(self, context, port_obj, topic=None):
"""Synchronously, have a conductor update the port's information.
Update the port's information in the database and return a port object.
The conductor will lock related node and trigger specific driver
actions if they are needed.
:param context: request context.
:param port_obj: a changed (but not saved) port object.
:param topic: RPC topic. Defaults to self.topic.
:returns: updated port object, including all fields.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.13')
return cctxt.call(context, 'update_port', port_obj=port_obj)
def update_portgroup(self, context, portgroup_obj, topic=None):
"""Synchronously, have a conductor update the portgroup's information.
Update the portgroup's information in the database and return a
portgroup object.
The conductor will lock related node and trigger specific driver
actions if they are needed.
:param context: request context.
:param portgroup_obj: a changed (but not saved) portgroup object.
:param topic: RPC topic. Defaults to self.topic.
:returns: updated portgroup object, including all fields.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.33')
return cctxt.call(context, 'update_portgroup',
portgroup_obj=portgroup_obj)
def destroy_portgroup(self, context, portgroup, topic=None):
"""Delete a portgroup.
:param context: request context.
:param portgroup: portgroup object
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: NodeNotFound if the node associated with the portgroup does
not exist.
:raises: PortgroupNotEmpty if portgroup is not empty
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.33')
return cctxt.call(context, 'destroy_portgroup', portgroup=portgroup)
def get_driver_properties(self, context, driver_name, topic=None):
"""Get the properties of the driver.
:param context: request context.
:param driver_name: name of the driver.
:param topic: RPC topic. Defaults to self.topic.
:returns: a dictionary with <property name>:<property description>
entries.
:raises: DriverNotFound.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.16')
return cctxt.call(context, 'get_driver_properties',
driver_name=driver_name)
def set_boot_device(self, context, node_id, device, persistent=False,
topic=None):
"""Set the boot device for a node.
Set the boot device to use on next reboot of the node. Be aware
that not all drivers support this.
:param context: request context.
:param node_id: node id or uuid.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Whether to set next-boot, or make the change
permanent. Default: False.
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support management.
:raises: InvalidParameterValue when the wrong driver info is
specified or an invalid boot device is specified.
:raises: MissingParameterValue if missing supplied info.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.17')
return cctxt.call(context, 'set_boot_device', node_id=node_id,
device=device, persistent=persistent)
def get_boot_device(self, context, node_id, topic=None):
"""Get the current boot device.
Returns the current boot device of a node.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support management.
:raises: InvalidParameterValue when the wrong driver info is
specified.
:raises: MissingParameterValue if missing supplied info.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.17')
return cctxt.call(context, 'get_boot_device', node_id=node_id)
def inject_nmi(self, context, node_id, topic=None):
"""Inject NMI for a node.
Inject NMI (Non Maskable Interrupt) for a node immediately.
Be aware that not all drivers support this.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support management or management.inject_nmi.
:raises: InvalidParameterValue when the wrong driver info is
specified or an invalid boot device is specified.
:raises: MissingParameterValue if missing supplied info.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.40')
return cctxt.call(context, 'inject_nmi', node_id=node_id)
def get_supported_boot_devices(self, context, node_id, topic=None):
"""Get the list of supported devices.
Returns the list of supported boot devices of a node.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support management.
:raises: InvalidParameterValue when the wrong driver info is
specified.
:raises: MissingParameterValue if missing supplied info.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.17')
return cctxt.call(context, 'get_supported_boot_devices',
node_id=node_id)
def inspect_hardware(self, context, node_id, topic=None):
"""Signals the conductor service to perform hardware introspection.
:param context: request context.
:param node_id: node id or uuid.
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: HardwareInspectionFailure
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support inspection.
:raises: InvalidStateRequested if 'inspect' is not a valid
action to do in the current state.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.24')
return cctxt.call(context, 'inspect_hardware', node_id=node_id)
def destroy_port(self, context, port, topic=None):
"""Delete a port.
:param context: request context.
:param port: port object
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor.
:raises: NodeNotFound if the node associated with the port does not
exist.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.25')
return cctxt.call(context, 'destroy_port', port=port)
def set_target_raid_config(self, context, node_id, target_raid_config,
topic=None):
"""Stores the target RAID configuration on the node.
Stores the target RAID configuration on node.target_raid_config
:param context: request context.
:param node_id: node id or uuid.
:param target_raid_config: Dictionary containing the target RAID
configuration. It may be an empty dictionary as well.
:param topic: RPC topic. Defaults to self.topic.
:raises: UnsupportedDriverExtension if the node's driver doesn't
support RAID configuration.
:raises: InvalidParameterValue, if validation of target raid config
fails.
:raises: MissingParameterValue, if some required parameters are
missing.
:raises: NodeLocked if node is locked by another conductor.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.30')
return cctxt.call(context, 'set_target_raid_config',
node_id=node_id,
target_raid_config=target_raid_config)
def get_raid_logical_disk_properties(self, context, driver_name,
topic=None):
"""Get the logical disk properties for RAID configuration.
Gets the information about logical disk properties which can
be specified in the input RAID configuration.
:param context: request context.
:param driver_name: name of the driver
:param topic: RPC topic. Defaults to self.topic.
:raises: UnsupportedDriverExtension if the driver doesn't
support RAID configuration.
:raises: InterfaceNotFoundInEntrypoint if the default interface for a
hardware type is invalid.
:raises: NoValidDefaultForInterface if no default interface
implementation can be found for this driver's RAID
interface.
:returns: A dictionary containing the properties that can be mentioned
for logical disks and a textual description for them.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.30')
return cctxt.call(context, 'get_raid_logical_disk_properties',
driver_name=driver_name)
def do_node_clean(self, context, node_id, clean_steps, topic=None):
"""Signal to conductor service to perform manual cleaning on a node.
:param context: request context.
:param node_id: node ID or UUID.
:param clean_steps: a list of clean step dictionaries.
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue if validation of power driver interface
failed.
:raises: InvalidStateRequested if cleaning can not be performed.
:raises: NodeInMaintenance if node is in maintenance mode.
:raises: NodeLocked if node is locked by another conductor.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.32')
return cctxt.call(context, 'do_node_clean',
node_id=node_id, clean_steps=clean_steps)
def heartbeat(self, context, node_id, callback_url, topic=None):
"""Process a node heartbeat.
:param context: request context.
:param node_id: node ID or UUID.
:param callback_url: URL to reach back to the ramdisk.
:param topic: RPC topic. Defaults to self.topic.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.34')
return cctxt.call(context, 'heartbeat', node_id=node_id,
callback_url=callback_url)
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
"""Perform an action on a VersionedObject class.
We want any conductor to handle this, so it is intentional that there
is no topic argument for this method.
:param context: The context within which to perform the action
:param objname: The registry name of the object
:param objmethod: The name of the action method to call
:param object_versions: A dict of {objname: version} mappings
:param args: The positional arguments to the action method
:param kwargs: The keyword arguments to the action method
:raises: NotImplementedError when an operator makes an error during
upgrade
:returns: The result of the action method, which may (or may not)
be an instance of the implementing VersionedObject class.
"""
if not self.client.can_send_version('1.31'):
raise NotImplementedError(_('Incompatible conductor version - '
'please upgrade ironic-conductor '
'first'))
cctxt = self.client.prepare(topic=self.topic, version='1.31')
return cctxt.call(context, 'object_class_action_versions',
objname=objname, objmethod=objmethod,
object_versions=object_versions,
args=args, kwargs=kwargs)
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on a VersionedObject instance.
We want any conductor to handle this, so it is intentional that there
is no topic argument for this method.
:param context: The context within which to perform the action
:param objinst: The object instance on which to perform the action
:param objmethod: The name of the action method to call
:param args: The positional arguments to the action method
:param kwargs: The keyword arguments to the action method
:raises: NotImplementedError when an operator makes an error during
upgrade
:returns: A tuple with the updates made to the object and
the result of the action method
"""
if not self.client.can_send_version('1.31'):
raise NotImplementedError(_('Incompatible conductor version - '
'please upgrade ironic-conductor '
'first'))
cctxt = self.client.prepare(topic=self.topic, version='1.31')
return cctxt.call(context, 'object_action', objinst=objinst,
objmethod=objmethod, args=args, kwargs=kwargs)
def object_backport_versions(self, context, objinst, object_versions):
"""Perform a backport of an object instance.
The default behavior of the base VersionedObjectSerializer, upon
receiving an object with a version newer than what is in the local
registry, is to call this method to request a backport of the object.
We want any conductor to handle this, so it is intentional that there
is no topic argument for this method.
:param context: The context within which to perform the backport
:param objinst: An instance of a VersionedObject to be backported
:param object_versions: A dict of {objname: version} mappings
:raises: NotImplementedError when an operator makes an error during
upgrade
:returns: The downgraded instance of objinst
"""
if not self.client.can_send_version('1.31'):
raise NotImplementedError(_('Incompatible conductor version - '
'please upgrade ironic-conductor '
'first'))
cctxt = self.client.prepare(topic=self.topic, version='1.31')
return cctxt.call(context, 'object_backport_versions', objinst=objinst,
object_versions=object_versions)
def destroy_volume_connector(self, context, connector, topic=None):
"""Delete a volume connector.
Delete the volume connector. The conductor will lock the related node
during this operation.
:param context: request context
:param connector: volume connector object
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor
:raises: NodeNotFound if the node associated with the connector does
not exist
:raises: VolumeConnectorNotFound if the volume connector cannot be
found
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.35')
return cctxt.call(context, 'destroy_volume_connector',
connector=connector)
def update_volume_connector(self, context, connector, topic=None):
"""Update the volume connector's information.
Update the volume connector's information in the database and return
a volume connector object. The conductor will lock the related node
during this operation.
:param context: request context
:param connector: a changed (but not saved) volume connector object
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue if the volume connector's UUID is being
changed
:raises: NodeLocked if node is locked by another conductor
:raises: NodeNotFound if the node associated with the connector does
not exist
:raises: VolumeConnectorNotFound if the volume connector cannot be
found
:raises: VolumeConnectorTypeAndIdAlreadyExists if another connector
already exists with the same values for type and connector_id
fields
:returns: updated volume connector object, including all fields.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.35')
return cctxt.call(context, 'update_volume_connector',
connector=connector)
def destroy_volume_target(self, context, target, topic=None):
"""Delete a volume target.
:param context: request context
:param target: volume target object
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked if node is locked by another conductor
:raises: NodeNotFound if the node associated with the target does
not exist
:raises: VolumeTargetNotFound if the volume target cannot be found
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.37')
return cctxt.call(context, 'destroy_volume_target',
target=target)
def update_volume_target(self, context, target, topic=None):
"""Update the volume target's information.
Update the volume target's information in the database and return a
volume target object. The conductor will lock the related node during
this operation.
:param context: request context
:param target: a changed (but not saved) volume target object
:param topic: RPC topic. Defaults to self.topic.
:raises: InvalidParameterValue if the volume target's UUID is being
changed
:raises: NodeLocked if the node is already locked
:raises: NodeNotFound if the node associated with the volume target
does not exist
:raises: VolumeTargetNotFound if the volume target cannot be found
:raises: VolumeTargetBootIndexAlreadyExists if a volume target already
exists with the same node ID and boot index values
:returns: updated volume target object, including all fields
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.37')
return cctxt.call(context, 'update_volume_target',
target=target)
def vif_attach(self, context, node_id, vif_info, topic=None):
"""Attach VIF to a node
:param context: request context.
:param node_id: node ID or UUID.
:param vif_info: a dictionary representing VIF object.
It must have an 'id' key, whose value is a unique
identifier for that VIF.
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked, if node has an exclusive lock held on it
:raises: NetworkError, if an error occurs during attaching the VIF.
:raises: InvalidParameterValue, if a parameter that's required for
VIF attach is wrong/missing.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.38')
return cctxt.call(context, 'vif_attach', node_id=node_id,
vif_info=vif_info)
def vif_detach(self, context, node_id, vif_id, topic=None):
"""Detach VIF from a node
:param context: request context.
:param node_id: node ID or UUID.
:param vif_id: an ID of a VIF.
:param topic: RPC topic. Defaults to self.topic.
:raises: NodeLocked, if node has an exclusive lock held on it
:raises: NetworkError, if an error occurs during detaching the VIF.
:raises: InvalidParameterValue, if a parameter that's required for
VIF detach is wrong/missing.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.38')
return cctxt.call(context, 'vif_detach', node_id=node_id,
vif_id=vif_id)
def vif_list(self, context, node_id, topic=None):
"""List attached VIFs for a node
:param context: request context.
:param node_id: node ID or UUID.
:param topic: RPC topic. Defaults to self.topic.
:returns: List of VIF dictionaries, each dictionary will have an
'id' entry with the ID of the VIF.
:raises: NetworkError, if an error occurs during listing the VIFs.
:raises: InvalidParameterValue, if a parameter that's required for
VIF list is wrong/missing.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.38')
return cctxt.call(context, 'vif_list', node_id=node_id)
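# Illustrative sketch (not part of the module): a typical call pattern from an
# API-service caller, assuming a request context and a node object are at hand.
#
#   from ironic.common import states
#
#   conductor_api = ConductorAPI()
#   topic = conductor_api.get_topic_for(node)
#   conductor_api.change_node_power_state(context, node.uuid,
#                                         states.POWER_ON, topic=topic)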
|
|
import urllib
import sys
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import got_request_exception
from django.http import SimpleCookie, HttpRequest
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry
from django.utils.encoding import smart_str
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
class FakePayload(object):
"""
A wrapper around StringIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in Real Life.
"""
def __init__(self, content):
self.__content = StringIO(content)
self.__len = len(content)
def read(self, num_bytes=None):
if num_bytes is None:
num_bytes = self.__len or 1
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
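# Illustrative sketch (not part of the module): FakePayload enforces the same
# content-length limit that real network input has.
#
#   payload = FakePayload('abcdef')
#   payload.read(4)   # -> 'abcd'
#   payload.read(2)   # -> 'ef'
#   payload.read(1)   # AssertionError: nothing left to read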
class ClientHandler(BaseHandler):
"""
    An HTTP Handler that can be used for testing purposes.
    Uses the WSGI interface to compose requests, but returns
    the raw HttpResponse object.
"""
def __call__(self, environ):
from django.conf import settings
from django.core import signals
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
signals.request_started.send(sender=self.__class__)
try:
request = WSGIRequest(environ)
response = self.get_response(request)
# Apply response middleware.
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
finally:
signals.request_finished.send(sender=self.__class__)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
"""
store.setdefault('template',[]).append(template)
store.setdefault('context',[]).append(context)
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_str = lambda s: smart_str(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, basestring) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"' % to_str(key),
'',
to_str(item)
])
else:
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"' % to_str(key),
'',
to_str(value)
])
lines.extend([
'--' + boundary + '--',
'',
])
return '\r\n'.join(lines)
def encode_file(boundary, key, file):
to_str = lambda s: smart_str(s, settings.DEFAULT_CHARSET)
return [
'--' + boundary,
'Content-Disposition: form-data; name="%s"; filename="%s"' \
% (to_str(key), to_str(os.path.basename(file.name))),
'Content-Type: application/octet-stream',
'',
file.read()
]
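# Hedged usage sketch (assumes Django settings are configured so DEFAULT_CHARSET is
# available; field names are illustrative): encode a form with a repeated field and a
# file upload. Anything exposing .read() is treated as a file; 'upload' is assumed to
# be an open file object, since encode_file() also needs its .name attribute.
def _demo_encode_multipart(upload):
    body = encode_multipart(BOUNDARY, {
        'title': 'hello',
        'tags': ['a', 'b'],          # duplicated HTTP field names are legal
        'attachment': upload,
    })
    return body, MULTIPART_CONTENT   # ready to send with this content type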
class Client:
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, **defaults):
self.handler = ClientHandler()
self.defaults = defaults
self.cookies = SimpleCookie()
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = {
'HTTP_COOKIE': self.cookies,
'PATH_INFO': '/',
'QUERY_STRING': '',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
}
environ.update(self.defaults)
environ.update(request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signals.template_rendered.connect(on_template_render)
# Capture exceptions created by the handler.
got_request_exception.connect(self.store_exc_info)
try:
response = self.handler(environ)
except TemplateDoesNotExist, e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
raise exc_info[1], None, exc_info[2]
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
# If there was only one template rendered (the most likely case),
# flatten the list to a single element.
for detail in ('template', 'context'):
if data.get(detail):
if len(data[detail]) == 1:
setattr(response, detail, data[detail][0])
else:
setattr(response, detail, data[detail])
else:
setattr(response, detail, None)
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
def get(self, path, data={}, **extra):
"""
Requests a response from the server using GET.
"""
r = {
'CONTENT_LENGTH': None,
'CONTENT_TYPE': 'text/html; charset=utf-8',
'PATH_INFO': urllib.unquote(path),
'QUERY_STRING': urlencode(data, doseq=True),
'REQUEST_METHOD': 'GET',
}
r.update(extra)
return self.request(**r)
def post(self, path, data={}, content_type=MULTIPART_CONTENT, **extra):
"""
Requests a response from the server using POST.
"""
if content_type is MULTIPART_CONTENT:
post_data = encode_multipart(BOUNDARY, data)
else:
post_data = data
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': urllib.unquote(path),
'REQUEST_METHOD': 'POST',
'wsgi.input': FakePayload(post_data),
}
r.update(extra)
return self.request(**r)
def login(self, **credentials):
"""
Sets the Client to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
user = authenticate(**credentials)
if user and user.is_active \
and 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
# Save the session values.
request.session.save()
return True
else:
return False
def logout(self):
"""
Removes the authenticated user's cookies.
Causes the authenticated user to be logged out.
"""
session = __import__(settings.SESSION_ENGINE, {}, {}, ['']).SessionStore()
session.delete(session_key=self.cookies[settings.SESSION_COOKIE_NAME].value)
self.cookies = SimpleCookie()
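# Hedged usage sketch (assumes Django settings, a URLconf and, for login, the sessions
# framework are configured; the paths and credentials are illustrative): the Client
# builds WSGI environs directly, so no server needs to be running.
def _demo_client_usage():
    c = Client()
    response = c.get('/articles/', {'q': 'term'})   # GET with a query string
    rendered = response.template                    # template(s) used by the view
    c.post('/articles/new/', {'title': 'hello'})    # multipart POST by default
    if c.login(username='alice', password='secret'):
        c.logout()
    return rendered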
|
|
"""
@package mi.instrument.nortek.vector.ooicore.driver
@file mi/instrument/nortek/vector/ooicore/driver.py
@author Rachel Manoni, Ronald Ronquillo
@brief Driver for the ooicore
Release notes:
Driver for vector
"""
__author__ = 'Rachel Manoni, Ronald Ronquillo'
__license__ = 'Apache 2.0'
import re
import base64
import struct
from mi.core.exceptions import SampleException
from mi.core.common import BaseEnum, Units
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.instrument.data_particle import DataParticle, DataParticleKey, DataParticleValue
from mi.instrument.nortek.driver import NortekDataParticleType, Parameter, InstrumentCmds, \
USER_CONFIG_DATA_REGEX, validate_checksum
from mi.instrument.nortek.driver import NortekInstrumentDriver
from mi.instrument.nortek.driver import NortekInstrumentProtocol
from mi.instrument.nortek.driver import NortekProtocolParameterDict
from mi.instrument.nortek.driver import InstrumentPrompts
from mi.instrument.nortek.driver import NEWLINE
from mi.core.log import get_logger
log = get_logger()
VELOCITY_DATA_LEN = 24
VELOCITY_DATA_SYNC_BYTES = '\xa5\x10'
SYSTEM_DATA_LEN = 28
SYSTEM_DATA_SYNC_BYTES = '\xa5\x11\x0e\x00'
VELOCITY_HEADER_DATA_LEN = 42
VELOCITY_HEADER_DATA_SYNC_BYTES = '\xa5\x12\x15\x00'
VELOCITY_DATA_PATTERN = r'%s.{22}' % VELOCITY_DATA_SYNC_BYTES
VELOCITY_DATA_REGEX = re.compile(VELOCITY_DATA_PATTERN, re.DOTALL)
SYSTEM_DATA_PATTERN = r'%s.{24}' % SYSTEM_DATA_SYNC_BYTES
SYSTEM_DATA_REGEX = re.compile(SYSTEM_DATA_PATTERN, re.DOTALL)
VELOCITY_HEADER_DATA_PATTERN = r'%s.{38}' % VELOCITY_HEADER_DATA_SYNC_BYTES
VELOCITY_HEADER_DATA_REGEX = re.compile(VELOCITY_HEADER_DATA_PATTERN, re.DOTALL)
VECTOR_SAMPLE_REGEX = [VELOCITY_DATA_REGEX, SYSTEM_DATA_REGEX, VELOCITY_HEADER_DATA_REGEX]
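# Illustrative consistency check (demo helper, not driver code): the record lengths
# above line up with the struct formats used by the particle classes below, e.g. a
# velocity record is the 2 sync bytes plus 22 payload bytes = VELOCITY_DATA_LEN.
def _demo_record_lengths():
    assert struct.calcsize('<2s4B2H3h6BH') == VELOCITY_DATA_LEN           # 24 bytes
    assert struct.calcsize('<4s6sH8B20sH') == VELOCITY_HEADER_DATA_LEN    # 42 bytes
    assert struct.calcsize('<4s6s2H4h2bHH') == SYSTEM_DATA_LEN            # 28 bytes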
class DataParticleType(NortekDataParticleType):
"""
List of data particles to collect
"""
VELOCITY = 'vel3d_cd_velocity_data'
VELOCITY_HEADER = 'vel3d_cd_data_header'
SYSTEM = 'vel3d_cd_system_data'
class VectorVelocityDataParticleKey(BaseEnum):
"""
Velocity Data Particles
"""
ANALOG_INPUT2 = "analog_input_2"
COUNT = "ensemble_counter"
PRESSURE = "seawater_pressure_mbar"
ANALOG_INPUT1 = "analog_input_1"
VELOCITY_BEAM1 = "turbulent_velocity_east"
VELOCITY_BEAM2 = "turbulent_velocity_north"
VELOCITY_BEAM3 = "turbulent_velocity_vertical"
AMPLITUDE_BEAM1 = "amplitude_beam_1"
AMPLITUDE_BEAM2 = "amplitude_beam_2"
AMPLITUDE_BEAM3 = "amplitude_beam_3"
CORRELATION_BEAM1 = "correlation_beam_1"
CORRELATION_BEAM2 = "correlation_beam_2"
CORRELATION_BEAM3 = "correlation_beam_3"
class VectorVelocityDataParticle(DataParticle):
"""
Routine for parsing velocity data into a data particle structure for the Vector sensor.
"""
_data_particle_type = DataParticleType.VELOCITY
def _build_parsed_values(self):
"""
Take the velocity data sample format and parse it into
values with appropriate tags.
@throws SampleException If there is a problem with sample creation
"""
log.debug('VectorVelocityDataParticle: raw data =%r', self.raw_data)
try:
unpack_string = '<2s4B2H3h6BH'
sync_id, analog_input2_lsb, count, pressure_msb, analog_input2_msb, pressure_lsw, analog_input1,\
velocity_beam1, velocity_beam2, velocity_beam3, amplitude_beam1, amplitude_beam2, amplitude_beam3, \
correlation_beam1, correlation_beam2, correlation_beam3, checksum = struct.unpack(unpack_string, self.raw_data)
if not validate_checksum('<11H', self.raw_data, checksum):
log.warn("Bad vel3d_cd_velocity_data from instrument (%r)", self.raw_data)
self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED
analog_input2 = analog_input2_msb * 0x100 + analog_input2_lsb
pressure = pressure_msb * 0x10000 + pressure_lsw
except Exception:
log.error('Error creating particle vel3d_cd_velocity_data, raw data: %r', self.raw_data)
raise SampleException
result = [{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.ANALOG_INPUT2, DataParticleKey.VALUE: analog_input2},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.COUNT, DataParticleKey.VALUE: count},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.PRESSURE, DataParticleKey.VALUE: pressure},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.ANALOG_INPUT1, DataParticleKey.VALUE: analog_input1},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.VELOCITY_BEAM1, DataParticleKey.VALUE: velocity_beam1},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.VELOCITY_BEAM2, DataParticleKey.VALUE: velocity_beam2},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.VELOCITY_BEAM3, DataParticleKey.VALUE: velocity_beam3},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.AMPLITUDE_BEAM1, DataParticleKey.VALUE: amplitude_beam1},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.AMPLITUDE_BEAM2, DataParticleKey.VALUE: amplitude_beam2},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.AMPLITUDE_BEAM3, DataParticleKey.VALUE: amplitude_beam3},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.CORRELATION_BEAM1, DataParticleKey.VALUE: correlation_beam1},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.CORRELATION_BEAM2, DataParticleKey.VALUE: correlation_beam2},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.CORRELATION_BEAM3, DataParticleKey.VALUE: correlation_beam3}]
log.debug('VectorVelocityDataParticle: particle=%s', result)
return result
class VectorVelocityHeaderDataParticleKey(BaseEnum):
"""
Velocity Header data particles
"""
TIMESTAMP = "date_time_string"
NUMBER_OF_RECORDS = "number_velocity_records"
NOISE1 = "noise_amp_beam1"
NOISE2 = "noise_amp_beam2"
NOISE3 = "noise_amp_beam3"
CORRELATION1 = "noise_correlation_beam1"
CORRELATION2 = "noise_correlation_beam2"
CORRELATION3 = "noise_correlation_beam3"
class VectorVelocityHeaderDataParticle(DataParticle):
"""
Routine for parsing velocity header data into a data particle structure for the Vector sensor.
"""
_data_particle_type = DataParticleType.VELOCITY_HEADER
def _build_parsed_values(self):
"""
Take the velocity header data sample format and parse it into
values with appropriate tags.
@throws SampleException If there is a problem with sample creation
"""
log.debug('VectorVelocityHeaderDataParticle: raw data =%r', self.raw_data)
try:
unpack_string = '<4s6sH8B20sH'
sync, timestamp, number_of_records, noise1, noise2, noise3, _, correlation1, correlation2, correlation3, _,\
_, cksum = struct.unpack(unpack_string, self.raw_data)
if not validate_checksum('<20H', self.raw_data, cksum):
log.warn("Bad vel3d_cd_data_header from instrument (%r)", self.raw_data)
self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED
timestamp = NortekProtocolParameterDict.convert_time(timestamp)
except Exception:
log.error('Error creating particle vel3d_cd_data_header, raw data: %r', self.raw_data)
raise SampleException
result = [{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.TIMESTAMP, DataParticleKey.VALUE: timestamp},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.NUMBER_OF_RECORDS, DataParticleKey.VALUE: number_of_records},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.NOISE1, DataParticleKey.VALUE: noise1},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.NOISE2, DataParticleKey.VALUE: noise2},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.NOISE3, DataParticleKey.VALUE: noise3},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.CORRELATION1, DataParticleKey.VALUE: correlation1},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.CORRELATION2, DataParticleKey.VALUE: correlation2},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.CORRELATION3, DataParticleKey.VALUE: correlation3}]
log.debug('VectorVelocityHeaderDataParticle: particle=%s', result)
return result
class VectorSystemDataParticleKey(BaseEnum):
"""
System data particles
"""
TIMESTAMP = "date_time_string"
BATTERY = "battery_voltage_dv"
SOUND_SPEED = "sound_speed_dms"
HEADING = "heading_decidegree"
PITCH = "pitch_decidegree"
ROLL = "roll_decidegree"
TEMPERATURE = "temperature_centidegree"
ERROR = "error_code"
STATUS = "status_code"
ANALOG_INPUT = "analog_input"
class VectorSystemDataParticle(DataParticle):
"""
Routine for parsing system data into a data particle structure for the Vector sensor.
"""
_data_particle_type = DataParticleType.SYSTEM
def _build_parsed_values(self):
"""
Take the system data sample format and parse it into
values with appropriate tags.
@throws SampleException If there is a problem with sample creation
"""
log.debug('VectorSystemDataParticle: raw data =%r', self.raw_data)
try:
unpack_string = '<4s6s2H4h2bHH'
sync, timestamp, battery, sound_speed, heading, pitch, roll, temperature, error, status, analog_input, cksum =\
struct.unpack_from(unpack_string, self.raw_data)
if not validate_checksum('<13H', self.raw_data, cksum):
log.warn("Bad vel3d_cd_system_data from instrument (%r)", self.raw_data)
self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED
timestamp = NortekProtocolParameterDict.convert_time(timestamp)
except Exception:
log.error('Error creating particle vel3d_cd_system_data, raw data: %r', self.raw_data)
raise SampleException
result = [{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.TIMESTAMP, DataParticleKey.VALUE: timestamp},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.BATTERY, DataParticleKey.VALUE: battery},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.SOUND_SPEED, DataParticleKey.VALUE: sound_speed},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.HEADING, DataParticleKey.VALUE: heading},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.PITCH, DataParticleKey.VALUE: pitch},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.ROLL, DataParticleKey.VALUE: roll},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.TEMPERATURE, DataParticleKey.VALUE: temperature},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.ERROR, DataParticleKey.VALUE: error},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.STATUS, DataParticleKey.VALUE: status},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.ANALOG_INPUT, DataParticleKey.VALUE: analog_input}]
log.debug('VectorSystemDataParticle: particle=%r', result)
return result
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(NortekInstrumentDriver):
"""
InstrumentDriver subclass
Subclasses SingleConnectionInstrumentDriver with connection state
machine.
"""
def __init__(self, evt_callback):
"""
Driver constructor.
@param evt_callback Driver process event callback.
"""
NortekInstrumentDriver.__init__(self, evt_callback)
########################################################################
# Protocol builder.
########################################################################
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = Protocol(InstrumentPrompts, NEWLINE, self._driver_event)
###############################################################################
# Protocol
################################################################################
class Protocol(NortekInstrumentProtocol):
"""
Instrument protocol class
Subclasses NortekInstrumentProtocol
"""
NortekInstrumentProtocol.velocity_data_regex.extend(VECTOR_SAMPLE_REGEX)
NortekInstrumentProtocol.velocity_sync_bytes = VELOCITY_DATA_SYNC_BYTES
order_of_user_config = [
Parameter.TRANSMIT_PULSE_LENGTH,
Parameter.BLANKING_DISTANCE,
Parameter.RECEIVE_LENGTH,
Parameter.TIME_BETWEEN_PINGS,
Parameter.TIME_BETWEEN_BURST_SEQUENCES,
Parameter.NUMBER_PINGS,
Parameter.AVG_INTERVAL,
Parameter.USER_NUMBER_BEAMS,
Parameter.TIMING_CONTROL_REGISTER,
Parameter.POWER_CONTROL_REGISTER,
Parameter.A1_1_SPARE,
Parameter.B0_1_SPARE,
Parameter.B1_1_SPARE,
Parameter.COMPASS_UPDATE_RATE,
Parameter.COORDINATE_SYSTEM,
Parameter.NUMBER_BINS,
Parameter.BIN_LENGTH,
Parameter.MEASUREMENT_INTERVAL,
Parameter.DEPLOYMENT_NAME,
Parameter.WRAP_MODE,
Parameter.CLOCK_DEPLOY,
Parameter.DIAGNOSTIC_INTERVAL,
Parameter.MODE,
Parameter.ADJUSTMENT_SOUND_SPEED,
Parameter.NUMBER_SAMPLES_DIAGNOSTIC,
Parameter.NUMBER_BEAMS_CELL_DIAGNOSTIC,
Parameter.NUMBER_PINGS_DIAGNOSTIC,
Parameter.MODE_TEST,
Parameter.ANALOG_INPUT_ADDR,
Parameter.SW_VERSION,
Parameter.USER_1_SPARE,
Parameter.VELOCITY_ADJ_TABLE,
Parameter.COMMENTS,
Parameter.WAVE_MEASUREMENT_MODE,
Parameter.DYN_PERCENTAGE_POSITION,
Parameter.WAVE_TRANSMIT_PULSE,
Parameter.WAVE_BLANKING_DISTANCE,
Parameter.WAVE_CELL_SIZE,
Parameter.NUMBER_DIAG_SAMPLES,
Parameter.A1_2_SPARE,
Parameter.B0_2_SPARE,
Parameter.NUMBER_SAMPLES_PER_BURST,
Parameter.SAMPLE_RATE,
Parameter.ANALOG_OUTPUT_SCALE,
Parameter.CORRELATION_THRESHOLD,
Parameter.USER_3_SPARE,
Parameter.TRANSMIT_PULSE_LENGTH_SECOND_LAG,
Parameter.USER_4_SPARE,
Parameter.QUAL_CONSTANTS]
spare_param_values = {Parameter.A1_1_SPARE: '',
Parameter.B0_1_SPARE: '',
Parameter.B1_1_SPARE: '',
Parameter.USER_1_SPARE: '',
Parameter.A1_2_SPARE: '',
Parameter.B0_2_SPARE: '',
Parameter.USER_3_SPARE: '',
Parameter.USER_4_SPARE: ''}
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The newline.
@param driver_event Driver process event callback.
"""
super(Protocol, self).__init__(prompts, newline, driver_event)
########################################################################
# overridden superclass methods
########################################################################
def _got_chunk(self, structure, timestamp):
"""
The base class got_data has gotten a structure from the chunker. Pass it to extract_sample
with the appropriate particle objects and REGEXes.
"""
self._extract_sample(VectorVelocityDataParticle, VELOCITY_DATA_REGEX, structure, timestamp)
self._extract_sample(VectorSystemDataParticle, SYSTEM_DATA_REGEX, structure, timestamp)
self._extract_sample(VectorVelocityHeaderDataParticle, VELOCITY_HEADER_DATA_REGEX, structure, timestamp)
self._got_chunk_base(structure, timestamp)
def _update_params(self):
"""
Update the parameter dictionary. Issue the read config command. The response
needs to be saved to param dictionary.
"""
ret_config = self._do_cmd_resp(InstrumentCmds.READ_USER_CONFIGURATION, response_regex=USER_CONFIG_DATA_REGEX)
self._param_dict.update(ret_config)
self.spare_param_values[Parameter.A1_1_SPARE] = ret_config[24:26]
self.spare_param_values[Parameter.B0_1_SPARE] = ret_config[26:28]
self.spare_param_values[Parameter.B1_1_SPARE] = ret_config[28:30]
self.spare_param_values[Parameter.USER_1_SPARE] = ret_config[74:76]
self.spare_param_values[Parameter.A1_2_SPARE] = ret_config[448:450]
self.spare_param_values[Parameter.B0_2_SPARE] = ret_config[450:452]
self.spare_param_values[Parameter.USER_3_SPARE] = ret_config[460:462]
self.spare_param_values[Parameter.USER_4_SPARE] = ret_config[464:494]
def _create_set_output(self, parameters):
"""
load buffer with sync byte (A5), ID byte (01), and size word (# of words in little-endian form)
'user' configuration is 512 bytes = 256 words long = size 0x100
"""
output = ['\xa5\x00\x00\x01']
CHECK_SUM_SEED = 0xb58c
for param in self.order_of_user_config:
log.trace('_create_set_output: adding %s to list', param)
if param == Parameter.COMMENTS:
output.append(parameters.format(param).ljust(180, "\x00"))
elif param == Parameter.DEPLOYMENT_NAME:
output.append(parameters.format(param).ljust(6, "\x00"))
elif param == Parameter.QUAL_CONSTANTS:
output.append('\x00'.ljust(16, "\x00"))
elif param == Parameter.VELOCITY_ADJ_TABLE:
output.append(base64.b64decode(parameters.format(param)))
elif param in [Parameter.A1_1_SPARE, Parameter.B0_1_SPARE, Parameter.B1_1_SPARE, Parameter.USER_1_SPARE,
Parameter.A1_2_SPARE, Parameter.B0_2_SPARE, Parameter.USER_2_SPARE, Parameter.USER_3_SPARE]:
output.append(self.spare_param_values.get(param).ljust(2, "\x00"))
elif param in [Parameter.WAVE_MEASUREMENT_MODE, Parameter.WAVE_TRANSMIT_PULSE, Parameter.WAVE_BLANKING_DISTANCE,
Parameter.WAVE_CELL_SIZE, Parameter.NUMBER_DIAG_SAMPLES, Parameter.DYN_PERCENTAGE_POSITION]:
output.append('\x00'.ljust(2, "\x00"))
elif param == Parameter.USER_4_SPARE:
output.append(self.spare_param_values.get(param).ljust(30, "\x00"))
else:
output.append(parameters.format(param))
log.trace('_create_set_output: ADDED %s output size = %s', param, len(output))
log.debug("Created set output: %r with length: %s", output, len(output))
checksum = CHECK_SUM_SEED
output = "".join(output)
for word_index in range(0, len(output), 2):
word_value = NortekProtocolParameterDict.convert_word_to_int(output[word_index:word_index+2])
checksum = (checksum + word_value) % 0x10000
log.debug('_create_set_output: user checksum = %r', checksum)
output += (NortekProtocolParameterDict.word_to_string(checksum))
return output
########################################################################
# Private helpers.
########################################################################
def _build_param_dict(self):
"""
Overwrite base classes method.
Creates base class's param dictionary, then sets parameter values for those specific to this instrument.
"""
NortekInstrumentProtocol._build_param_dict(self)
self._param_dict.add(Parameter.TRANSMIT_PULSE_LENGTH,
r'^.{%s}(.{2}).*' % str(4),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.READ_WRITE,
display_name="Transmit Pulse Length",
description="Pulse duration of the transmitted signal.",
default_value=2,
units=Units.COUNTS,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.BLANKING_DISTANCE,
r'^.{%s}(.{2}).*' % str(6),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Blanking Distance",
description="Minimum sensing range of the sensor.",
default_value=16,
units=Units.COUNTS,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.RECEIVE_LENGTH,
r'^.{%s}(.{2}).*' % str(8),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.READ_WRITE,
display_name="Receive Length",
description="Length of the received pulse.",
default_value=7,
units=Units.COUNTS,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.TIME_BETWEEN_PINGS,
r'^.{%s}(.{2}).*' % str(10),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Time Between Pings",
description="Length of time between each ping.",
default_value=44,
units=Units.COUNTS,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.TIME_BETWEEN_BURST_SEQUENCES,
r'^.{%s}(.{2}).*' % str(12),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.READ_WRITE,
display_name="Time Between Burst Sequences",
description="Length of time between each burst.",
default_value=0,
units=Units.COUNTS,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.NUMBER_PINGS,
r'^.{%s}(.{2}).*' % str(14),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Number Pings",
description="Number of pings in each burst sequence.",
default_value=0,
units=Units.HERTZ,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.AVG_INTERVAL,
r'^.{%s}(.{2}).*' % str(16),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Average Interval",
description="Interval for continuous sampling.",
default_value=64,
units=Units.SECOND,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.MEASUREMENT_INTERVAL,
r'^.{%s}(.{2}).*' % str(38),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Measurement Interval",
description="Interval for single measurements.",
units=Units.SECOND,
default_value=600,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.DIAGNOSTIC_INTERVAL,
r'^.{%s}(.{4}).*' % str(54),
lambda match: NortekProtocolParameterDict.convert_double_word_to_int(match.group(1)),
NortekProtocolParameterDict.double_word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Diagnostic Interval",
description='Number of seconds between diagnostics measurements.',
default_value=10800,
units=Units.SECOND,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.ADJUSTMENT_SOUND_SPEED,
r'^.{%s}(.{2}).*' % str(60),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Adjustment Sound Speed",
description='User input sound speed adjustment factor.',
default_value=16657,
units=Units.METER + '/' + Units.SECOND,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.NUMBER_SAMPLES_DIAGNOSTIC,
r'^.{%s}(.{2}).*' % str(62),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Diagnostic Samples",
description='Number of samples in diagnostics mode.',
default_value=1,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.SW_VERSION,
r'^.{%s}(.{2}).*' % str(72),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.STRING,
visibility=ParameterDictVisibility.READ_ONLY,
direct_access=True,
display_name="Software Version",
description="Current software version installed on instrument.")
self._param_dict.add(Parameter.SAMPLE_RATE,
r'^.{%s}(.{2}).*' % str(454),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.READ_WRITE,
display_name="Sample Rate",
description="Number of samples per burst.",
default_value=16,
startup_param=True)
self._param_dict.add(Parameter.ANALOG_OUTPUT_SCALE,
r'^.{%s}(.{2}).*' % str(456),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Analog Output Scale Factor",
description="Scale factor used in calculating analog output.",
default_value=6711,
startup_param=True,
direct_access=True)
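# Minimal sketch of the user-configuration checksum scheme used in _create_set_output
# above (standalone re-implementation for illustration, not driver code): sum the
# buffer as little-endian 16-bit words, seeded with 0xb58c, modulo 0x10000. 'buf' is
# assumed to be an even-length byte string.
def _demo_user_config_checksum(buf):
    checksum = 0xb58c
    for i in range(0, len(buf), 2):
        word = struct.unpack('<H', buf[i:i + 2])[0]
        checksum = (checksum + word) % 0x10000
    return checksum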
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script takes CWB-SGML format input
and outputs EtherCalc/SocialCalc spreadsheet data
Author: Amir Zeldes
"""
import re, tempfile, subprocess, os
from collections import defaultdict
from collections import OrderedDict
from operator import itemgetter
from gitdox_sql import *
from os import listdir
from os.path import isfile, join
from configobj import ConfigObj
from ast import literal_eval
import json
from copy import copy
import cgi
import requests
from xml.sax.saxutils import escape
__version__ = "2.0.0"
class ExportConfig:
def __init__(self, **kwargs):
"""
:param kwargs:
config=None, aliases=None, priorities=None, milestones=None, no_content=None, tok_annos=None
"""
self.config = kwargs.get("config",None)
self.export_all = True
if self.config is None:
self.aliases = kwargs.get("aliases",{})
self.priorities = kwargs.get("priorities",[])
self.milestones = kwargs.get("milestones",[])
self.no_content = kwargs.get("no_content",[])
self.no_ignore = kwargs.get("no_ignore",True)
self.tok_annos = kwargs.get("tok_annos",[])
self.template = "<meta %%all%%>\n%%body%%\n</meta>\n"
else:
if not self.config.endswith(".ini"):
self.config += ".ini"
self.read_config(self.config)
# Anything that is 'no_content' must have some sort of priority
for anno in sorted(self.no_content):
if anno not in self.priorities:
self.priorities.append(anno)
# Anything that is in 'milestones' must have some sort of priority
for anno in sorted(self.milestones):
if anno not in self.priorities:
self.priorities.append(anno)
# Anything that is in 'tok_annos' must have some sort of priority
for anno in sorted(self.tok_annos):
if anno not in self.priorities:
self.priorities.append(anno)
def read_config(self,config_file):
config = ConfigObj(os.path.dirname(os.path.realpath(__file__)) + os.sep + ".." + os.sep + "schemas" + os.sep + config_file)
if config.has_key("aliases"):
self.aliases = literal_eval(config["aliases"])
else:
self.aliases = {}
if config.has_key("priorities"):
self.priorities = literal_eval(config["priorities"])
else:
self.priorities = []
if config.has_key("milestones"):
self.milestones = literal_eval(config["milestones"])
else:
self.milestones = []
if config.has_key("no_content"):
self.no_content = literal_eval(config["no_content"])
else:
self.no_content = []
if config.has_key("tok_annos"):
self.tok_annos = literal_eval(config["tok_annos"])
else:
self.tok_annos = []
if config.has_key("export_all"):
self.export_all = config["export_all"].lower() == "true"
if config.has_key("no_ignore"):
self.no_ignore = config["no_ignore"].lower() == "true"
else:
self.no_ignore = True
if config.has_key("template"):
self.template = config["template"]
else:
self.template = "<meta %%all%%>\n%%body%%\n</meta>\n"
def parse_ether(ether, doc_id=None):
"""Take in raw socialcalc data and turn it into a dict of Cells. Used in validation."""
class Cell:
def __init__(self, col, row, content, span):
self.col = col
self.row = row
self.header = ""
self.content = content
self.span = span
def __repr__(self):
return "<Cell (" + repr((self.col, self.row, self.header, self.content, self.span)) + ")>"
ether_lines = ether.splitlines()
# find col letter corresponding to col name
parsed = defaultdict(list)
colmap = defaultdict(list)
rev_colmap = {}
all_cells = []
for line in ether_lines:
if line.startswith("cell:"): # Cell row
# A maximal row looks like this incl. span: cell:F2:t:LIRC2014_chw0oir:f:1:rowspan:289
# A minimal row without formatting: cell:C2:t:JJ:f:1
parts = line.split(":")
if len(parts) > 3: # Otherwise invalid row
cell_id = parts[1]
cell_row = cell_id[1:]
cell_col = cell_id[0]
# We'd need something like this to support more than 26 cols, i.e. columns AA, AB...
# for c in cell_id:
# if c in ["0","1","2","3","4","5","6","7","8","9"]:
# cell_row += c
# else:
# cell_col += c
cell_content = parts[3].replace("\\c", ":")
cell_span = parts[-1] if "rowspan:" in line else "1"
# record col name
if cell_row == "1":
colmap[cell_content].append(cell_col)
rev_colmap[cell_col] = cell_content
cell = Cell(cell_col, cell_row, cell_content, cell_span)
parsed[cell_col].append(cell)
all_cells.append(cell)
for cell in all_cells:
if cell.col in rev_colmap:
cell.header = rev_colmap[cell.col]
else:
if doc_id is None:
doc_id = "unknown"
raise IOError("Undocumented column: " + cell.col + " in '" + str(cell) + " from doc: " + str(doc_id))
parsed["__colmap__"] = colmap # Save colmap for apply_rule
return parsed
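# Illustrative sketch (demo helper): parse a two-column, two-row SocialCalc fragment.
# Row 1 supplies the column names that become each Cell's .header.
def _demo_parse_ether():
    sample = "\n".join([
        "cell:A1:t:tok:f:2",
        "cell:B1:t:pos:f:2",
        "cell:A2:t:word:f:1",
        "cell:B2:t:NN:f:1",
    ])
    parsed = parse_ether(sample, doc_id="demo")
    return parsed["__colmap__"]["pos"]   # -> ['B']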
def unescape_xml(text):
# Fix various common compounded XML escapes
text = text.replace("&lt;","<").replace("&gt;",">")
text = text.replace("<","<").replace(">",">")
text = text.replace("&","&")
return text
def build_meta_tag(doc_id):
meta = "<meta"
meta_items = []
meta_rows = get_doc_meta(doc_id)
# docid,metaid,key,value - four cols
for item in meta_rows:
key, value = item[2], item[3]
if not key.startswith("ignore:"):
key = key.replace("=", "&#61;")  # Key may not contain equals sign
value = value.replace('"', "&quot;")  # Value may not contain double quotes
value = unescape_xml(value)
meta_items.append(key + '="' + value + '"')
meta_props = " ".join(meta_items)
if meta_props != "":
meta_props = " " + meta_props
output = meta + meta_props + ">\n"
output = output.replace("<meta >","<meta>")
return output
def fill_meta_template(doc_id,template):
meta_items = []
meta_dict = {}
meta_rows = get_doc_meta(doc_id)
doc_info = get_doc_info(doc_id)
name, corpus = [str(x) for x in doc_info[0:2]]
# docid,metaid,key,value - four cols
for item in meta_rows:
key, value = str(item[2].encode("utf8")), str(item[3].encode("utf8"))
if not key.startswith("ignore:"):
key = key.replace("=", "&#61;")
value = value.replace('"', "&quot;")
value = unescape_xml(value)
meta_items.append(escape(key) + '="' + escape(value) + '"')
meta_dict[escape(key)] = escape(value)
meta_props = " ".join(meta_items)
"""
if "%%all:" in template: # Check for instruction to embed all metadata in an existing tag
m = re.search(r"%%all:([^%]+)%",template)
if m is not None:
meta_tag = m.group(1)
template = re.sub("(<" + meta_tag + "[^>]*)>", "\1 " + meta_props + ">", template)
template = re.sub(r"%%all:([^%]+)%","",template)
else:
"""
template = template.replace("%%all%%", meta_props)
template = template.replace("%%name%%", name)
template = template.replace("%%corpus%%", corpus)
for key in meta_dict:
if key != "body": # Never overwrite body template position
template = template.replace("%%" + key + "%%",meta_dict[key])
template = template.replace("<meta >","<meta>")
return template
def get_file_list(path,extension,hide_extension=False,forbidden=None):
if forbidden is None:
forbidden = []
if not extension.startswith("."):
extension = "." + extension
outfiles = []
files = [f for f in listdir(path) if isfile(join(path, f))]
for filename in sorted(files):
if filename.endswith(extension) and filename not in forbidden:
if hide_extension:
filename = filename.replace(extension, "")
if filename not in forbidden:
outfiles.append(filename)
return outfiles
def get_ether_stylesheets():
scriptpath = os.path.dirname(os.path.realpath(__file__)) + os.sep
stylesheet_dir = scriptpath + os.sep + ".." + os.sep + "schemas" + os.sep
stylesheet_list = get_file_list(stylesheet_dir,"ini",hide_extension=True)
return stylesheet_list
def flush_open(annos, row_num, colmap):
flushed = ""
for anno in annos:
element, name, value = anno
flushed += "cell:"+colmap[name] + str(row_num) + ":t:" + value + "\n" # NO t >TVF
return flushed
def flush_close(closing_element, last_value, last_start, row_num, colmap, aliases):
flushed = ""
for alias in aliases[closing_element][-1]:
stack_len = len(last_start[alias])
if stack_len > 0 and last_start[alias][-1] < row_num - 1:
span_string = ":rowspan:" + str(row_num - last_start[alias][-1])
else:
span_string = ""
# Use t for tvf to leave links on
flushed += ("cell:"
+ colmap[alias][stack_len - 1]
+ str(last_start[alias][-1])
+ ":t:" + str(last_value[alias][-1])
+ ":f:1:tvf:1" + span_string + "\n")
# pop the stack since we've closed a tag
last_value[alias].pop()
last_start[alias].pop()
aliases[closing_element].pop()
return flushed
def number_to_letters(number):
if number < 27:
return chr(number + ord('a') - 1).upper()
else:
char1 = chr((number // 26) + ord('a')-1).upper()
char2 = chr((number % 26) + ord('a')-1).upper()
return char1 + char2
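# Quick illustration (demo helper): column numbers map to spreadsheet letters, with
# two-letter names past column 26.
def _demo_number_to_letters():
    return number_to_letters(1), number_to_letters(26), number_to_letters(28)
    # -> ('A', 'Z', 'AB')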
def sgml_to_ether(sgml, ignore_elements=False):
open_annos = defaultdict(list)
# a mapping from a tag name to a list of values. the list is a stack
# where the most recently encountered opening tag's value/start row
# is kept on the right side of the list. whenever we see a closing tag
# we pop from the stack, and whenever we see an opening tag we push
# (append) to the stack
last_value = defaultdict(list)
last_start = defaultdict(list)
# maps from tags to a similar stack data structure where the top of the stack
# (i.e. the right side of the list) contains all the annotations that were
# present on the most recently opened nested element
aliases = defaultdict(list)
# values in this dict are also lists which follow the pattern described above
colmap = OrderedDict()
preamble = """socialcalc:version:1.0
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary=SocialCalcSpreadsheetControlSave
--SocialCalcSpreadsheetControlSave
Content-type: text/plain; charset=UTF-8
# SocialCalc Spreadsheet Control Save
version:1.0
part:sheet
part:edit
part:audit
--SocialCalcSpreadsheetControlSave
Content-type: text/plain; charset=UTF-8
version:1.5
"""
sgml = sgml.replace("\r","")
output = ""
maxcol = 1
current_row = 2
for line in sgml.strip().split("\n"):
line = line.strip()
# SocialCalc uses colons internally, \\c used to repr colon in data
line = line.replace(":","\\c")
if line.startswith("<?") or line.endswith("/>"): # Skip unary tags and XML instructions
continue
elif line.startswith("<meta") or line.startswith("</meta"): # meta tags
continue
elif line.startswith("</"): # Closing tag
my_match = re.match("</([^>]+)>",line)
element = my_match.groups(0)[0]
output += flush_close(element, last_value, last_start, current_row, colmap, aliases)
elif line.startswith("<"): # Opening tag
my_match = re.match("<([^ >]+)[ >]",line)
element = my_match.groups(0)[0]
aliases[element].append([]) # Add new set of aliases to see which attributes this instance has
if "=" not in line:
line = "<" + element + " " + element + '="' + element + '">'
attrs = re.findall('([^" =]+)="([^"]+)"',line)
anno_name = ""
anno_value = ""
for attr in attrs:
if element != attr[0] and ignore_elements is False:
if attr[0] == "xml\\clang":
anno_name = "lang" # TODO: de-hardwire fix for xml:lang
else:
anno_name = element + "_" + attr[0]
else:
anno_name = attr[0]
anno_value = attr[1]
open_annos[current_row].append((anno_name,anno_value))
last_value[anno_name].append(anno_value)
last_start[anno_name].append(current_row)
if anno_name not in aliases[element][-1]:
aliases[element][-1].append(anno_name)
if anno_name not in colmap:
maxcol += 1
colmap[anno_name] = [number_to_letters(maxcol)]
elif anno_name in colmap and \
len(last_start[anno_name]) > len(colmap[anno_name]):
maxcol += 1
colmap[anno_name].append(number_to_letters(maxcol))
elif len(line) > 0: # Token
token = line.strip()
output += "cell:A"+str(current_row)+":t:"+token+":f:1:tvf:1\n" # NO f <> tvf for links
current_row +=1
else: # Empty line
current_row +=1
preamble += "cell:A1:t:tok:f:2\n" # f <> tvf for links
output = preamble + output
for header in colmap:
for entry in colmap[header]:
output += "cell:"+entry+"1:t:"+header+":f:2\n" # NO f <> tvf for links
output += "\nsheet:c:" + str(maxcol) + ":r:" + str(current_row-1) + ":tvf:1\n"
# Prepare default Antinoou font for Coptic data
output += """
font:1:* * Antinoou
font:2:normal bold * *
valueformat:1:text-plain
--SocialCalcSpreadsheetControlSave
Content-type: text/plain; charset=UTF-8
version:1.0
rowpane:0:1:1
colpane:0:1:1
ecell:A1
--SocialCalcSpreadsheetControlSave
Content-type: text/plain; charset=UTF-8
--SocialCalcSpreadsheetControlSave--
"""
return output
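# Hedged usage sketch: convert a tiny TT-style SGML fragment into SocialCalc data.
# Each token becomes a row in column A; the s_type annotation gets its own column
# and spans both token rows.
def _demo_sgml_to_ether():
    sgml = "\n".join([
        '<s type="decl">',
        'hello',
        'world',
        '</s>',
    ])
    return sgml_to_ether(sgml)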
def ether_to_csv(ether_path, name):
try:
r = requests.get(ether_path + "_/" + name + "/csv/")
except:
return ""
return r.text
def strip_unique_identifier(tag):
"""Given an SGML closing or opening tag, replace anything that looks
like __\d+__ on the end of the tag name, assuming that we were the
ones who added it."""
try:
tag_name = re.match("^</?([^ >]+)", tag).groups(0)[0]
except AttributeError:
return tag
orig_tag_name = re.sub("__\d+__$", "", tag_name)
tag = tag.replace("<" + tag_name, "<" + orig_tag_name)
tag = tag.replace("</" + tag_name, "</" + orig_tag_name)
tag = tag.replace(tag_name + "=" + '"' + orig_tag_name + '"',
orig_tag_name + "=" + '"' + orig_tag_name + '"')
return tag
def deunique_should_skip_line(line):
return (not line.startswith("<") # tokens
or line.startswith("<?") # xml instrs
or line.endswith("/>") # unary tags
or line.startswith("<meta") # meta
or line.startswith("</meta"))
def reverse_adjacent_closing_tags(lines):
"""Finds sublists like ['</e>', '</e__2__>'] and replaces them with
['</e__2__>', '</e>']"""
def swap_run(l, start, end):
l[start:end] = l[start:end][::-1]
run_start = None
for i, line in enumerate(lines):
if line.startswith("</"):
if run_start is not None:
deuniqued_tag = strip_unique_identifier(line)
if deuniqued_tag != lines[run_start]:
swap_run(lines, run_start, i)
run_start = None
else:
run_start = i
elif run_start is not None:
swap_run(lines, run_start, i)
run_start = None
else:
run_start = None
if run_start is not None:
swap_run(lines, run_start, i+1)
return lines
def deunique_properly_nested_tags(sgml):
"""Use a silly n^2 algorithm to detect properly nested tags and strip
them of their unique identifiers. Probably an n algorithm to do this."""
lines = sgml.split("\n")
lines = reverse_adjacent_closing_tags(lines)
output = copy(lines)
for i, line in enumerate(lines):
if deunique_should_skip_line(line) or line.startswith("</"):
continue
# if we've gotten this far, we have an opening tag--store the tag name
open_element = re.match("<([^ >]+)[ >]", line).groups(0)[0]
open_counts = defaultdict(int)
for j, line2 in enumerate(lines[i:]):
if deunique_should_skip_line(line2):
continue
if line2.startswith("</"):
element = re.match("</([^>]+)>", line2).groups(0)[0]
open_counts[element] -= 1
if element == open_element:
break
else:
element = re.match("<([^ >]+)[ >]", line2).groups(0)[0]
open_counts[element] += 1
# element is properly nested if no element was opened in the block that
# was not also closed in the block or vice versa
if sum(open_counts.values()) == 0:
output[i] = strip_unique_identifier(output[i])
output[i+j] = strip_unique_identifier(output[i+j])
output = reverse_adjacent_closing_tags(output)
return "\n".join(output)
def ether_to_sgml(ether, doc_id,config=None):
"""
:param ether: String in SocialCalc format
:param doc_id: GitDox database internal document ID number as string
:param config: Name of an export config (.ini file) under schemas/
:return:
"""
if config is None or config == "--default--":
config = ExportConfig()
else:
config = ExportConfig(config=config)
# mapping from col header (meaningful string) to the col letter
colmap = {}
# list of 3-tuples of parsed cells: (col, row, contents)
cells = []
if isinstance(ether,unicode):
ether = ether.encode("utf8")
# Destroy empty span cells without content, typically nested underneath longer, filled spans
ether = re.sub(r'cell:[A-Z]+[0-9]+:f:1:rowspan:[0-9]+','',ether)
# Ensure that cell A1 is treated as 'tok' if the header was deleted
ether = re.sub(r'cell:A1:f:([0-9]+)',r"cell:A1:t:tok:f:\1",ether)
# parse cell contents into cells
for line in ether.splitlines():
parsed_cell = re.match(r'cell:([A-Z]+)(\d+):(.*)$', line)
if parsed_cell is not None:
col = parsed_cell.group(1)
row = int(parsed_cell.group(2))
other = parsed_cell.group(3).split(':')
cellinfo = {}
i = 0
while i+1 < len(other):
cellinfo[other[i]] = other[i+1]
i += 2
cells.append((col, row, cellinfo))
cells = sorted(cells, key=itemgetter(1)) # so header row gets read first
open_tags = defaultdict(lambda: defaultdict(list))
last_open_index = defaultdict(int)
open_tag_length = defaultdict(int)
open_tag_order = defaultdict(list)
last_row = 1
toks = {}
sec_element_checklist = []
row = 1
# added to support duplicate columns
namecount = defaultdict(int)
close_tags = defaultdict(list)
for cell in cells:
# Header row
if cell[1] == 1:
colname = cell[2]['t'].replace("\\c",":")
if colname in config.aliases:
colname = config.aliases[colname]
# if we've already seen a tag of this name, prepare to make it unique
namecount[colname] += 1
if namecount[colname] > 1:
dupe_suffix = "__" + str(namecount[colname]) + "__"
else:
dupe_suffix = ""
if "@" in colname:
unique_colname = colname.replace("@", dupe_suffix + "@")
else:
unique_colname = colname + dupe_suffix
colmap[cell[0]] = unique_colname
# Make sure that everything that should be exported has some priority
if unique_colname.split("@",1)[0] not in config.priorities and config.export_all:
if not unique_colname.lower().startswith("ignore:"):
elem = unique_colname.split("@",1)[0]
config.priorities.append(elem)
# All other rows
else:
col = cell[0]
row = cell[1]
if col in colmap:
col_name = colmap[col]
else:
raise IOError("Column " + col + " not found in doc_id " + str(doc_id))
# If the column specifies an attribute name, use it, otherwise use the element's name again
if "@" in col_name:
element, attrib = col_name.split("@",1)
else:
element = col_name
attrib = element
# Check whether attrib contains a constant value instruction
const_val = ""
if "=" in attrib:
attrib, const_val = attrib.split("=",1)
# Check to see if the cell has been merged with other cells
if 'rowspan' in cell[2]:
rowspan = int(cell[2]['rowspan'])
else:
rowspan = 1
# Check for flexible element, e.g. m|w@x means 'prefer to attach x to m, else to w'
if "|" in element:
element, sec_element = element.split("|",1)
else:
sec_element = ""
# Move on to next cell if this is not a desired column
if element not in config.priorities or (element.startswith("ignore:") and config.no_ignore): # Guaranteed to be in priorities if it should be included
continue
# New row starting from this cell, sort previous lists for opening and closing orders
if row != last_row:
for element in open_tags[last_row]:
open_tag_order[last_row].append(element)
open_tag_order[last_row].sort(key=lambda x: (-open_tag_length[x],config.priorities.index(x)))
for sec_tuple in sec_element_checklist:
prim_found = False
prim_elt, sec_elt, attr, val, span = sec_tuple
if prim_elt in open_tags[last_row] and prim_elt in open_tag_length:
if span == open_tag_length[prim_elt]:
open_tags[last_row][prim_elt].append((attr, val))
close_tags[last_row + span].append(prim_elt)
prim_found = True
if not prim_found:
if sec_elt in open_tags[last_row] and sec_elt in open_tag_length:
if span == open_tag_length[sec_elt]:
open_tags[last_row][sec_elt].append((attr, val))
close_tags[last_row + span].append(sec_elt)
sec_element_checklist = [] # Purge sec_elements
close_tags[row].sort(key=lambda x: (last_open_index[x],config.priorities.index(x)), reverse=True)
last_row = row
if const_val != "":
content = const_val
else:
if 't' in cell[2]: # cell contains text
content = cell[2]['t']
elif 'v' in cell[2]: # cell contains numerical value
content = cell[2]['v']
elif col_name != 'tok':
continue # cell does not contain a value and this is not a token entry
if col_name == 'tok':
if "<" in content or "&" in content or ">" in content:
content = escape(content)
toks[row] = content
else:
if element in config.no_content:
if element == attrib:
attrib = ""
if attrib in config.tok_annos:
# TT SGML token annotation, append to token with tab separator and move on
if "<" in content or "&" in content or ">" in content:
content = escape(content)
toks[row] += "\t" + content
continue
if element not in config.priorities and len(config.priorities) > 0:
# Priorities have been supplied, but this column is not in them
continue
# content may not contain straight double quotes in span annotations in SGML export
# Note that " is allowed in tokens and in tab-delimited token annotations!
content = content.replace('"', """)
if sec_element != "":
#open_tags[row][sec_element].append((attrib, content))
sec_element_checklist.append((element,sec_element,attrib,content,rowspan))
continue
open_tags[row][element].append((attrib, content))
last_open_index[element] = int(row)
if 'rowspan' in cell[2]:
close_row = row + rowspan
else:
close_row = row + 1
# this introduces too many close tags for elts that have more than one attr.
# We take care of this later with close_tag_debt
close_tags[close_row].append(element)
open_tag_length[element] = int(close_row) - int(last_open_index[element])
# Sort last row tags
if row + 1 in close_tags:
close_tags[row+1].sort(key=lambda x: (last_open_index[x],config.priorities.index(x)), reverse=True)
for element in open_tags[last_row]:
open_tag_order[last_row].append(element)
open_tag_order[last_row].sort(key=lambda x: (-open_tag_length[x],config.priorities.index(x)))
#output = build_meta_tag(doc_id)
template = fill_meta_template(doc_id,config.template)
output = ""
close_tag_debt = defaultdict(int)
for r in xrange(2, sorted(close_tags.keys())[-1] + 1):
for element in close_tags[r]:
if element != "" and element not in config.milestones:
if close_tag_debt[element] > 0:
close_tag_debt[element] -= 1
else:
output += '</' + element + '>\n'
for element in open_tag_order[r]:
tag = '<' + element
attr_count = 0
for attrib, value in open_tags[r][element]:
if attrib != "":
tag += ' ' + attrib + '="' + value + '"'
attr_count += 1
close_tag_debt[element] = len(open_tags[r][element]) - 1
if element in config.milestones:
tag += '/>\n'
else:
tag += '>\n'
output += tag
if r not in toks:
toks[r] = "" # Caution - empty token!
output += toks[r] + '\n'
output = output.replace('\\c', ':')
#output += "</meta>\n"
if "%%body%%" in template:
output = template.replace("%%body%%",output.strip())
output = re.sub("%%[^%]+%%", "none", output)
# fix tags that look like elt__2__ if it still gives correct sgml
output = deunique_properly_nested_tags(output)
return output
def exec_via_temp(input_text, command_params, workdir=""):
temp = tempfile.NamedTemporaryFile(delete=False,mode='wb')
exec_out = ""
try:
temp.write(input_text)
temp.close()
#command_params = [x if 'tempfilename' not in x else x.replace("tempfilename",temp.name) for x in command_params]
command_params = command_params.replace("tempfilename",temp.name)
if workdir == "":
proc = subprocess.Popen(command_params, stdout=subprocess.PIPE,stdin=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
(stdout, stderr) = proc.communicate()
else:
proc = subprocess.Popen(command_params, stdout=subprocess.PIPE,stdin=subprocess.PIPE,stderr=subprocess.PIPE,cwd=workdir)
(stdout, stderr) = proc.communicate()
except Exception as e:
return str(e)
finally:
os.remove(temp.name)
return stdout, stderr
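# Hedged usage sketch (assumes a POSIX shell with 'cat' available): run a command over
# text via a temporary file. The literal token 'tempfilename' in the command string is
# substituted with the temp file's real path before execution.
def _demo_exec_via_temp():
    out, err = exec_via_temp(b"hello\n", "cat tempfilename")
    return out   # -> "hello\n"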
def fix_colnames(socialcalc):
# Hard-wired fixes for Scriptorium layer names that should be collapsed if they appear
# TODO: make this configurable somewhere
socialcalc = re.sub(r'(:[A-Z]1:t:)norm_group_((orig_group):)',r'\1\2',socialcalc)
socialcalc = re.sub(r'(:[A-Z]1:t:)norm_((orig|pos|lemma|lang):)', r'\1\2', socialcalc)
socialcalc = re.sub(r'(:[A-Z]1:t:)morph_((orig|pos|lemma|lang):)', r'\1\2', socialcalc)
socialcalc = re.sub(r'(:[A-Z]1:t:)norm_xml\\c((orig|pos|lemma|lang):)', r'\1\2', socialcalc)
socialcalc = re.sub(r'(:[A-Z]1:t:)morph_xml\\c((orig|pos|lemma|lang):)', r'\1\2', socialcalc)
return socialcalc
def postprocess_sgml(sgml,instructions=None):
"""Function to clean up NLP output"""
if instructions is None:
return sgml
else:
remove = set([])
rename = {}
for instruction in instructions:
parts = instruction.split("/")
if len(parts) ==3:
subj, pred, obj = parts
elif len(parts) ==2:
subj, pred = parts
else:
subj, pred, obj = None, None, None
if pred == "remove":
remove.add(subj)
elif pred == "rename":
rename[subj] = obj
removes = "|".join(list(remove))
sgml = re.sub(r'</?'+removes+'(>| [^<>\n]*>)\n','',sgml,flags=re.DOTALL|re.MULTILINE)
for f in rename:
r = rename[f]
# Run twice to catch both element and attribute name
sgml = re.sub(r'(<[^<>\n]*)'+f+r'([^<>\n]*>)',r'\1'+r+r'\2',sgml)
sgml = re.sub(r'(<[^<>\n]*)'+f+r'([^<>\n]*>)',r'\1'+r+r'\2',sgml)
return sgml
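# Illustrative sketch (demo helper): clean NLP output by dropping one element and
# renaming another, using the "subject/predicate(/object)" instruction form parsed above.
def _demo_postprocess():
    sgml = '<junk>\nword\n</junk>\n<hi rend="bold">\nword2\n</hi>\n'
    return postprocess_sgml(sgml, instructions=["junk/remove", "hi/rename/emph"])
    # -> 'word\n<emph rend="bold">\nword2\n</emph>\n'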
def make_spreadsheet(data, ether_path, format="sgml", ignore_elements=False):
if format=="sgml":
socialcalc_data = sgml_to_ether(data, ignore_elements)
socialcalc_data = fix_colnames(socialcalc_data)
ether_command = "curl --netrc --request PUT --header 'Content-Type: text/x-socialcalc' --data-binary @tempfilename " + ether_path # e.g. ether_path "http://127.0.0.1:8000/_/nlp_snippet"
elif format=="socialcalc":
socialcalc_data = data.encode("utf8")
ether_command = "curl --netrc --request PUT --header 'Content-Type: text/x-socialcalc' --data-binary @tempfilename " + ether_path # e.g. ether_path "http://127.0.0.1:8000/_/nlp_snippet"
else:
socialcalc_data = data
ether_command = "curl --netrc -i -X PUT --data-binary @tempfilename " + ether_path # e.g. ether_path "http://127.0.0.1:8000/_/nlp_snippet"
#ether_command = ["curl","--request","PUT","--header","'Content-Type: text/x-socialcalc'", "--data-binary", "@" + "tempfilename",
# ether_path] # e.g. ether_path "http://127.0.0.1:8000/_/nlp_snippet"
#ether_command = ["less","tempfilename",">","/var/www/html/gitdox/out.eth"]
#outfile = open("/var/www/html/gitdox/out.eth",'wb')
#outfile.write(socialcalc_data.encode("utf8"))
#outfile.close()
out, err = exec_via_temp(socialcalc_data,ether_command)
return out, err
def delete_spreadsheet(ether_url, name):
"""
Forcibly deletes EtherCalc spreadsheet from redis DB
:param name: name of the spreadsheet (last part of URL)
:return: void
"""
try:
r = requests.delete(ether_url + "_/" + name)
except:
pass
def sheet_exists(ether_path, name):
return len(get_socialcalc(ether_path,name)) > 0
def get_socialcalc(ether_path, name):
"""
Get SocialCalc format serialization for an EtherCalc spreadsheet
DB is available for a specified doc_id
:param ether_path: The EtherCalc server base URL, e.g. http://server.com/ethercalc/
:param name: spreadsheet name, e.g. gd_corpname_docname
:return: SocialCalc string
"""
command = "curl --netrc -X GET " + ether_path + "_/" + name
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(stdout, stderr) = proc.communicate()
socialcalc = stdout.decode("utf8")
# Destroy empty span cells without content, typically nested underneath longer, filled spans
socialcalc = re.sub(r'cell:[A-Z]+[0-9]+:f:1:rowspan:[0-9]+\n','',socialcalc)
return socialcalc
def get_timestamps(ether_path):
r = requests.get(ether_path + "_roomtimes")
times = r.json()
output = {}
for room in times:
output[room.replace("timestamp-", "")] = times[room]
return output
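# Hedged usage sketch (assumes an EtherCalc server at the given base URL; the URL and
# sheet name are illustrative): check that a sheet exists, fetch its SocialCalc
# serialization, then delete it from the server.
def _demo_ether_roundtrip(ether_url="http://127.0.0.1:8000/", name="gd_corpus_doc"):
    if sheet_exists(ether_url, name):
        socialcalc = get_socialcalc(ether_url, name)
        delete_spreadsheet(ether_url, name)
        return socialcalc
    return ""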
if __name__ == "__main__":
data = ""
storage = cgi.FieldStorage()
if "data" in storage:
data = storage.getvalue("data")
else:
data = ""
data = re.sub('>', '>\n', data)
data = re.sub('</', '\n</', data)
data = re.sub('\n+', '\n', data)
ether_out = sgml_to_ether(data)
print(ether_out)
|