"""Support for Android IP Webcam."""
import asyncio
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD,
CONF_SENSORS, CONF_SWITCHES, CONF_TIMEOUT, CONF_SCAN_INTERVAL,
CONF_PLATFORM)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_send, async_dispatcher_connect)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from homeassistant.components.mjpeg.camera import (
CONF_MJPEG_URL, CONF_STILL_IMAGE_URL)
_LOGGER = logging.getLogger(__name__)
ATTR_AUD_CONNS = 'Audio Connections'
ATTR_HOST = 'host'
ATTR_VID_CONNS = 'Video Connections'
CONF_MOTION_SENSOR = 'motion_sensor'
DATA_IP_WEBCAM = 'android_ip_webcam'
DEFAULT_NAME = 'IP Webcam'
DEFAULT_PORT = 8080
DEFAULT_TIMEOUT = 10
DOMAIN = 'android_ip_webcam'
SCAN_INTERVAL = timedelta(seconds=10)
SIGNAL_UPDATE_DATA = 'android_ip_webcam_update'
KEY_MAP = {
'audio_connections': 'Audio Connections',
'adet_limit': 'Audio Trigger Limit',
'antibanding': 'Anti-banding',
'audio_only': 'Audio Only',
'battery_level': 'Battery Level',
'battery_temp': 'Battery Temperature',
'battery_voltage': 'Battery Voltage',
'coloreffect': 'Color Effect',
'exposure': 'Exposure Level',
'exposure_lock': 'Exposure Lock',
'ffc': 'Front-facing Camera',
'flashmode': 'Flash Mode',
'focus': 'Focus',
'focus_homing': 'Focus Homing',
'focus_region': 'Focus Region',
'focusmode': 'Focus Mode',
'gps_active': 'GPS Active',
'idle': 'Idle',
'ip_address': 'IPv4 Address',
'ipv6_address': 'IPv6 Address',
'ivideon_streaming': 'Ivideon Streaming',
'light': 'Light Level',
'mirror_flip': 'Mirror Flip',
'motion': 'Motion',
'motion_active': 'Motion Active',
'motion_detect': 'Motion Detection',
'motion_event': 'Motion Event',
'motion_limit': 'Motion Limit',
'night_vision': 'Night Vision',
'night_vision_average': 'Night Vision Average',
'night_vision_gain': 'Night Vision Gain',
'orientation': 'Orientation',
'overlay': 'Overlay',
'photo_size': 'Photo Size',
'pressure': 'Pressure',
'proximity': 'Proximity',
'quality': 'Quality',
'scenemode': 'Scene Mode',
'sound': 'Sound',
'sound_event': 'Sound Event',
'sound_timeout': 'Sound Timeout',
'torch': 'Torch',
'video_connections': 'Video Connections',
'video_chunk_len': 'Video Chunk Length',
'video_recording': 'Video Recording',
'video_size': 'Video Size',
'whitebalance': 'White Balance',
'whitebalance_lock': 'White Balance Lock',
'zoom': 'Zoom'
}
ICON_MAP = {
'audio_connections': 'mdi:speaker',
'battery_level': 'mdi:battery',
'battery_temp': 'mdi:thermometer',
'battery_voltage': 'mdi:battery-charging-100',
'exposure_lock': 'mdi:camera',
'ffc': 'mdi:camera-front-variant',
'focus': 'mdi:image-filter-center-focus',
'gps_active': 'mdi:crosshairs-gps',
'light': 'mdi:flashlight',
'motion': 'mdi:run',
'night_vision': 'mdi:weather-night',
'overlay': 'mdi:monitor',
'pressure': 'mdi:gauge',
'proximity': 'mdi:map-marker-radius',
'quality': 'mdi:quality-high',
'sound': 'mdi:speaker',
'sound_event': 'mdi:speaker',
'sound_timeout': 'mdi:speaker',
'torch': 'mdi:white-balance-sunny',
'video_chunk_len': 'mdi:video',
'video_connections': 'mdi:eye',
'video_recording': 'mdi:record-rec',
'whitebalance_lock': 'mdi:white-balance-auto'
}
SWITCHES = ['exposure_lock', 'ffc', 'focus', 'gps_active',
'motion_detect', 'night_vision', 'overlay',
'torch', 'whitebalance_lock', 'video_recording']
SENSORS = ['audio_connections', 'battery_level', 'battery_temp',
'battery_voltage', 'light', 'motion', 'pressure', 'proximity',
'sound', 'video_connections']
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.All(cv.ensure_list, [vol.Schema({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL):
cv.time_period,
vol.Inclusive(CONF_USERNAME, 'authentication'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'authentication'): cv.string,
vol.Optional(CONF_SWITCHES):
vol.All(cv.ensure_list, [vol.In(SWITCHES)]),
vol.Optional(CONF_SENSORS):
vol.All(cv.ensure_list, [vol.In(SENSORS)]),
vol.Optional(CONF_MOTION_SENSOR): cv.boolean,
})])
}, extra=vol.ALLOW_EXTRA)
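# Illustrative configuration.yaml snippet accepted by CONFIG_SCHEMA above
# (host, credentials and entity selections are placeholders):
#
#   android_ip_webcam:
#     - host: 192.168.1.20
#       port: 8080
#       username: admin
#       password: secret
#       scan_interval: 10
#       sensors:
#         - battery_level
#         - light
#       switches:
#         - torch
#       motion_sensor: true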
async def async_setup(hass, config):
"""Set up the IP Webcam component."""
from pydroid_ipcam import PyDroidIPCam
webcams = hass.data[DATA_IP_WEBCAM] = {}
websession = async_get_clientsession(hass)
async def async_setup_ipcamera(cam_config):
"""Set up an IP camera."""
host = cam_config[CONF_HOST]
username = cam_config.get(CONF_USERNAME)
password = cam_config.get(CONF_PASSWORD)
name = cam_config[CONF_NAME]
interval = cam_config[CONF_SCAN_INTERVAL]
switches = cam_config.get(CONF_SWITCHES)
sensors = cam_config.get(CONF_SENSORS)
motion = cam_config.get(CONF_MOTION_SENSOR)
# Init ip webcam
cam = PyDroidIPCam(
hass.loop, websession, host, cam_config[CONF_PORT],
username=username, password=password,
timeout=cam_config[CONF_TIMEOUT]
)
if switches is None:
switches = [setting for setting in cam.enabled_settings
if setting in SWITCHES]
if sensors is None:
sensors = [sensor for sensor in cam.enabled_sensors
if sensor in SENSORS]
sensors.extend(['audio_connections', 'video_connections'])
if motion is None:
motion = 'motion_active' in cam.enabled_sensors
async def async_update_data(now):
"""Update data from IP camera in SCAN_INTERVAL."""
await cam.update()
async_dispatcher_send(hass, SIGNAL_UPDATE_DATA, host)
async_track_point_in_utc_time(
hass, async_update_data, utcnow() + interval)
await async_update_data(None)
# Load platforms
webcams[host] = cam
mjpeg_camera = {
CONF_PLATFORM: 'mjpeg',
CONF_MJPEG_URL: cam.mjpeg_url,
CONF_STILL_IMAGE_URL: cam.image_url,
CONF_NAME: name,
}
if username and password:
mjpeg_camera.update({
CONF_USERNAME: username,
CONF_PASSWORD: password
})
hass.async_create_task(discovery.async_load_platform(
hass, 'camera', 'mjpeg', mjpeg_camera, config))
if sensors:
hass.async_create_task(discovery.async_load_platform(
hass, 'sensor', DOMAIN, {
CONF_NAME: name,
CONF_HOST: host,
CONF_SENSORS: sensors,
}, config))
if switches:
hass.async_create_task(discovery.async_load_platform(
hass, 'switch', DOMAIN, {
CONF_NAME: name,
CONF_HOST: host,
CONF_SWITCHES: switches,
}, config))
if motion:
hass.async_create_task(discovery.async_load_platform(
hass, 'binary_sensor', DOMAIN, {
CONF_HOST: host,
CONF_NAME: name,
}, config))
tasks = [async_setup_ipcamera(conf) for conf in config[DOMAIN]]
if tasks:
await asyncio.wait(tasks, loop=hass.loop)
return True
class AndroidIPCamEntity(Entity):
"""The Android device running IP Webcam."""
def __init__(self, host, ipcam):
"""Initialize the data object."""
self._host = host
self._ipcam = ipcam
async def async_added_to_hass(self):
"""Register update dispatcher."""
@callback
def async_ipcam_update(host):
"""Update callback."""
if self._host != host:
return
self.async_schedule_update_ha_state(True)
async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_DATA, async_ipcam_update)
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
@property
def available(self):
"""Return True if entity is available."""
return self._ipcam.available
@property
def device_state_attributes(self):
"""Return the state attributes."""
state_attr = {ATTR_HOST: self._host}
if self._ipcam.status_data is None:
return state_attr
state_attr[ATTR_VID_CONNS] = \
self._ipcam.status_data.get('video_connections')
state_attr[ATTR_AUD_CONNS] = \
self._ipcam.status_data.get('audio_connections')
return state_attr
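# Illustrative sketch of how a platform module (e.g. sensor.py) could build on
# the base entity above; the subclass name and the export_sensor() helper are
# assumptions, not guaranteed APIs:
#
#   class IPWebcamSensor(AndroidIPCamEntity):
#       """A sensor backed by one Android IP Webcam reading."""
#
#       def __init__(self, name, host, ipcam, sensor):
#           super().__init__(host, ipcam)
#           self._name = '{} {}'.format(name, KEY_MAP.get(sensor, sensor))
#           self._sensor = sensor
#           self._state = None
#
#       async def async_update(self):
#           self._state, _ = self._ipcam.export_sensor(self._sensor)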
import numpy as np
import scipy.special
import lmfit
h = 6.626e-34  # Planck constant [J s]
hbar = 1.054571e-34  # reduced Planck constant [J s]
kB = 1.38065e-23  # Boltzmann constant [J/K]
qC = 1.602e-19  # elementary charge [C]
kBeV = kB/qC
def sigmas(fres,Tphys,Tc):
wres = fres*2*np.pi
xi = hbar*wres/(2*kB*Tphys)
Delta = 3.52*kB*Tc/2.0
sigma1 = (((4*Delta) / (hbar*wres)) *
np.exp(-Delta/(kB*Tphys)) *
np.sinh(xi) *
scipy.special.k0(xi))
sigma2 = (((np.pi*Delta) / (hbar*wres)) *
(1 - np.sqrt((2*np.pi*kB*Tphys) / Delta)*np.exp(-Delta/(kB*Tphys))
- 2 * np.exp(-Delta/(kB*Tphys)) * np.exp(-xi) * scipy.special.i0(xi)))
return sigma1,sigma2
def s1s2(Tphys,Tc,fread):
xi = h*fread/(2*kB*Tphys)
Delta = 3.52*kB*Tc/2.0
S1 = (2/np.pi)*np.sqrt(2*Delta/(np.pi*kB*Tphys))*np.sinh(xi)*scipy.special.k0(xi)
S2 = 1 + np.sqrt(2*Delta/(np.pi*kB*Tphys))*np.exp(-xi)*scipy.special.i0(xi)
return S1,S2
def T_nqp_equals_nqp0(Delta,N0,nqp0):
"""
Find the temperature at which the thermally generated QP density equals the given nqp0
Delta : eV
Formula found by asking Wolfram Alpha to solve nqp(T) == nqp0 for T
"""
return np.real((2*Delta) /
(kBeV * scipy.special.lambertw(16 * Delta**2 * N0**2 * np.pi / nqp0)))
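# Minimal usage sketch for the helpers above (illustrative values: a 150 MHz
# resonator read out at a 250 mK bath temperature with Tc = 1.46 K):
#
#   sigma1, sigma2 = sigmas(fres=150e6, Tphys=0.25, Tc=1.46)
#   S1, S2 = s1s2(Tphys=0.25, Tc=1.46, fread=150e6)
#   T0 = T_nqp_equals_nqp0(Delta=1.74*kBeV*1.46, N0=1.72e10, nqp0=5000.)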
class KIDModel(object):
def __init__(self,Tc=1.46, nqp0=5000, Tbase=0.25, f0_nom = 150e6,
sigman=5e5, ind_l_um=46750.0, ind_w_um=2.0, ind_h_nm=20, Lg=7e-8,
Qc=1e5, P_read=-76, T_noise=4.0, delta_0 = 1e-3, F_TLS=1.0,
N0 = 1.72e10, cap=11.7e-12, T_star=0.35, tau_star=40e-6,
delta_loss=1e-5):
self.params = lmfit.Parameters()
self.params.add('nqp0',value = nqp0, min = 0,max=1e5)
self.params.add('delta_0',value = delta_0, min = 0,max=1)
self.params.add('F_TLS',value=F_TLS,min=0, max=1)
self.params.add('sigman',value=sigman,min=1e4,max=1e6)
self.params.add('T_star',value=T_star,min=.1,max=1)
self.params.add('Tc',value=Tc,min=1,max=2)
self.params.add('delta_loss',value=delta_loss,min=0,max=1e-1)
self.params.add('Lg',value = Lg,min=0,max=100)
self.params.add('cap',value = cap,min=0,max=100)
self.params.add('foffset',value=0,min=-1e-4,max=1e-4)
self.nqp0 = nqp0
# self.Tbase = Tbase
self.f0_nom = f0_nom
self.ind_l_um = ind_l_um
self.ind_w_um = ind_w_um
self.ind_h_nm = ind_h_nm
self.N0 = N0
self.Qc = Qc
self.tau_star = tau_star
self.P_read = P_read
self.T_noise = T_noise
self.fit_for = None
@property
def foffset(self):
return self.params['foffset'].value
@property
def delta_0(self):
return self.params['delta_0'].value
@property
def delta_loss(self):
return self.params['delta_loss'].value
@property
def Tc(self):
return self.params['Tc'].value
@property
def Lg(self):
# can upgrade this later to some sort of estimate
return self.params['Lg'].value*1e-9
@property
def cap(self):
return self.params['cap'].value*1e-12
@property
def ind_nsq(self):
return self.ind_l_um/self.ind_w_um
@property
def Delta(self):
return 1.74 * kBeV * self.Tc
@property
def ind_vol(self):
return self.ind_w_um * self.ind_l_um * self.ind_h_nm * 1e-3
@property
def v_read(self):
return np.sqrt(50 * 10**(self.P_read/10) * 1e-3)
def nqp(self,T):
nqp0 = self.params['nqp0'].value
return 2 * self.N0 * np.sqrt(2*np.pi*kBeV*T*self.Delta) * np.exp(-self.Delta/(kBeV*T)) + nqp0
def tau_qp(self,T):
T_star = self.params['T_star'].value
return self.tau_star*self.nqp(T_star)/self.nqp(T)
def mu_star(self,T):
return kBeV * T * np.log(self.nqp(T)/(2*self.N0*np.sqrt(2*np.pi*kBeV*T*self.Delta))) + self.Delta
def xi(self,T):
return (h*self.f0_nom)/(2*kB*T)
def sigma1(self,T):
xi = self.xi(T)
sigman = self.params['sigman'].value
return ((sigman*(4*self.Delta*qC)/(h*self.f0_nom)) *
np.exp(-(self.Delta-self.mu_star(T))/(kBeV*T)) *
np.sinh(xi) * scipy.special.k0(xi))
def sigma2(self,T):
xi = self.xi(T)
sigman = self.params['sigman'].value
return ((sigman*(np.pi*self.Delta*qC)/(h*self.f0_nom)) *
(1 - (self.nqp(T)/(2*self.N0*self.Delta))*(1+np.sqrt((2*self.Delta)/(np.pi*kBeV*T))*
np.exp(-xi)*scipy.special.i0(xi))))
def beta(self,T):
return self.sigma2(T)/self.sigma1(T)
def Lk(self,T):
return self.ind_nsq/(2*np.pi*self.f0_nom*self.sigma2(T)*self.ind_h_nm*1e-7)
def alpha(self,T):
lk = self.Lk(T)
return lk/(lk+self.Lg)
def Qi(self,T):
react = 2*np.pi*self.f0_nom*(self.Lg+self.Lk(T))
diss = self.ind_nsq*self.sigma1(T)/(self.sigma2(T)**2*self.ind_h_nm*1e-7)
return react/diss
def Qr(self,T):
return 1/(1/self.Qi(T)+1/self.Qc)
def depth_db(self,T):
return 20*np.log10(1-self.Qr(T)/self.Qc)
def res_width(self,T):
return self.f0_nom/self.Qr(T)
def total_qp(self,T):
return self.nqp(T)*self.ind_vol
def f_res(self,T):
return 1/(2*np.pi*np.sqrt((self.Lk(T)+self.Lg)*self.cap))
def dfdNqp(self,T):
y0 = self.f_res(T)
y1 = self.f_res(T+1e-3)
x0 = self.total_qp(T)
x1 = self.total_qp(T+1e-3)
return -(y1-y0)/(x1-x0)
def gr_noise(self,T):
return 2*np.sqrt(self.total_qp(T)*self.tau_qp(T))*self.dfdNqp(T)
def dVdf(self,T):
return 4 * self.v_read * self.alpha(T) * self.Qr(T)**2 / (self.Qc*self.f_res(T))
def amp_noise(self,T):
vn = np.sqrt(4*kB*self.T_noise*50)
return vn/self.dVdf(T)
def noise_spectrum(self,freq,Tphys):
fqp = 1/self.tau_qp(Tphys)
gr = (self.gr_noise(Tphys)*np.abs(1/(1+1j*freq/self.res_width(Tphys))))**2
return gr + self.amp_noise(Tphys)**2
def tls_shift(self,T):
F_TLS = self.params['F_TLS'].value
delta_0 = self.delta_0 #self.params['delta_0'].value
xi = self.xi(T)
return ((F_TLS*delta_0/np.pi) *
(np.real(scipy.special.psi(0.5+(xi/(1j*np.pi))))-
np.log(xi*2)))
def delta_tls(self,T):
delta_0 = self.delta_0 #self.params['delta_0'].value
xi = self.xi(T)
return delta_0 * np.tanh(xi) + self.params['delta_loss'].value
def total_Qi(self,T):
return 1/(1/self.Qi(T) + self.params['F_TLS'].value*self.delta_tls(T))
def total_fres(self,T):
#return (1+self.f_res(T))*(1+self.tls_shift(T))-1
return self.f_res(T)+self.tls_shift(T)+self.foffset
def fit_f0_resid(self,params,T,f0,f0_err=None):
if f0_err is None:
return (f0 - self.total_fres(T))#/self.f0_nom
else:
return (f0 - self.total_fres(T))/f0_err
def fit_f0(self,T,f0,f0_err=None):
self.fit_for = 'f0'
self.T_data = T
self.f0_data = f0
self.f0_err = f0_err
self.params['F_TLS'].value = 1.0
self.params['F_TLS'].vary = False
self.result = lmfit.minimize(self.fit_f0_resid,self.params,args=(T,f0,f0_err))
def fit_qi_resid(self,params,T,Qi,Qi_err=None):
if Qi_err is None:
return (Qi - self.total_Qi(T))#/1e7
else:
return abs(Qi - self.total_Qi(T))/Qi_err
def fit_qi(self,T,Qi,Qi_err=None):
self.fit_for = 'qi'
self.T_data = T
self.Qi_data = Qi
self.Qi_err = Qi_err
self.result = lmfit.minimize(self.fit_qi_resid,self.params,args=(T,Qi,Qi_err))
def fit_f0_qi_resid(self,params,T,f0,Qi,f0_err=None,Qi_err=None):
return np.concatenate((self.fit_f0_resid(params, T, f0,f0_err),self.fit_qi_resid(params, T, Qi,Qi_err)))
def fit_f0_qi(self,T,f0,Qi,f0_err = None, Qi_err = None,**kwargs):
self.fit_for = 'f0qi'
self.T_data = T
self.f0_data = f0
self.f0_err = f0_err
self.Qi_data = Qi
self.Qi_err = Qi_err
self.result = lmfit.minimize(self.fit_f0_qi_resid,self.params,args=(T,f0,Qi,f0_err,Qi_err),**kwargs)
def residual(self):
if self.fit_for == 'f0':
return self.fit_f0_resid(None,self.T_data,self.f0_data),self.f0_err
if self.fit_for == 'qi':
return self.fit_qi_resid(None,self.T_data,self.Qi_data),self.Qi_err
if self.fit_for == 'f0qi':
if self.f0_err is None or self.Qi_err is None:
errs = None
else:
errs = np.concatenate((self.f0_err,self.Qi_err))
return self.fit_f0_qi_resid(None,self.T_data,self.f0_data,self.Qi_data),errs
raise Exception("Got unexpected fit_for value: %s" % str(self.fit_for))
class DarkKIDModel(KIDModel):
def sigma1(self,T):
xi = self.xi(T)
sigman = self.params['sigman'].value
result = (sigman*(2*self.Delta*qC/(h*self.f0_nom)) *
(self.nqp(T)/(self.N0*np.sqrt(2*np.pi*kBeV*T*self.Delta))) *
np.sinh(xi) * scipy.special.k0(xi))
return result
def sigma2(self,T):
xi = self.xi(T)
sigman = self.params['sigman'].value
return ((sigman*(np.pi*self.Delta*qC)/(h*self.f0_nom)) *
(1 - (self.nqp(T)/(2*self.N0*self.Delta))*(1+np.sqrt((2*self.Delta)/(np.pi*kBeV*T))*
np.exp(-xi)*scipy.special.i0(xi))))
class DarkKIDModelFractional(DarkKIDModel):
def __init__(self,Tc=1.46, nqp0=0, f0_nom=100e6,
sigman=5e5, alpha=.66,
Qc=1e5, P_read=-106, T_noise=4.0, delta_0 = 0, F_TLS=1.0,
N0 = 1.72e10,
delta_loss=0.0):
self.params = lmfit.Parameters()
self.params.add('nqp0',value = nqp0, min = 0,max=1e5,vary=False)
self.params.add('delta_0',value = delta_0, min = 0,max=.1)
self.params.add('F_TLS',value=F_TLS,vary=False)
# self.params.add('sigman',value=sigman,min=1e4,max=1e6)
self.params.add('Tc',value=Tc,min=1,max=2,vary=False)
self.params.add('delta_loss',value=delta_loss,min=0,max=1e-1)
self.params.add('alpha',value = alpha,min=0.1,max=1,vary=True)
self.params.add('foffset',value=0,min=-1e-4,max=1e-4)
# self.Tbase = Tbase
self.N0 = N0
self.Qc = Qc
self.P_read = P_read
self.T_noise = T_noise
self.f0_nom = f0_nom
@property
def alpha(self):
return self.params['alpha'].value
@property
def nqp0(self):
return self.params['nqp0'].value
def f_res(self, T):
s1,s2 = s1s2(T, self.Tc, self.f0_nom)
#T0 = T_nqp_equals_nqp0(self.Delta, self.N0, self.nqp0)
delta_f = (self.alpha * s2 * self.nqp(T)) / (4 * self.N0 * self.Delta)
delta_f0 = 0#(self.alpha * s2 * (2*self.nqp0)) / (4 * self.N0 * self.Delta)
return -(delta_f-delta_f0)
def Qi(self,T):
s1,s2 = s1s2(T, self.Tc, self.f0_nom)
return (2*self.N0*self.Delta)/(self.alpha*s1*self.nqp(T))
def invQi(self,T):
return 1/self.Qi(T)
def total_delta_invQi(self,T):
delta_TLS_loss = self.delta_tls(T)
invQi = self.invQi(T)
return invQi + delta_TLS_loss + self.delta_loss
class DarkKIDModel2(KIDModel):
def sigma1(self,T):
xi = self.xi(T)
sigman = self.params['sigman'].value
result = (sigman*(2*self.Delta*qC/(h*self.f0_nom)) *
(self.nqp(T)/(self.N0*np.sqrt(2*np.pi*kBeV*T*self.Delta))) *
np.sinh(xi) * scipy.special.k0(xi))
return result
def sigma2(self,T):
xi = self.xi(T)
sigman = self.params['sigman'].value
return ((sigman*(np.pi*self.Delta*qC)/(h*self.f0_nom)) *
(1 - (self.nqp(T)/(2*self.N0*self.Delta))*(1+np.sqrt((2*self.Delta)/(np.pi*kBeV*T))*
np.exp(-xi)*scipy.special.i0(xi))))
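# Minimal usage sketch (illustrative bath temperatures; model parameters are
# the constructor defaults): evaluate the fractional frequency shift and the
# quasiparticle-limited Qi of the fractional model defined above.
if __name__ == '__main__':
    demo_model = DarkKIDModelFractional(Tc=1.46, f0_nom=100e6)
    demo_T = np.linspace(0.1, 0.4, 4)  # K
    print("x(T)  = %s" % demo_model.total_fres(demo_T))
    print("Qi(T) = %s" % demo_model.Qi(demo_T))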
import tempfile
import re
import os.path
from pip.util import call_subprocess
from pip.util import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.log import logger
from pip.backwardcompat import url2pathname, urlparse
urlsplit = urlparse.urlsplit
urlunsplit = urlparse.urlunsplit
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = ('git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file')
bundle_file = 'git-clone.txt'
guide = ('# This was a Git repo; to make it a repo again run:\n'
'git init\ngit remote add origin %(url)s -f\ngit checkout %(rev)s\n')
def __init__(self, url=None, *args, **kwargs):
# Works around an apparent Git bug
# (see http://article.gmane.org/gmane.comp.version-control.git/146500)
if url:
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith('file'):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = initial_slashes + url2pathname(path).replace('\\', '/').lstrip('/')
url = urlunsplit((scheme, netloc, newpath, query, fragment))
after_plus = scheme.find('+') + 1
url = scheme[:after_plus] + urlunsplit((scheme[after_plus:], netloc, newpath, query, fragment))
super(Git, self).__init__(url, *args, **kwargs)
def parse_vcs_bundle_file(self, content):
url = rev = None
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
url_match = re.search(r'git\s*remote\s*add\s*origin(.*)\s*-f', line)
if url_match:
url = url_match.group(1).strip()
rev_match = re.search(r'^git\s*checkout\s*-q\s*(.*)\s*', line)
if rev_match:
rev = rev_match.group(1).strip()
if url and rev:
return url, rev
return None, None
def export(self, location):
"""Export the Git repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
if not location.endswith('/'):
location = location + '/'
call_subprocess(
[self.cmd, 'checkout-index', '-a', '-f', '--prefix', location],
filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def check_rev_options(self, rev, dest, rev_options):
"""Check the revision options before checkout to compensate that tags
and branches may need origin/ as a prefix.
Returns the SHA1 of the branch or tag if found.
"""
revisions = self.get_tag_revs(dest)
revisions.update(self.get_branch_revs(dest))
origin_rev = 'origin/%s' % rev
if origin_rev in revisions:
# remote branch
return [revisions[origin_rev]]
elif rev in revisions:
# a local tag or branch name
return [revisions[rev]]
else:
logger.warn("Could not find a tag or branch '%s', assuming commit." % rev)
return rev_options
def switch(self, dest, url, rev_options):
call_subprocess(
[self.cmd, 'config', 'remote.origin.url', url], cwd=dest)
call_subprocess(
[self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)
self.update_submodules(dest)
def update(self, dest, rev_options):
# First fetch changes from the default remote
call_subprocess([self.cmd, 'fetch', '-q'], cwd=dest)
# Then reset to the wanted revision (maybe even origin/master)
if rev_options:
rev_options = self.check_rev_options(rev_options[0], dest, rev_options)
call_subprocess([self.cmd, 'reset', '--hard', '-q'] + rev_options, cwd=dest)
#: update submodules
self.update_submodules(dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to %s)' % rev
else:
rev_options = ['origin/master']
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Cloning %s%s to %s' % (url, rev_display, display_path(dest)))
call_subprocess([self.cmd, 'clone', '-q', url, dest])
#: repo may contain submodules
self.update_submodules(dest)
if rev:
rev_options = self.check_rev_options(rev, dest, rev_options)
# Only do a checkout if rev_options differs from HEAD
if not self.get_revision(dest).startswith(rev_options[0]):
call_subprocess([self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)
def get_url(self, location):
url = call_subprocess(
[self.cmd, 'config', 'remote.origin.url'],
show_stdout=False, cwd=location)
return url.strip()
def get_revision(self, location):
current_rev = call_subprocess(
[self.cmd, 'rev-parse', 'HEAD'], show_stdout=False, cwd=location)
return current_rev.strip()
def get_tag_revs(self, location):
tags = self._get_all_tag_names(location)
tag_revs = {}
for line in tags.splitlines():
tag = line.strip()
rev = self._get_revision_from_rev_parse(tag, location)
tag_revs[tag] = rev.strip()
return tag_revs
def get_branch_revs(self, location):
branches = self._get_all_branch_names(location)
branch_revs = {}
for line in branches.splitlines():
if '(no branch)' in line:
continue
line = line.split('->')[0].strip()
# actual branch case
branch = "".join(b for b in line.split() if b != '*')
rev = self._get_revision_from_rev_parse(branch, location)
branch_revs[branch] = rev.strip()
return branch_revs
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('git:'):
repo = 'git+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
tag_revs = self.get_tag_revs(location)
branch_revs = self.get_branch_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
elif (current_rev in branch_revs and
branch_revs[current_rev] != 'origin/master'):
# It's the head of a branch
full_egg_name = '%s-%s' % (
egg_project_name,
branch_revs[current_rev].replace('origin/', '')
)
else:
full_egg_name = '%s-dev' % egg_project_name
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
def get_url_rev(self):
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
That's required because although they use SSH they sometimes don't
work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
parsing. Hence we remove it again afterwards and return it as a stub.
"""
if not '://' in self.url:
assert not 'file:' in self.url
self.url = self.url.replace('git+', 'git+ssh://')
url, rev = super(Git, self).get_url_rev()
url = url.replace('ssh://', '')
else:
url, rev = super(Git, self).get_url_rev()
return url, rev
def _get_all_tag_names(self, location):
return call_subprocess([self.cmd, 'tag', '-l'],
show_stdout=False,
raise_on_returncode=False,
cwd=location)
def _get_all_branch_names(self, location):
remote_branches = call_subprocess([self.cmd, 'branch', '-r'],
show_stdout=False, cwd=location)
local_branches = call_subprocess([self.cmd, 'branch', '-l'],
show_stdout=False, cwd=location)
return remote_branches + local_branches
def _get_revision_from_rev_parse(self, name, location):
return call_subprocess([self.cmd, 'rev-parse', name],
show_stdout=False, cwd=location)
def update_submodules(self, location):
if not os.path.exists(os.path.join(location, '.gitmodules')):
return
call_subprocess([self.cmd, 'submodule', 'update', '--init', '--recursive', '-q'],
cwd=location)
vcs.register(Git)
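# Illustrative behaviour of the URL handling above (hypothetical URL/rev):
#
#   Git('git+git@github.com:user/repo.git@v1.0').get_url_rev()
#       -> ('git@github.com:user/repo.git', 'v1.0')
#
#   get_src_requirement() then pins the editable checkout to a commit,
#   returning a string of the form
#       'git+git@github.com:user/repo.git@<sha>#egg=<project>-dev'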
#
# test_base.py
#
# test base class
#
#
import sys
import math
sys.path.append("../common")
from bsg_cache_trace_gen import *
class TestBase:
MAX_ADDR = (2**17)
# default constructor
def __init__(self):
addr_width_p = 30
self.data_width_p = 512//int(sys.argv[1])  # integer division keeps the width an int
self.tg = BsgCacheTraceGen(addr_width_p,self.data_width_p)
self.curr_data = 1
self.sets_p = 64
self.ways_p = 8
self.block_size_in_words_p = int(sys.argv[1])
# TAGST
def send_tagst(self, way, index, valid=0, lock=0, tag=0):
addr = self.get_addr(way, index)
data = (valid << 63) + (lock << 62) + tag
self.tg.send(TAGST, addr, data)
# SD
def send_sd(self, addr):
self.tg.send(SD, addr, self.curr_data)
self.curr_data += 1
# SW
def send_sw(self, addr):
self.tg.send(SW, addr, self.curr_data)
self.curr_data += 1
# SH
def send_sh(self, addr):
self.tg.send(SH, addr, self.curr_data)
self.curr_data += 1
# SB
def send_sb(self, addr):
self.tg.send(SB, addr, self.curr_data)
self.curr_data += 1
# SM
def send_sm(self, addr, mask):
self.tg.send(SM, addr, self.curr_data, mask)
self.curr_data += 1
# LM
def send_lm(self, addr, mask):
self.tg.send(LM, addr, 0, mask)
# LD
def send_ld(self, addr):
self.tg.send(LD, addr)
# LW
def send_lw(self, addr):
self.tg.send(LW, addr)
# LH
def send_lh(self, addr):
self.tg.send(LH, addr)
# LB
def send_lb(self, addr):
self.tg.send(LB, addr)
# LWU
def send_lwu(self, addr):
self.tg.send(LWU, addr)
# LHU
def send_lhu(self, addr):
self.tg.send(LHU, addr)
# LBU
def send_lbu(self, addr):
self.tg.send(LBU, addr)
# AMOSWAP_W
def send_amoswap_w(self, addr):
self.tg.send(AMOSWAP_W, addr, self.curr_data)
self.curr_data += 1
# AMOADD_W
def send_amoadd_w(self, addr):
self.tg.send(AMOADD_W, addr, self.curr_data)
self.curr_data += 1
# AMOXOR_W
def send_amoxor_w(self, addr):
self.tg.send(AMOXOR_W, addr, self.curr_data)
self.curr_data += 1
# AMOAND_W
def send_amoand_w(self, addr):
self.tg.send(AMOAND_W, addr, self.curr_data)
self.curr_data += 1
# AMOOR_W
def send_amoor_w(self, addr):
self.tg.send(AMOOR_W, addr, self.curr_data)
self.curr_data += 1
# AMOMIN_W
def send_amomin_w(self, addr):
self.tg.send(AMOMIN_W, addr, self.curr_data)
self.curr_data += 1
# AMOMAX_W
def send_amomax_w(self, addr):
self.tg.send(AMOMAX_W, addr, self.curr_data)
self.curr_data += 1
# AMOMINU_W
def send_amominu_w(self, addr):
self.tg.send(AMOMINU_W, addr, self.curr_data)
self.curr_data += 1
# AMOMAXU_W
def send_amomaxu_w(self, addr):
self.tg.send(AMOMAXU_W, addr, self.curr_data)
self.curr_data += 1
# AMOSWAP_D
def send_amoswap_d(self, addr):
self.tg.send(AMOSWAP_D, addr, self.curr_data)
self.curr_data += 1
# AMOADD_D
def send_amoadd_d(self, addr):
self.tg.send(AMOADD_D, addr, self.curr_data)
self.curr_data += 1
# AMOXOR_D
def send_amoxor_d(self, addr):
self.tg.send(AMOXOR_D, addr, self.curr_data)
self.curr_data += 1
# AMOAND_D
def send_amoand_d(self, addr):
self.tg.send(AMOAND_D, addr, self.curr_data)
self.curr_data += 1
# AMOOR_D
def send_amoor_d(self, addr):
self.tg.send(AMOOR_D, addr, self.curr_data)
self.curr_data += 1
# AMOMIN_D
def send_amomin_d(self, addr):
self.tg.send(AMOMIN_D, addr, self.curr_data)
self.curr_data += 1
# AMOMAX_D
def send_amomax_d(self, addr):
self.tg.send(AMOMAX_D, addr, self.curr_data)
self.curr_data += 1
# AMOMINU_D
def send_amominu_d(self, addr):
self.tg.send(AMOMINU_D, addr, self.curr_data)
self.curr_data += 1
# AMOMAXU_D
def send_amomaxu_d(self, addr):
self.tg.send(AMOMAXU_D, addr, self.curr_data)
self.curr_data += 1
# nop
def send_nop(self, n=1):
for i in range(n):
self.tg.nop()
#                          #
#   COMPOSITE FUNCTIONS    #
#                          #
def clear_tag(self):
for way in range(self.ways_p):
for index in range(self.sets_p):
self.send_tagst(way, index)
#                          #
#    HELPER FUNCTIONS      #
#                          #
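# Address layout assumed from the shifts below: the tag starts at bit 12,
# the index occupies bits 6-11 (64 sets), and the block/byte offsets fill
# the low 6 bits.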
def get_addr(self, tag, index, block_offset=0, byte_offset=0):
lg_data_size_in_byte_lp = int(math.log(self.data_width_p/8, 2))
addr = tag << 12
addr += index << 6
addr += block_offset << lg_data_size_in_byte_lp
addr += byte_offset
return addr
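# Illustrative usage sketch (hypothetical subclass; a concrete test script
# would be invoked as `python <test>.py <block_size_in_words>` so that
# sys.argv[1] is set before TestBase is constructed):
#
#   class TestExample(TestBase):
#       def generate(self):
#           self.clear_tag()
#           taddr = self.get_addr(tag=0, index=3)
#           self.send_sw(taddr)
#           self.send_lw(taddr)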
# -*- coding: utf-8 -*-
import sys
#sys.path.append('/home/camacho/Downloads/emcee-master')
#sys.path.append('/home/up200908455/Desktop/Github/Gedi')
#sys.path.append('/home/up200908455/Desktop/Github/Emcee')
#sys.path.append('/home/up200908455/Desktop/Github/George')
#sys.path.append('/home/up200908455/Desktop/Github/Corner')
#sys.path.append('/home/up200908455/Desktop/Github/Scipy')
import Gedi as gedi
import numpy as np; #np.random.seed(13042017)
import matplotlib.pylab as pl; pl.close("all")
from matplotlib.ticker import MaxNLocator
import astropy.table as Table
from time import time
import emcee as emcee
import corner
from scipy import stats
import cPickle as pickle
print 'It has begun.'
print
##### PROPERTIES ##############################################################
#spots dataset to analyse
ijk_initial= 20
ijk_final=ijk_initial+1
#period interval to analyse
Pa, Pb = 15, 35
#MCMC
burns, runs = 10000, 10000
#Define what is supposed to run: 1 runs it, 0 doesn't
day_1, daydecay_1, daydecaygap_1 = 1, 1, 1 #1 measurement a day
day_4, daydecay_4, daydecaygap_4 = 1, 1, 1 #1 measurement every 4 days
#Priors
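#lnprob works in log-space: p = [ln(amplitude), ln(length scale), ln(period),
#ln(white noise)], which matches the np.exp() applied to the chains before
#the corner plots below.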
def lnprob(p):
global kernel
if any([p[0] < -6, p[0] > 6,
p[1] < -10, p[1] > np.log(10),
p[2] < np.log(Pa), p[2] > np.log(Pb),
p[3] < -10, p[3] > np.log(10)]):
return -np.inf
lnprior=0.0
# if any([p[0] < -6, p[0] > 6,
# p[1] < np.log(0.1), p[1] > np.log(10),
# p[2] < np.log(10), p[2] > np.log(50),
# p[3] < -10, p[3] > np.log(10)]):
# return -np.inf
# lnprior=0.0
# Update the kernel and compute the lnlikelihood.
kernel=gedi.kernel_optimization.new_kernel(kernel,np.exp(p))
new_likelihood=gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
return lnprior + new_likelihood
###############################################################################
for ijk in range(ijk_initial,ijk_final):
#file with the data of the spots on the 2 hemispheres
soap_file='output_spots{0}'.format(ijk)
#stats.uniform(a,b-a)
# amplitude_prior=stats.uniform(np.exp(-6), np.exp(6)-np.exp(-6))
# lenghtscale_prior=stats.uniform(0.1, 10.0-0.1)
# period_prior=stats.uniform(10, 50-10)
# wn_prior=stats.uniform(np.exp(-10), 10-np.exp(-10))
amplitude_prior=stats.uniform(np.exp(-6), np.exp(6)-np.exp(-6))
lenghtscale_prior=stats.uniform(np.exp(-10), 10-np.exp(-10))
period_prior=stats.uniform(Pa, Pb-Pa)
wn_prior=stats.uniform(np.exp(-10), 10-np.exp(-10))
def from_prior():
return np.array([amplitude_prior.rvs(),lenghtscale_prior.rvs(),
period_prior.rvs(),wn_prior.rvs()])
##### FILE ################################################################
if day_1 ==1:
f=open("{0}_1day.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y= np.array(spots_info+yerr)
t= np.array(range(1,101))
print "Done."
print
print '> Preparing kernel.'
print
kernel=gedi.kernel.ExpSineSquared(amplitude_prior.rvs(),
lenghtscale_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4) #log
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$P$")
axes[3].plot(sampler.chain[:, :, 3].T, color="k", alpha=0.4) #log
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$WN$")
axes[3].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_1day.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_1day_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_1day_L.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_1day_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_1day_WN.p".format(soap_file), 'w'),protocol=-1)
#samples=pickle.load(open("{0}_1day.p".format(soap_file)))
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale
samples[:, 2] = np.exp(samples[:, 2]) #period
samples[:, 3] = np.exp(samples[:, 3]) #white noise
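#16th/50th/84th percentiles -> median and the -/+1 sigma credible interval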
theta_mcmc,l_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_1day.png'.format(soap_file))
pl.close('all')
print '> Preparing final kernel.'
print
kernelf=gedi.kernel.ExpSineSquared(theta_mcmc[0],l_mcmc[0],p_mcmc[0]) +\
gedi.kernel.WhiteNoise(wn_mcmc[0])
print 'Kernel =', kernelf
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernelf,t,y,yerr)
print
print 'Done.'
print
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
else:
pass
##### FILE ################################################################
if daydecay_1==1:
f=open("{0}_1day_decay.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y= np.array(spots_info+yerr)
decay=np.linspace(1,0.5,len(y))
y=[n*m for n,m in zip(y,decay)]
y=np.array(y)
t= np.array(range(1,101))
print "Done."
print
print '> Preparing kernel.'
print
kernel=gedi.kernel.ExpSineSquared(amplitude_prior.rvs(),
lenghtscale_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4) #log
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$P$")
axes[3].plot(sampler.chain[:, :, 3].T, color="k", alpha=0.4) #log
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$WN$")
axes[3].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_1day_decay.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_1day_decay_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_1day_decay_L.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_1day_decay_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_1day_decay_WN.p".format(soap_file), 'w'),protocol=-1)
#samples=pickle.load(open("{0}_1day.p".format(soap_file)))
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale
samples[:, 2] = np.exp(samples[:, 2]) #period
samples[:, 3] = np.exp(samples[:, 3]) #white noise
theta_mcmc,l_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_1day_decay.png'.format(soap_file))
pl.close('all')
print '> Preparing final kernel.'
print
kernelf=gedi.kernel.ExpSineSquared(theta_mcmc[0],l_mcmc[0],p_mcmc[0]) +\
gedi.kernel.WhiteNoise(wn_mcmc[0])
print 'Kernel =', kernelf
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernelf,t,y,yerr)
print
print 'Done.'
print
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
else:
pass
##### FILE ################################################################
if daydecaygap_1==1:
f=open("{0}_1day_decaygap.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y0= np.array(spots_info+yerr)
decay=np.linspace(1,0.5,len(y0))
y0=[n*m for n,m in zip(y0,decay)]
#new t and y
t1= range(1,30)
t2= range(60,101)
t=np.array(t1+t2)
y=[]
yerr1=[]
for i,e in enumerate(t):
y.append(y0[e-1])
yerr1.append(yerr[e-1])
yerr=np.array(yerr1)
y=np.array(y)
print "Done."
print
print '> Preparing kernel.'
print
kernel=gedi.kernel.ExpSineSquared(amplitude_prior.rvs(),
lenghtscale_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4) #log
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$P$")
axes[3].plot(sampler.chain[:, :, 3].T, color="k", alpha=0.4) #log
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$WN$")
axes[3].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_1day_decaygap.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_1day_dgap_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_1day_dgap_L.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_1day_dgap_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_1day_dgap_WN.p".format(soap_file), 'w'),protocol=-1)
#samples=pickle.load(open("{0}_1day.p".format(soap_file)))
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale
samples[:, 2] = np.exp(samples[:, 2]) #period
samples[:, 3] = np.exp(samples[:, 3]) #white noise
theta_mcmc,l_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_1day_decaygap.png'.format(soap_file))
pl.close('all')
print '> Preparing final kernel.'
print
kernelf=gedi.kernel.ExpSineSquared(theta_mcmc[0],l_mcmc[0],p_mcmc[0]) +\
gedi.kernel.WhiteNoise(wn_mcmc[0])
print 'Kernel =', kernelf
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernelf,t,y,yerr)
print
print 'Done.'
print
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
else:
pass
##### FILE ################################################################
if day_4==1:
f=open("{0}_4days.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per 4 days
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr0= np.array(0.5*np.random.randn(len(spots_info)))
y0= np.array(spots_info+yerr0)
t0= np.array(range(1,101))
y=[]
yerr=[]
t=[]
for ii in np.arange(0,len(t0),4):
y.append(y0[ii])
yerr.append(yerr0[ii])
t.append(t0[ii])
y=np.array(y)
yerr=np.array(yerr)
t=np.array(t)
print "Done."
print '> Preparing kernel.'
print
kernel=gedi.kernel.ExpSineSquared(amplitude_prior.rvs(),
lenghtscale_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4) #log
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$P$")
axes[3].plot(sampler.chain[:, :, 3].T, color="k", alpha=0.4) #log
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$WN$")
axes[3].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_4days.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_4days_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_4days_L.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_4days_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_4days_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale
samples[:, 2] = np.exp(samples[:, 2]) #period
samples[:, 3] = np.exp(samples[:, 3]) #white noise
theta_mcmc,l_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_4days.png'.format(soap_file))
pl.close('all')
print '> Preparing final kernel.'
print
kernelf=gedi.kernel.ExpSineSquared(theta_mcmc[0],l_mcmc[0],p_mcmc[0]) +\
gedi.kernel.WhiteNoise(wn_mcmc[0])
print 'Kernel =', kernelf
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernelf,t,y,yerr)
print
print 'Done.'
print
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
else:
pass
##### FILE ################################################################
if daydecay_4==1:
f=open("{0}_4days_decay.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per 4 days
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr0= np.array(0.5*np.random.randn(len(spots_info)))
y= np.array(spots_info+yerr0)
decay=np.linspace(1,0.5,len(y))
y=[n*m for n,m in zip(y,decay)]
y0=np.array(y)
t0=np.array(range(1,101))
y=[]
yerr=[]
t=[]
for ii in np.arange(0,len(t0),4):
y.append(y0[ii])
yerr.append(yerr0[ii])
t.append(t0[ii])
y=np.array(y)
yerr=np.array(yerr)
t=np.array(t)
print "Done."
print
print '> Preparing kernel.'
print
kernel=gedi.kernel.ExpSineSquared(amplitude_prior.rvs(),
lenghtscale_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4) #log
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$P$")
axes[3].plot(sampler.chain[:, :, 3].T, color="k", alpha=0.4) #log
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$WN$")
axes[3].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_4days_decay.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_4days_decay_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_4days_decay_L.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_4days_decay_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_4days_decay_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale
samples[:, 2] = np.exp(samples[:, 2]) #period
samples[:, 3] = np.exp(samples[:, 3]) #white noise
theta_mcmc,l_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_4days_decay.png'.format(soap_file))
pl.close('all')
print '> Preparing final kernel.'
print
kernelf=gedi.kernel.ExpSineSquared(theta_mcmc[0],l_mcmc[0],p_mcmc[0]) +\
gedi.kernel.WhiteNoise(wn_mcmc[0])
print 'Kernel =', kernelf
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernelf,t,y,yerr)
print
print 'Done.'
print
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
else:
pass
##### FILE ################################################################
if daydecaygap_4==1:
f=open("{0}_4days_decaygap.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y0= np.array(spots_info+yerr)
decay=np.linspace(1,0.5,len(y0))
y0=[n*m for n,m in zip(y0,decay)]
#new t and y
t1= range(1,30)
t2= range(60,101)
t=np.array(t1+t2)
y=[]
yerr1=[]
for i,e in enumerate(t):
y.append(y0[e-1])
yerr1.append(yerr[e-1])
yerr0=np.array(yerr1)
y0=np.array(y)
t0=t
y=[]
yerr=[]
t=[]
for ii in np.arange(0,len(t0),4):
y.append(y0[ii])
yerr.append(yerr0[ii])
t.append(t0[ii])
y=np.array(y)
yerr=np.array(yerr)
t=np.array(t)
print "Done."
print
print '> Preparing kernel.'
print
kernel=gedi.kernel.ExpSineSquared(amplitude_prior.rvs(),
lenghtscale_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4) #log
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$P$")
axes[3].plot(sampler.chain[:, :, 3].T, color="k", alpha=0.4) #log
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$WN$")
axes[3].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_4days_decaygap.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_4days_dgap_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_4days_dgap_L.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_4days_dgap_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_4days_dgap_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale
samples[:, 2] = np.exp(samples[:, 2]) #period
samples[:, 3] = np.exp(samples[:, 3]) #white noise
theta_mcmc,l_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_4days_decaygap.png'.format(soap_file))
pl.close('all')
print '> Preparing final kernel.'
print
kernelf=gedi.kernel.ExpSineSquared(theta_mcmc[0],l_mcmc[0],p_mcmc[0]) +\
gedi.kernel.WhiteNoise(wn_mcmc[0])
print 'Kernel =', kernelf
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernelf,t,y,yerr)
print
print 'Done.'
print
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
else:
pass
print 'It is over'
print
import logging
from mopidy import models
import spotify
from mopidy_spotify import countries, playlists, translator
from mopidy_spotify.utils import flatten
logger = logging.getLogger(__name__)
ROOT_DIR = models.Ref.directory(uri="spotify:directory", name="Spotify")
_TOP_LIST_DIR = models.Ref.directory(uri="spotify:top", name="Top lists")
_YOUR_MUSIC_DIR = models.Ref.directory(uri="spotify:your", name="Your music")
_PLAYLISTS_DIR = models.Ref.directory(uri="spotify:playlists", name="Playlists")
_ROOT_DIR_CONTENTS = [
_TOP_LIST_DIR,
_YOUR_MUSIC_DIR,
_PLAYLISTS_DIR,
]
_TOP_LIST_DIR_CONTENTS = [
models.Ref.directory(uri="spotify:top:tracks", name="Top tracks"),
models.Ref.directory(uri="spotify:top:albums", name="Top albums"),
models.Ref.directory(uri="spotify:top:artists", name="Top artists"),
]
_YOUR_MUSIC_DIR_CONTENTS = [
models.Ref.directory(uri="spotify:your:tracks", name="Your tracks"),
models.Ref.directory(uri="spotify:your:albums", name="Your albums"),
]
_PLAYLISTS_DIR_CONTENTS = [
models.Ref.directory(uri="spotify:playlists:featured", name="Featured"),
]
_TOPLIST_TYPES = {
"albums": spotify.ToplistType.ALBUMS,
"artists": spotify.ToplistType.ARTISTS,
"tracks": spotify.ToplistType.TRACKS,
}
_TOPLIST_REGIONS = {
"country": lambda session: session.user_country,
"everywhere": lambda session: spotify.ToplistRegion.EVERYWHERE,
}
def browse(*, config, session, web_client, uri):
if uri == ROOT_DIR.uri:
return _ROOT_DIR_CONTENTS
elif uri == _TOP_LIST_DIR.uri:
return _TOP_LIST_DIR_CONTENTS
elif uri == _YOUR_MUSIC_DIR.uri:
return _YOUR_MUSIC_DIR_CONTENTS
elif uri == _PLAYLISTS_DIR.uri:
return _PLAYLISTS_DIR_CONTENTS
elif uri.startswith("spotify:user:") or uri.startswith("spotify:playlist:"):
return _browse_playlist(session, web_client, uri, config)
elif uri.startswith("spotify:album:"):
return _browse_album(session, uri, config)
elif uri.startswith("spotify:artist:"):
return _browse_artist(session, uri, config)
elif uri.startswith("spotify:top:"):
parts = uri.replace("spotify:top:", "").split(":")
if len(parts) == 1:
return _browse_toplist_regions(variant=parts[0])
elif len(parts) == 2:
if parts[1] == "user":
return _browse_toplist_user(web_client, variant=parts[0])
return _browse_toplist(
config, session, variant=parts[0], region=parts[1]
)
else:
logger.info(f"Failed to browse {uri!r}: Toplist URI parsing failed")
return []
elif uri.startswith("spotify:your:"):
parts = uri.replace("spotify:your:", "").split(":")
if len(parts) == 1:
return _browse_your_music(web_client, variant=parts[0])
elif uri.startswith("spotify:playlists:"):
parts = uri.replace("spotify:playlists:", "").split(":")
if len(parts) == 1:
return _browse_playlists(web_client, variant=parts[0])
logger.info(f"Failed to browse {uri!r}: Unknown URI type")
return []
def _browse_playlist(session, web_client, uri, config):
return playlists.playlist_lookup(
session, web_client, uri, config["bitrate"], as_items=True
)
def _browse_album(session, uri, config):
sp_album_browser = session.get_album(uri).browse()
sp_album_browser.load(config["timeout"])
return list(
translator.to_track_refs(
sp_album_browser.tracks, timeout=config["timeout"]
)
)
def _browse_artist(session, uri, config):
sp_artist_browser = session.get_artist(uri).browse(
type=spotify.ArtistBrowserType.NO_TRACKS
)
sp_artist_browser.load(config["timeout"])
top_tracks = list(
translator.to_track_refs(
sp_artist_browser.tophit_tracks, timeout=config["timeout"]
)
)
albums = list(
translator.to_album_refs(
sp_artist_browser.albums, timeout=config["timeout"]
)
)
return top_tracks + albums
def _browse_toplist_regions(variant):
dir_contents = [
models.Ref.directory(
uri=f"spotify:top:{variant}:country", name="Country"
),
models.Ref.directory(
uri=f"spotify:top:{variant}:countries", name="Other countries"
),
models.Ref.directory(
uri=f"spotify:top:{variant}:everywhere", name="Global"
),
]
if variant in ("tracks", "artists"):
dir_contents.insert(
0,
models.Ref.directory(
uri=f"spotify:top:{variant}:user", name="Personal"
),
)
return dir_contents
def _browse_toplist_user(web_client, variant):
if not web_client.logged_in:
return []
if variant in ("tracks", "artists"):
items = flatten(
[
page.get("items", [])
for page in web_client.get_all(
f"me/top/{variant}",
params={"limit": 50},
)
if page
]
)
if variant == "tracks":
return list(
translator.web_to_track_refs(items, check_playable=False)
)
else:
return list(translator.web_to_artist_refs(items))
else:
return []
def _browse_toplist(config, session, variant, region):
if region == "countries":
codes = config["toplist_countries"]
if not codes:
codes = countries.COUNTRIES.keys()
return [
models.Ref.directory(
uri=f"spotify:top:{variant}:{code.lower()}",
name=countries.COUNTRIES.get(code.upper(), code.upper()),
)
for code in codes
]
if region in ("country", "everywhere"):
sp_toplist = session.get_toplist(
type=_TOPLIST_TYPES[variant],
region=_TOPLIST_REGIONS[region](session),
)
elif len(region) == 2:
sp_toplist = session.get_toplist(
type=_TOPLIST_TYPES[variant], region=region.upper()
)
else:
return []
if session.connection.state is spotify.ConnectionState.LOGGED_IN:
sp_toplist.load(config["timeout"])
if not sp_toplist.is_loaded:
return []
if variant == "tracks":
return list(translator.to_track_refs(sp_toplist.tracks))
elif variant == "albums":
return list(
translator.to_album_refs(
sp_toplist.albums, timeout=config["timeout"]
)
)
elif variant == "artists":
return list(
translator.to_artist_refs(
sp_toplist.artists, timeout=config["timeout"]
)
)
else:
return []
def _load_your_music(web_client, variant):
if web_client is None or not web_client.logged_in:
return
if variant not in ("tracks", "albums"):
return
results = web_client.get_all(
f"me/{variant}",
params={"market": "from_token", "limit": 50},
)
for page in results:
if not page:
continue
items = page.get("items", [])
for item in items:
yield item
def _browse_your_music(web_client, variant):
items = _load_your_music(web_client, variant)
if variant == "tracks":
return list(translator.web_to_track_refs(items))
elif variant == "albums":
return list(translator.web_to_album_refs(items))
else:
return []
def _browse_playlists(web_client, variant):
if not web_client.logged_in:
return []
if variant == "featured":
items = flatten(
[
page.get("playlists", {}).get("items", [])
for page in web_client.get_all(
"browse/featured-playlists",
params={"limit": 50},
)
if page
]
)
return list(translator.to_playlist_refs(items))
else:
return []
|
|
"""Queries."""
from collections import ChainMap
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from . import db
from .models import (Characteristic, CharacteristicGroup, Country, Data,
EnglishString, Geography, Indicator, Survey, Translation)
# pylint: disable=too-many-public-methods
class DatalabData:
"""PmaData."""
char1 = aliased(Characteristic)
char2 = aliased(Characteristic)
char_grp1 = aliased(CharacteristicGroup)
char_grp2 = aliased(CharacteristicGroup)
@staticmethod
def all_joined(*select_args):
"""Datalab data joined."""
chr1 = DatalabData.char1
chr2 = DatalabData.char2
grp1 = DatalabData.char_grp1
grp2 = DatalabData.char_grp2
joined = db.session.query(*select_args) \
.select_from(Data) \
.join(Survey, Data.survey_id == Survey.id) \
.join(Geography, Survey.geography_id == Geography.id) \
.join(Country, Survey.country_id == Country.id) \
.join(Indicator, Data.indicator_id == Indicator.id) \
.outerjoin(chr1, Data.char1_id == chr1.id) \
.outerjoin(grp1, grp1.id == chr1.char_grp_id) \
.outerjoin(chr2, Data.char2_id == chr2.id) \
.outerjoin(grp2, grp2.id == chr2.char_grp_id)
return joined
@staticmethod
def series_query(survey_codes, indicator_code, char_grp_code, over_time):
"""Get the series based on supplied codes."""
json_list = DatalabData.filter_minimal(survey_codes, indicator_code,
char_grp_code, over_time)
if over_time:
series_list = DatalabData.data_to_time_series(json_list)
else:
series_list = DatalabData.data_to_series(json_list)
return series_list
@staticmethod
def data_to_time_series(sorted_data):
"""Transform a sorted list of data into time series."""
curr_char = None
curr_geo = None
results = []
next_series = {}
for obj in sorted_data:
new_char = obj['characteristic.id'] != curr_char
new_geo = obj['geography.id'] != curr_geo
if new_char or new_geo:
if curr_char is not None and curr_geo is not None:
results.append(next_series)
next_series = {
'characteristic.id': obj.pop('characteristic.id'),
'characteristic.label.id':
obj.pop('characteristic.label.id'),
'geography.id': obj.pop('geography.id'),
'geography.label.id': obj.pop('geography.label.id'),
'country.id': obj.pop('country.id'),
'country.label.id': obj.pop('country.label.id'),
'values': [
{
'survey.id': obj.pop('survey.id'),
'survey.label.id': obj.pop('survey.label.id'),
'survey.date': obj.pop('survey.date'),
'value': obj.pop('value'),
}
]
}
curr_char = next_series['characteristic.id']
curr_geo = next_series['geography.id']
else:
next_series['values'].append({
'survey.id': obj.pop('survey.id'),
'survey.label.id': obj.pop('survey.label.id'),
'survey.date': obj.pop('survey.date'),
'value': obj.pop('value'),
})
if next_series:
results.append(next_series)
return results
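    # Illustrative sketch of the transformation above (all field values are made up):
    # rows sorted by geography/characteristic/survey such as
    #   {'characteristic.id': 'c1', 'geography.id': 'g1', 'survey.id': 's1',
    #    'survey.label.id': 'l1', 'survey.date': '01-2015', 'value': 10.0, ...}
    # are grouped into one series per (characteristic, geography) pair:
    #   {'characteristic.id': 'c1', 'geography.id': 'g1', ...,
    #    'values': [{'survey.id': 's1', 'survey.date': '01-2015', 'value': 10.0, ...}]}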
@staticmethod
def data_to_series(sorted_data):
"""Transform a sorted list of data into series."""
curr_survey = None
results = []
next_series = {}
for obj in sorted_data:
if obj['survey.id'] != curr_survey:
if curr_survey is not None:
results.append(next_series)
next_series = {
'survey.id': obj.pop('survey.id'),
'survey.label.id': obj.pop('survey.label.id'),
'geography.id': obj.pop('geography.id'),
'geography.label.id': obj.pop('geography.label.id'),
'country.id': obj.pop('country.id'),
'country.label.id': obj.pop('country.label.id'),
'values': [
{
'characteristic.label.id':
obj.pop('characteristic.label.id'),
'characteristic.id': obj.pop('characteristic.id'),
'value': obj.pop('value'),
}
]
}
curr_survey = next_series['survey.id']
else:
next_series['values'].append({
'characteristic.label.id':
obj.pop('characteristic.label.id'),
'characteristic.id': obj.pop('characteristic.id'),
'value': obj.pop('value'),
})
if next_series:
results.append(next_series)
return results
@staticmethod
def filter_readable(survey_codes, indicator_code, char_grp_code,
lang=None):
"""Get filtered Datalab data and return readable columns.
Args:
survey_codes (str): A list of survey codes joined together by a
comma
indicator_code (str): An indicator code
char_grp_code (str): A characteristic group code
lang (str): The language, if specified.
Filters the data based on the function arguments.
Returns:
A list of simple python objects, one for each record found by
applying the various filters.
"""
chr1 = DatalabData.char1
grp1, grp2 = DatalabData.char_grp1, DatalabData.char_grp2
select_args = (Data, Survey, Indicator, grp1, chr1)
filtered = DatalabData.all_joined(*select_args)
if survey_codes is not None:
survey_sql = DatalabData.survey_list_to_sql(survey_codes)
filtered = filtered.filter(survey_sql)
if indicator_code is not None:
filtered = filtered.filter(Indicator.code == indicator_code)
if char_grp_code is not None:
filtered = filtered.filter(grp1.code == char_grp_code)
# TODO (jkp, begin=2017-08-28): This will be grp2.code == 'none'
# eventually when the Data show "none" for char_grp2 in excel import
# Remove E711 from .pycodestyle
# pylint: disable=singleton-comparison
filtered = filtered.filter(grp2.code == None)
results = filtered.all()
json_results = []
for item in results:
precision = item[0].precision
if precision is None:
precision = 1
value = round(item[0].value, precision)
this_dict = {
'value': value,
'survey.id': item[1].code,
'survey.date': item[1].start_date.strftime('%m-%Y'),
'indicator.label': item[2].label.to_string(lang),
'characteristicGroup.label': item[3].label.to_string(lang),
'characteristic.label': item[4].label.to_string(lang)
}
json_results.append(this_dict)
return json_results
@staticmethod
def filter_minimal(survey_codes, indicator_code, char_grp_code, over_time):
"""Get filtered Datalab data and return minimal columns.
Args:
survey_codes (str): A list of survey codes joined together by a
comma
indicator_code (str): An indicator code
char_grp_code (str): A characteristic group code
Filters the data based on the function arguments. The returned data
are data value, the precision, the survey code, the indicator code,
the characteristic group code, and the characteristic code.
Returns:
A list of simple python objects, one for each record found by
applying the various filters.
"""
chr1 = DatalabData.char1
grp1, grp2 = DatalabData.char_grp1, DatalabData.char_grp2
select_args = (Data, Survey, Indicator.code, grp1.code, chr1,
Geography, Country)
filtered = DatalabData.all_joined(*select_args)
if survey_codes is not None:
survey_sql = DatalabData.survey_list_to_sql(survey_codes)
filtered = filtered.filter(survey_sql)
if indicator_code is not None:
filtered = filtered.filter(Indicator.code == indicator_code)
if char_grp_code is not None:
filtered = filtered.filter(grp1.code == char_grp_code)
# TODO (jkp, begin=2017-08-28): This will be grp2.code == 'none'
# eventually when the Data show "none" for char_grp2 in excel import
# Remove E711 from .pycodestyle
# pylint: disable=singleton-comparison
filtered = filtered.filter(grp2.code == None)
if over_time:
# This ordering is very important!
ordered = filtered.order_by(Geography.order) \
.order_by(chr1.order) \
.order_by(Survey.order)
# Perhaps order by the date of the survey?
else:
ordered = filtered.order_by(Survey.order) \
.order_by(chr1.order)
results = ordered.all()
json_results = []
for item in results:
this_dict = {
'value': item[0].value,
'precision': item[0].precision,
'survey.id': item[1].code,
'survey.date': item[1].start_date.strftime('%m-%Y'),
'survey.label.id': item[1].label.code,
'indicator.id': item[2],
'characteristicGroup.id': item[3],
'characteristic.id': item[4].code,
'characteristic.label.id': item[4].label.code,
'geography.label.id': item[5].subheading.code,
'geography.id': item[5].code,
'country.label.id': item[6].label.code,
'country.id': item[6].code
}
json_results.append(this_dict)
return json_results
@staticmethod
def survey_list_to_sql(survey_list):
"""Turn a list of surveys passed through URL to SQL.
Args:
survey_list (str): A list of survey codes
Returns:
The SQLAlchemy object that represents these OR'd together.
"""
return DatalabData.api_list_to_sql_list(Survey, survey_list)
@staticmethod
def api_list_to_sql_list(model, query_values):
"""Convert generally query args to SQL.
Args:
model (db.Model): A model object with a code attribute
query_values (str): A list of codes joined by comma
        Returns:
The SQLAlchemy object that represents these OR'd together.
"""
# TODO (jkp 2017-08-28) Error checking on survey_list.
split = query_values.split(',')
sql_exprs = [model.code == code for code in split]
if len(sql_exprs) > 1:
full_sql = or_(*sql_exprs)
else:
full_sql = sql_exprs[0]
return full_sql
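    # Example with hypothetical codes: api_list_to_sql_list(Survey, 'GH2013,KE2014')
    # yields or_(Survey.code == 'GH2013', Survey.code == 'KE2014'); a single code
    # yields a plain equality expression instead.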
@staticmethod
def combos_all(survey_list, indicator, char_grp):
# pylint: disable=too-many-locals
"""Get lists of all valid datalab selections.
Based on a current selection in the datalab, this method returns lists
of what should be clickable in each of the three selection areas of
the datalab.
Args:
survey_list (list of str): A list of survey codes. An empty list if
not provided.
indicator (str): An indicator code or None if not provided.
            char_grp (str): A characteristic group code or None if not
                provided.
Returns:
A dictionary with a survey list, an indicator list, and a
characteristic group list.
"""
def keep_survey(this_indicator, this_char_grp):
"""Determine whether a survey from the data is valid.
Args:
this_indicator (str): An indicator code from the data
this_char_grp (str): A characteristic code from the data
Returns:
True or False to say if the related survey code should be
included in the return set.
"""
if indicator is None and char_grp is None:
keep = True
elif indicator is None and char_grp is not None:
keep = this_char_grp == char_grp
elif indicator is not None and char_grp is None:
keep = this_indicator == indicator
else:
indicator_match = this_indicator == indicator
char_grp_match = this_char_grp == char_grp
keep = indicator_match and char_grp_match
return keep
def keep_indicator(this_survey, this_char_grp):
"""Determine whether an indicator from the data is valid.
Args:
this_survey (str): A survey code from the data
this_char_grp (str): A characteristic code from the data
Returns:
True or False to say if the related indicator code should be
included in the return set.
"""
if not survey_list and char_grp is None:
keep = True
elif not survey_list and char_grp is not None:
keep = this_char_grp == char_grp
elif survey_list and char_grp is None:
keep = this_survey in survey_list
else:
survey_match = this_survey in survey_list
char_grp_match = this_char_grp == char_grp
keep = survey_match and char_grp_match
return keep
def keep_char_grp(this_survey, this_indicator):
"""Determine whether a characterist group from the data is valid.
Args:
this_survey (str): A survey code from the data
this_indicator (str): An indicator code from the data
Returns:
True or False to say if the related characteristic group code
should be included in the return set.
"""
if not survey_list and indicator is None:
keep = True
elif not survey_list and indicator is not None:
keep = this_indicator == indicator
elif survey_list and indicator is None:
keep = this_survey in survey_list
else:
survey_match = this_survey in survey_list
indicator_match = this_indicator == indicator
keep = survey_match and indicator_match
return keep
select_args = (Survey.code, Indicator.code, DatalabData.char_grp1.code)
joined = DatalabData.all_joined(*select_args)
results = joined.distinct().all()
surveys = set()
indicators = set()
char_grps = set()
for survey_code, indicator_code, char_grp_code in results:
if keep_survey(indicator_code, char_grp_code):
surveys.add(survey_code)
if keep_indicator(survey_code, char_grp_code):
indicators.add(indicator_code)
if keep_char_grp(survey_code, indicator_code):
char_grps.add(char_grp_code)
json_obj = {
'survey.id': sorted(list(surveys)),
'indicator.id': sorted(list(indicators)),
'characteristicGroup.id': sorted(list(char_grps))
}
return json_obj
@staticmethod
def all_minimal():
"""Get all datalab data in the minimal style."""
results = DatalabData.filter_minimal(None, None, None, False)
return results
@staticmethod
def combos_indicator(indicator):
"""Get all valid combos of survey and characteristic group.
Args:
            indicator (str): An indicator code
Returns:
A dictionary with two key names and list values.
"""
select_args = (Survey.code, DatalabData.char_grp1.code)
joined = DatalabData.all_joined(*select_args)
filtered = joined.filter(Indicator.code == indicator)
results = filtered.distinct().all()
survey_codes = set()
char_grp_codes = set()
for item in results:
survey_code = item[0]
survey_codes.add(survey_code)
char_grp_code = item[1]
char_grp_codes.add(char_grp_code)
to_return = {
'survey.id': sorted(list(survey_codes)),
'characteristicGroup.id': sorted(list(char_grp_codes))
}
return to_return
@staticmethod
def combos_char_grp(char_grp_code):
"""Get all valid combos of survey and indicator.
Args:
char_grp_code (str): A characteristic group code
Returns:
A dictionary with two key names and list values.
"""
select_args = (Survey.code, Indicator.code)
joined = DatalabData.all_joined(*select_args)
filtered = joined.filter(DatalabData.char_grp1.code == char_grp_code)
results = filtered.distinct().all()
survey_codes = set()
indicator_codes = set()
for item in results:
survey_code = item[0]
survey_codes.add(survey_code)
indicator_code = item[1]
indicator_codes.add(indicator_code)
to_return = {
'survey.id': sorted(list(survey_codes)),
'indicator.id': sorted(list(indicator_codes))
}
return to_return
@staticmethod
def combos_survey_list(survey_list):
# TODO (jkp 2017-08-29): make better. make hashmaps one to the other
"""Get all valid combos of indicator and characteristic groups.
Args:
survey_list (str): A list of survey codes, comma separated
Returns:
An object.
"""
select_args = (Indicator.code, DatalabData.char_grp1.code)
joined = DatalabData.all_joined(*select_args)
survey_list_sql = DatalabData.survey_list_to_sql(survey_list)
filtered = joined.filter(survey_list_sql)
results = filtered.distinct().all()
indicator_dict = {}
char_grp_dict = {}
for item in results:
this_indicator = item[0]
this_char_grp = item[1]
if this_indicator in indicator_dict:
indicator_dict[this_indicator].add(this_char_grp)
else:
indicator_dict[this_indicator] = set([this_char_grp])
if this_char_grp in char_grp_dict:
char_grp_dict[this_char_grp].add(this_indicator)
else:
char_grp_dict[this_char_grp] = set([this_indicator])
new_indicator_dict = {
k: sorted(list(v)) for k, v in indicator_dict.items()
}
new_char_grp_dict = {
k: sorted(list(v)) for k, v in char_grp_dict.items()
}
to_return = {
'indicators': new_indicator_dict,
'characteristicGroups': new_char_grp_dict
}
return to_return
@staticmethod
def combos_indicator_char_grp(indicator_code, char_grp_code):
"""Get all valid surveys from supplied arguments.
Args:
indicator_code (str): An indicator code
char_grp_code (str): A characteristic group code
Returns:
A list of surveys that have data for the supplied indicator and
characteristic group
"""
select_arg = Survey.code
joined = DatalabData.all_joined(select_arg)
filtered = joined.filter(Indicator.code == indicator_code) \
.filter(DatalabData.char_grp1.code == char_grp_code)
results = filtered.distinct().all()
to_return = {
'survey.id': [item[0] for item in results]
}
return to_return
@staticmethod
def init_indicators():
"""Datalab init."""
select_args = Indicator
joined = DatalabData.all_joined(select_args)
ordered = joined.order_by(Indicator.order)
results = ordered.distinct().all()
indicator_categories = []
for ind in results:
for cat in indicator_categories:
if ind.level2.code == cat['label.id']:
cat['indicators'].append(ind.datalab_init_json())
break
else:
indicator_categories.append({
'label.id': ind.level2.code,
'indicators': [ind.datalab_init_json()]
})
return indicator_categories
@staticmethod
def init_char_grp():
"""Datalab init."""
select_args = DatalabData.char_grp1
joined = DatalabData.all_joined(select_args)
ordered = joined.order_by(DatalabData.char_grp1.order)
results = ordered.distinct().all()
chargrp_categories = []
for char_grp in results:
for cat in chargrp_categories:
if char_grp.category.code == cat['label.id']:
cat['characteristicGroups'].append(char_grp.
datalab_init_json())
break
else:
chargrp_categories.append({
'label.id': char_grp.category.code,
'characteristicGroups': [char_grp.datalab_init_json()]
})
return chargrp_categories
@staticmethod
def init_chars():
"""Datalab init."""
select_args = DatalabData.char1
joined = DatalabData.all_joined(select_args)
results = joined.distinct().all()
results = [record.datalab_init_json() if record is not None else "none"
for record in results]
return results
@staticmethod
def init_surveys():
# pylint: disable=too-many-locals
# TODO (2017-09-05 jkp) refactor so that this method is simpler
"""Datalab init."""
select_args = Survey
joined = DatalabData.all_joined(select_args)
ordered = joined.order_by(Country.order) \
.order_by(Geography.order) \
.order_by(Survey.order)
results = ordered.distinct().all()
country_order = []
country_map = {}
country_geo_map = {}
for survey in results:
country = survey.country
country_code = country.code
geo = survey.geography
geo_code = geo.code
country_geo_key = '|'.join((country_code, geo_code))
if country not in country_order:
country_order.append(country)
if country_code in country_map:
if geo not in country_map[country_code]:
country_map[country_code].append(geo)
elif country_code not in country_map:
country_map[country_code] = [geo]
if country_geo_key in country_geo_map:
country_geo_map[country_geo_key].append(survey)
else:
country_geo_map[country_geo_key] = [survey]
survey_country_list = []
for country in country_order:
this_country_geos = country_map[country.code]
geography_list = []
for geo in this_country_geos:
country_geo_key = '|'.join((country.code, geo.code))
surveys = country_geo_map[country_geo_key]
survey_list = [s.datalab_init_json() for s in surveys]
this_geo_obj = {
'label.id': geo.subheading.code,
'surveys': survey_list
}
geography_list.append(this_geo_obj)
this_country_obj = {
'label.id': country.label.code,
'geographies': geography_list
}
survey_country_list.append(this_country_obj)
return survey_country_list
# TODO: (jkp 2017-08-29) Get other languages. Needs: Nothing.
@staticmethod
def init_strings():
"""Datalab init."""
results = EnglishString.query.all()
results = [record.datalab_init_json() for record in results]
results = dict(ChainMap(*results))
return results
@staticmethod
def init_languages():
"""Datalab init."""
return Translation.languages()
@staticmethod
def datalab_init():
"""Datalab Init."""
return {
'indicatorCategories': DatalabData.init_indicators(),
'characteristicGroupCategories': DatalabData.init_char_grp(),
'characteristics': DatalabData.init_chars(),
'surveyCountries': DatalabData.init_surveys(),
'strings': DatalabData.init_strings(),
'languages': DatalabData.init_languages()
}
@staticmethod
def query_input(survey, indicator, char_grp):
"""Build up a dictionary of query input to return with API result.
Args:
survey (str): A list of survey codes separated by a comma
indicator (str): An indicator code
char_grp (str): A characteristic group code
Returns:
A dictionary with lists of input data. Data is from datalab init.
"""
survey_list = sorted(survey.split(',')) if survey else []
survey_records = Survey.get_by_code(survey_list)
input_survey = [r.datalab_init_json(False) for r in survey_records]
indicator_records = Indicator.get_by_code(indicator)
if indicator_records:
input_indicator = [indicator_records[0].datalab_init_json()]
else:
input_indicator = None
char_grp_records = CharacteristicGroup.get_by_code(char_grp)
if char_grp_records:
input_char_grp = [char_grp_records[0].datalab_init_json()]
else:
input_char_grp = None
query_input = {
'surveys': input_survey,
'characteristicGroups': input_char_grp,
'indicators': input_indicator
}
return query_input
|
|
#!/usr/bin/python
# Copyright (c) 2010, Andrej Bauer, http://andrej.com/
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
######################################################################
# SIMPLE RANDOM ART IN PYTHON
#
# Version 2010-04-21
#
# I get asked every so often to release the source code for my random art
# project at http://www.random-art.org/. The original source is written in OCaml
# and is not publicly available, but here is a simple example of how you can get
# random art going in python in 250 lines of code.
#
# The idea is to generate expression trees that describe an image. For each
# point (x,y) of the image we evaluate the expression and get a color. A color
# is represented as a triple (r,g,b) where the red, green, blue components are
# numbers between -1 and 1. In computer graphics it is more usual to use the
# range [0,1], but since many operations are symmetric with respect to the
# origin it is more convenient to use the interval [-1,1].
#
# I kept the program as simple as possible, and independent of any non-standard
# Python libraries. Consequently, a number of improvements and further
# experiments are possible:
#
# * The most pressing problem right now is that the image is displayed as a
# large number of rectangles of size 1x1 on the tkinter Canvas, which
# consumes a great deal of memory. You will not be able to draw large images
# this way. An improved version would use the Python Imaging Library (PIL)
# instead.
#
# * The program uses a simple RGB (Red Green Blue) color model. We could also
# use the HSV model (Hue Saturation Value), and others. One possibility is
# to generate a palette of colors and use only colors that are combinations
# of those from the palette.
#
# * Of course, you can experiment by introducing new operators. If you are going
# to play with the source, your first exercise should be a new operator.
#
# * The program uses cartesian coordinates. You could experiment with polar
# coordinates.
#
# For more information and further discussion, see http://math.andrej.com/category/random-art/
import math
import random
# Utility functions
curId = 0
def getNextID():
global curId
curId += 1
return curId - 1
def average(c1, c2, w=0.5):
'''Compute the weighted average of two colors. With w = 0.5 we get the average.'''
(r1,g1,b1) = c1
(r2,g2,b2) = c2
r3 = w * r1 + (1 - w) * r2
g3 = w * g1 + (1 - w) * g2
b3 = w * b1 + (1 - w) * b2
return (r3, g3, b3)
def rgb(r,g,b):
'''Convert a color represented by (r,g,b) to a string understood by tkinter.'''
u = max(0, min(255, int(128 * (r + 1))))
v = max(0, min(255, int(128 * (g + 1))))
w = max(0, min(255, int(128 * (b + 1))))
return '#%02x%02x%02x' % (u, v, w)
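# For example, rgb(0.0, 1.0, -1.0) gives '#80ff00': each channel in [-1, 1] is
# rescaled to [0, 255] and clamped before being formatted as hex.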
def well(x):
'''A function which looks a bit like a well.'''
return 1 - 2 / (1 + x*x) ** 8
def tent(x):
'''A function that looks a bit like a tent.'''
return 1 - 2 * abs(x)
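# Both helpers keep values inside [-1, 1] for inputs in that range, e.g.
# tent(0) = 1, tent(1) = tent(-1) = -1, well(0) = -1 and well(1) is just below 1.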
# We next define classes that represent expression trees.
# Each object that represents an expression should have an eval(self,x,y,t) method
# which computes the value of the expression at (x,y). The __init__ should
# accept the objects representing its subexpressions. The class definition
# should contain the arity attribute which tells how many subexpressions should
# be passed to the __init__ constructor.
class VariableX():
arity = 0
def __init__(self):
self.id = getNextID()
def __repr__(self): return "x"
def printme(self, depth):
print (" " * depth) + "X"
def eval(self,x,y,t): return (x,x,x)
def getShader(self):
return "vec3 val%d = vec3(vPosition.x);\n" % self.id
class VariableY():
arity = 0
def __init__(self):
self.id = getNextID()
def __repr__(self): return "y"
def printme(self, depth):
print (" " * depth) + "Y"
def eval(self,x,y,t): return (y,y,y)
def getShader(self):
return "vec3 val%d = vec3(vPosition.y);\n" % self.id
class VariableT():
arity = 0
def __init__(self):
self.id = getNextID()
def __repr__(self): return "t"
def printme(self, depth):
print (" " * depth) + "Constant"
def eval(self,x,y,t): return (t,t,t)
def getShader(self):
return "vec3 val%d = vec3(vTime.x);\n" % self.id
class Constant():
arity = 0
def __init__(self):
self.id = getNextID()
self.c = (random.uniform(0,1), random.uniform(0,1), random.uniform(0,1))
def __repr__(self):
return 'Constant(%g,%g,%g)' % self.c
def printme(self, depth):
print (" " * depth) + "Constant"
def eval(self,x,y,t): return self.c
def getShader(self):
return "vec3 val{0} = vec3({1}, {2}, {3});\n".format(self.id, self.c[0], self.c[1], self.c[2])
class Sum():
arity = 2
def __init__(self, e1, e2):
self.id = getNextID()
self.e1 = e1
self.e2 = e2
def __repr__(self):
return 'Sum(%s, %s)' % (self.e1, self.e2)
def printme(self, depth):
print (" " * depth) + "Sum"
self.e1.printme(depth + 1)
self.e2.printme(depth + 1)
def eval(self,x,y,t):
return average(self.e1.eval(x,y,t), self.e2.eval(x,y,t))
def getShader(self):
substr = self.e1.getShader()
substr += self.e2.getShader()
substr += "vec3 val{0} = (val{1} + val{2}) / 2.0;\n".format(self.id, self.e1.id, self.e2.id)
return substr
class Product():
arity = 2
def __init__(self, e1, e2):
self.id = getNextID()
self.e1 = e1
self.e2 = e2
def __repr__(self):
return 'Product(%s, %s)' % (self.e1, self.e2)
def printme(self, depth):
print (" " * depth) + "Product"
self.e1.printme(depth + 1)
self.e2.printme(depth + 1)
def eval(self,x,y,t):
(r1,g1,b1) = self.e1.eval(x,y,t)
(r2,g2,b2) = self.e2.eval(x,y,t)
r3 = r1 * r2
g3 = g1 * g2
b3 = b1 * b2
return (r3, g3, b3)
def getShader(self):
substr = self.e1.getShader()
substr += self.e2.getShader()
substr += "vec3 val{0} = val{1} * val{2};\n".format(self.id, self.e1.id, self.e2.id)
return substr
class Mod():
arity = 2
def __init__(self, e1, e2):
self.id = getNextID()
self.e1 = e1
self.e2 = e2
def __repr__(self):
return 'Mod(%s, %s)' % (self.e1, self.e2)
def printme(self, depth):
print (" " * depth) + "Mod"
self.e1.printme(depth + 1)
self.e2.printme(depth + 1)
def eval(self,x,y,t):
(r1,g1,b1) = self.e1.eval(x,y,t)
(r2,g2,b2) = self.e2.eval(x,y,t)
try:
r3 = r1 % r2
g3 = g1 % g2
b3 = b1 % b2
return (r3, g3, b3)
except:
return (0,0,0)
def getShader(self):
substr = self.e1.getShader()
substr += self.e2.getShader()
substr += "vec3 val{0} = mod(val{1}, val{2});\n".format(self.id, self.e1.id, self.e2.id)
return substr
class Well():
arity = 1
def __init__(self, e):
self.id = getNextID()
self.e = e
def __repr__(self):
return 'Well(%s)' % self.e
def printme(self, depth):
print (" " * depth) + "Well"
self.e.printme(depth + 1)
def eval(self,x,y,t):
(r,g,b) = self.e.eval(x,y,t)
return (well(r), well(g), well(b))
def getShader(self):
substr = self.e.getShader()
substr += "vec3 val{0} = 1.0 - (2.0 / (pow(1.0 + pow(val{1}, vec3(2.0)), vec3(8.0))));\n".format(self.id, self.e.id)
return substr
class Tent():
arity = 1
def __init__(self, e):
self.id = getNextID()
self.e = e
def __repr__(self):
return 'Tent(%s)' % self.e
def printme(self, depth):
print (" " * depth) + "Tent"
self.e.printme(depth + 1)
def eval(self,x,y,t):
(r,g,b) = self.e.eval(x,y,t)
return (tent(r), tent(g), tent(b))
def getShader(self):
substr = self.e.getShader()
substr += "vec3 val{0} = 1.0 - (2.0 * abs(val{1}));\n".format(self.id, self.e.id)
return substr
class Sin():
arity = 1
def __init__(self, e):
self.id = getNextID()
self.e = e
self.phase = random.uniform(0, math.pi)
self.freq = random.uniform(1.0, 6.0)
def __repr__(self):
return 'Sin(%g + %g * %s)' % (self.phase, self.freq, self.e)
def printme(self, depth):
print (" " * depth) + "Sin"
self.e.printme(depth + 1)
def eval(self,x,y,t):
(r1,g1,b1) = self.e.eval(x,y,t)
r2 = math.sin(self.phase + self.freq * r1)
g2 = math.sin(self.phase + self.freq * g1)
b2 = math.sin(self.phase + self.freq * b1)
return (r2,g2,b2)
def getShader(self):
substr = self.e.getShader()
substr += "vec3 val{0} = sin({1} + ({2} * val{3}));\n".format(self.id, self.phase, self.freq, self.e.id)
return substr
class Cos():
arity = 1
def __init__(self, e):
self.id = getNextID()
self.e = e
self.phase = random.uniform(0, math.pi)
self.freq = random.uniform(1.0, 6.0)
def __repr__(self):
return 'Cos(%g + %g * %s)' % (self.phase, self.freq, self.e)
def printme(self, depth):
print (" " * depth) + "Cos"
self.e.printme(depth + 1)
def eval(self,x,y,t):
(r1,g1,b1) = self.e.eval(x,y,t)
r2 = math.cos(self.phase + self.freq * r1)
g2 = math.cos(self.phase + self.freq * g1)
b2 = math.cos(self.phase + self.freq * b1)
return (r2,g2,b2)
def getShader(self):
substr = self.e.getShader()
substr += "vec3 val{0} = cos({1} + ({2} * val{3}));\n".format(self.id, self.phase, self.freq, self.e.id)
return substr
class Level():
arity = 3
def __init__(self, level, e1, e2):
self.id = getNextID()
self.treshold = random.uniform(-1.0,1.0)
self.level = level
self.e1 = e1
self.e2 = e2
def __repr__(self):
return 'Level(%g, %s, %s, %s)' % (self.treshold, self.level, self.e1, self.e2)
def printme(self, depth):
print (" " * depth) + "Level"
self.level.printme(depth + 1)
self.e1.printme(depth + 1)
self.e2.printme(depth + 1)
def eval(self,x,y,t):
(r1, g1, b1) = self.level.eval(x,y,t)
(r2, g2, b2) = self.e1.eval(x,y,t)
(r3, g3, b3) = self.e2.eval(x,y,t)
r4 = r2 if r1 < self.treshold else r3
g4 = g2 if g1 < self.treshold else g3
b4 = b2 if b1 < self.treshold else b3
return (r4,g4,b4)
class Mix():
arity = 3
def __init__(self, w, e1, e2):
self.id = getNextID()
self.w = w
self.e1 = e1
self.e2 = e2
def __repr__(self):
return 'Mix(%s, %s, %s)' % (self.w, self.e1, self.e2)
def printme(self, depth):
print (" " * depth) + "Mix"
self.w.printme(depth + 1)
self.e1.printme(depth + 1)
self.e2.printme(depth + 1)
def eval(self,x,y,t):
w = 0.5 * (self.w.eval(x,y,t)[0] + 1.0)
c1 = self.e1.eval(x,y,t)
c2 = self.e2.eval(x,y,t)
return average(c1,c2,)
def getShader(self):
        # w is intentionally not used (eval() above also ignores it); plain 50/50 average.
substr = self.e1.getShader()
substr += self.e2.getShader()
substr += "vec3 val{0} = (val{1} + val{2}) / 2.0;\n".format(self.id, self.e1.id, self.e2.id)
return substr
# The following list of all classes that are used for generation of expressions is
# used by the generate function below.
# Level is left out of the operator set below because it has no getShader implementation.
#operators = (VariableX, VariableY, VariableT, Constant, Sum, Product, Mod, Sin, Cos, Tent, Well, Level, Mix)
operators = (VariableX, VariableY, VariableT, Constant, Sum, Product, Mod, Sin, Cos, Tent, Well, Mix)
# We precompute those operators that have arity 0 and arity > 0
operators0 = [op for op in operators if op.arity == 0]
operators1 = [op for op in operators if op.arity > 0]
def generate(k = 50):
    '''Randomly generate an expression of a given size.'''
if k <= 0:
# We used up available size, generate a leaf of the expression tree
op = random.choice(operators0)
return op()
else:
# randomly pick an operator whose arity > 0
op = random.choice(operators1)
# generate subexpressions
i = 0 # the amount of available size used up so far
        args = [] # the list of generated subexpressions
for j in sorted([random.randrange(k) for l in range(op.arity-1)]):
args.append(generate(j - i))
i = j
args.append(generate(k - 1 - i))
return op(*args)
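# Illustrative use of generate() (not part of the original program flow):
#   expr = generate(20)              # random expression tree of size ~20
#   print expr                       # e.g. Sin(1.2 + 3.4 * Product(x, y))
#   print expr.eval(0.5, -0.5, 0.0)  # an (r, g, b) triple with components in [-1, 1]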
class Art():
"""A simple graphical user interface for random art. It displays the image,
and the 'Again!' button."""
def __init__(self, seed=None, size=4096):
self.size=size
self.seed = seed
def MakeArt(self):
if self.seed is not None:
random.seed(self.seed)
xpixels = int(math.sqrt(self.size))
ypixels = xpixels
frames = 256
art = generate(random.randrange(20,150))
#art.printme(0)
outstr = art.getShader()
outstr += "gl_FragColor.rgb = val{0};".format(art.id)
outstr += "gl_FragColor.a = 1.0;"
return outstr
# Main program
if __name__ == "__main__":
    art = Art(size=512*512)
print art.MakeArt()
|
|
"""
This module is used for creating an ItemLookup response parser for Amazon's AWS API.
"""
from base import BaseLookupWrapper, first_element, parse_bool, parse_float, parse_int
class Item(BaseLookupWrapper):
@property
@first_element
def asin(self):
return self.xpath('./a:ASIN/text()')
@property
@first_element
def parent_asin(self):
return self.xpath('./a:ParentASIN/text()')
class Offers(BaseLookupWrapper):
class Offer(BaseLookupWrapper):
class Listing(BaseLookupWrapper):
@property
@first_element
def offer_listing_id(self):
return self.xpath('./a:OfferListingId/text()')
@property
@parse_float
@first_element
def price(self):
return self.xpath('./a:Price/a:FormattedPrice/text()')
@property
@parse_float
@first_element
def amount_saved(self):
return self.xpath('./a:AmountSaved/a:FormattedPrice/text()')
@property
@parse_int
@first_element
def percentage_saved(self):
return self.xpath('./a:PercentageSaved/text()')
            # ToDo: AvailabilityAttributes
@property
@parse_bool
@first_element
def is_eligible_for_super_saver_shipping(self):
return self.xpath('./a:IsEligibleForSuperSaverShipping/text()')
@property
@parse_bool
@first_element
def is_eligible_for_prime(self):
return self.xpath('./a:IsEligibleForPrime/text()')
def __repr__(self):
return '<OfferListing price={} is_eligible_for_prime={}>'.format(self.price, self.is_eligible_for_prime)
@property
@first_element
def condition(self):
return self.xpath('./a:OfferAttributes/a:Condition/text()')
@property
def _offer_listings(self):
return [self.Listing(x) for x in self.xpath('.//a:OfferListing')]
@property
def offer_listings(self):
"""
        Deprecated: the offer listings element always contains only the
        lowest-priced / buy-box seller, so at most one listing is returned.
:return:
"""
import warnings
warnings.warn('offer_listings is no longer useful since only one offer listing is returned. Use offer_listing instead')
return self._offer_listings
@property
def offer_listing(self):
if not self._offer_listings:
return None
return self._offer_listings[0]
@property
@first_element
def merchant_name(self):
return self.xpath('./a:Merchant/a:Name/text()')
def __repr__(self):
return '<Offer merchant_name={} condition={} price={} prime={}>'.format(self.merchant_name, self.condition, self.offer_listing.price, self.offer_listing.is_eligible_for_prime)
@property
@parse_int
@first_element
def total_offers(self):
return self.xpath('./a:Offers/a:TotalOffers/text()')
@property
@parse_int
@first_element
def total_offer_pages(self):
return self.xpath('./a:Offers/a:TotalOfferPages/text()')
@property
@first_element
def more_offers_url(self):
return self.xpath('./a:Offers/a:MoreOffersUrl/text()')
@property
def offers(self):
return [self.Offer(x) for x in self.xpath('.//a:Offer')]
def __repr__(self):
return '<Offers total_offers={} offers={}>'.format(self.total_offers, self.offers)
class OfferSummary(BaseLookupWrapper):
"""
Used to wrap the elements which are returned by the OfferSummary ResponseGroup in the ItemLookup response.
http://docs.aws.amazon.com/AWSECommerceService/latest/DG/RG_OfferSummary.html
"""
@property
def offer_summary(self):
r = self.xpath('./a:OfferSummary')
if r:
return self.Summary(r[0])
return self.Summary(None)
class Summary(BaseLookupWrapper):
@property
@parse_float
@first_element
def lowest_new_price(self):
return self.xpath('./a:LowestNewPrice/a:FormattedPrice/text()')
@property
@parse_float
@first_element
def lowest_used_price(self):
return self.xpath('./a:LowestUsedPrice/a:FormattedPrice/text()')
@property
@parse_float
@first_element
def lowest_collectible_price(self):
return self.xpath('./a:LowestCollectiblePrice/a:FormattedPrice/text()')
@property
@parse_float
@first_element
def lowest_refurbished_price(self):
return self.xpath('./a:LowestRefurbishedPrice/a:FormattedPrice/text()')
@property
@parse_int
@first_element
def total_new(self):
return self.xpath('./a:TotalNew/text()')
@property
@parse_int
@first_element
def total_used(self):
return self.xpath('./a:TotalUsed/text()')
@property
@parse_int
@first_element
def total_collectible(self):
return self.xpath('./a:TotalCollectible/text()')
@property
@parse_int
@first_element
def total_refurbished(self):
return self.xpath('./a:TotalRefurbished/text()')
class SalesRank(BaseLookupWrapper):
"""
Used to wrap the elements which are returned by the SalesRank ResponseGroup in the ItemLookup response.
http://docs.aws.amazon.com/AWSECommerceService/latest/DG/RG_SalesRank.html
"""
@property
@parse_int
@first_element
def sales_rank(self):
return self.xpath('./a:SalesRank/text()')
class ItemLinks(BaseLookupWrapper):
@property
@first_element
def detail_page_url(self):
return self.xpath('./a:DetailPageURL/text()')
# ToDo: item_links should return a list of objects which parses out url and description to maintain consistency
@property
def item_links(self):
item_links = [BaseLookupWrapper(x) for x in self.xpath('./a:ItemLinks//a:ItemLink')]
return [(x.xpath('./a:Description/text()')[0].strip(), x.xpath('./a:URL/text()')[0].strip()) for x in item_links]
class BaseImageWrapper(BaseLookupWrapper):
"""
Used to wrap any element which contains image data. (height, width, url)
"""
def mk_img_from_elem(self, elem):
elem = [elem]
return self._mk_img(elems=elem)
def mk_img_from_xpath(self, xpath):
return self._mk_img(xpath=xpath)
def _mk_img(self, xpath=None, elems=None):
# elem should at least contain one element even if it's None to prevent IndexErrors
# on the next line.
elem = elems or self.xpath(xpath) or [None]
return self.Img(elem[0])
class Img(BaseLookupWrapper):
@property
@first_element
def url(self):
return self.xpath('./a:URL/text()')
@property
@parse_int
@first_element
def height(self):
return self.xpath('./a:Height/text()')
@property
@parse_int
@first_element
def width(self):
return self.xpath('./a:Width/text()')
def __repr__(self):
return '<ImageElement url={} height={} width={}>'.format(self.url, self.height, self.width)
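    # Typical use from a subclass (illustrative): for an element with a SmallImage
    # child,
    #   img = self.mk_img_from_xpath('./a:SmallImage')
    #   img.url, img.height, img.width
    # A missing element falls back to Img(None) rather than raising an IndexError.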
class Images(BaseImageWrapper):
"""
Used to wrap the elements which are returned by the Images ResponseGroup in the ItemLookup response.
http://docs.aws.amazon.com/AWSECommerceService/latest/DG/RG_Images.html
"""
class ImageSet(BaseImageWrapper):
"""
Used to wrap an ImageSet element for parsing.
"""
@property
def swatch_image(self):
return self.mk_img_from_xpath('./a:SwatchImage')
@property
def small_image(self):
return self.mk_img_from_xpath('./a:SmallImage')
@property
def thumbnail_image(self):
return self.mk_img_from_xpath('./a:ThumbnailImage')
@property
def tiny_image(self):
return self.mk_img_from_xpath('./a:TinyImage')
@property
def medium_image(self):
return self.mk_img_from_xpath('./a:MediumImage')
@property
def large_image(self):
return self.mk_img_from_xpath('./a:LargeImage')
def all(self):
return dict(
swatch_image=self.swatch_image.url,
small_image=self.small_image.url,
thumbnail_image=self.thumbnail_image.url,
tiny_image=self.tiny_image.url,
medium_image=self.medium_image.url,
large_image=self.large_image.url
)
@property
def small_image(self):
return self.mk_img_from_xpath('./a:SmallImage')
@property
def medium_image(self):
return self.mk_img_from_xpath('./a:MediumImage')
@property
def large_image(self):
return self.mk_img_from_xpath('./a:LargeImage')
@property
def image_set_variant(self):
image_set_element_list = self.xpath('./a:ImageSets/a:ImageSet[@Category="variant"]')
if not image_set_element_list:
return self.ImageSet(None)
return self.ImageSet(image_set_element_list[0])
@property
def image_set_primary(self):
image_set_element_list = self.xpath('./a:ImageSets/a:ImageSet[@Category="primary"]')
if not image_set_element_list:
return self.ImageSet(None)
return self.ImageSet(image_set_element_list[0])
class BaseDimensionsWrapper(BaseLookupWrapper):
"""
Element wrapper which is used to parse out dimensions from elements which contain height/width/length/weight.
"""
def mk_dimens_from_elem(self, elem):
elem = [elem]
return self._mk_dimens(elems=elem)
def mk_dimens_from_xpath(self, xpath):
return self._mk_dimens(xpath=xpath)
def _mk_dimens(self, xpath=None, elems=None):
elem = elems or self.xpath(xpath)
return self.Dimens(elem[0] if elem else None)
class Dimens(BaseLookupWrapper):
@property
@parse_int
@first_element
def height(self):
return self.xpath('./a:Height/text()')
@property
@parse_int
@first_element
def length(self):
return self.xpath('./a:Length/text()')
@property
@parse_int
@first_element
def width(self):
return self.xpath('./a:Width/text()')
@property
@parse_int
@first_element
def weight(self):
return self.xpath('./a:Weight/text()')
def __repr__(self):
return '<DimensionsElement length={} height={} width={}>'.format(self.length, self.height, self.width)
class ItemAttributes(BaseLookupWrapper):
"""
Used to wrap the elements which are returned by the ItemAttributes ResponseGroup in the ItemLookup response.
http://docs.aws.amazon.com/AWSECommerceService/latest/DG/RG_ItemAttributes.html
"""
@property
def item_attributes(self):
r = self.xpath('./a:ItemAttributes')
if r:
return ItemAttributes.Attributes(r[0])
return ItemAttributes.Attributes(None)
class Attributes(BaseDimensionsWrapper):
@property
@first_element
def actor(self): # ToDo: test
return self.xpath('./a:Actor/text()')
@property
@first_element
def artist(self): # ToDo: test
return self.xpath('./a:Artist/text()')
@property
@first_element
def aspect_ratio(self): # ToDo: test
return self.xpath('./a:AspectRatio/text()')
@property
@first_element
def audience_rating(self): # ToDo: test
return self.xpath('./a:AudienceRating/text()')
@property
@first_element
def audio_format(self): # ToDo: test
return self.xpath('./a:AudioFormat/text()')
@property
@first_element
def author(self): # ToDo: test
return self.xpath('./a:Author/text()')
@property
@first_element
def binding(self):
return self.xpath('./a:Binding/text()')
@property
@first_element
def brand(self):
return self.xpath('./a:Brand/text()')
@property
@first_element
def category(self): # ToDo: test
return self.xpath('./a:Category/text()')
@property
@first_element
def cero_age_rating(self): # ToDo: test
return self.xpath('./a:CEROAgeRating/text()')
@property
@first_element
def clothing_size(self): # ToDo: test
return self.xpath('./a:ClothingSize/text()')
@property
@first_element
def color(self): # ToDo: test
return self.xpath('./a:Color/text()')
# ToDo: Creator/Role
@property
def catalog_number_list(self):
return [x.strip() for x in self.xpath('./a:CatalogNumberList//text()') if x.strip()]
@property
@first_element
def ean(self):
return self.xpath('./a:EAN/text()')
@property
def ean_list(self):
return [x.strip() for x in self.xpath('./a:EANList/a:EANListElement//text()') if x.strip()]
@property
def features(self):
return [x.strip() for x in self.xpath('.//a:Feature/text()') if x.strip()]
@property
@parse_bool
@first_element
def is_adult_product(self):
return self.xpath('./a:IsAdultProduct/text()')
@property
def item_dimensions(self):
return self.mk_dimens_from_xpath('./a:ItemDimensions')
@property
@first_element
def label(self):
return self.xpath('./a:Label/text()')
@property
@parse_float
@first_element
def list_price(self):
return self.xpath('./a:ListPrice/a:FormattedPrice/text()')
@property
@first_element
def manufacturer(self):
return self.xpath('./a:Manufacturer/text()')
@property
@first_element
def model(self):
return self.xpath('./a:Model/text()')
@property
@first_element
def mpn(self):
return self.xpath('./a:MPN/text()')
@property
@parse_int
@first_element
def number_of_items(self):
return self.xpath('./a:NumberOfItems/text()')
@property
def package_dimensions(self):
return self.mk_dimens_from_xpath('./a:PackageDimensions')
@property
@parse_int
@first_element
def package_quantity(self):
return self.xpath('./a:PackageQuantity/text()')
@property
@first_element
def part_number(self):
return self.xpath('./a:PartNumber/text()')
@property
@first_element
def product_group(self):
return self.xpath('./a:ProductGroup/text()')
@property
@first_element
def product_type_name(self):
return self.xpath('./a:ProductTypeName/text()')
@property
@first_element
def publication_date(self):
return self.xpath('./a:PublicationDate/text()')
@property
@first_element
def publisher(self):
return self.xpath('./a:Publisher/text()')
@property
@first_element
def release_date(self):
return self.xpath('./a:ReleaseDate/text()')
@property
@first_element
def studio(self):
return self.xpath('./a:Studio/text()')
@property
@first_element
def title(self):
return self.xpath('./a:Title/text()')
@property
@first_element
def upc(self):
return self.xpath('./a:UPC/text()')
@property
def upc_list(self):
return [x.strip() for x in self.xpath('./a:UPCList//a:UPCListElement/text()') if x.strip()]
class BrowseNodes(BaseLookupWrapper):
class BrowseNode(BaseLookupWrapper):
@property
@first_element
def browse_node_id(self):
return self.xpath('./a:BrowseNodeId/text()')
@property
@first_element
def name(self):
return self.xpath('./a:Name/text()')
@property
@first_element
def _next_ancestor(self):
return self.xpath('./a:Ancestors/a:BrowseNode')
@property
def has_ancestor(self):
return self._next_ancestor is not None
@property
def next_ancestor(self):
return BrowseNodes.BrowseNode(self._next_ancestor)
def __repr__(self):
return '<BrowseNode name={} browse_node_id={}>'.format(self.name, self.browse_node_id)
@property
def browse_nodes(self):
l = []
browse_node = self.first_browse_node
l.append(browse_node)
while browse_node.has_ancestor:
browse_node = browse_node.next_ancestor
l.append(browse_node)
return l
@property
@first_element
def _first_browse_node(self):
return self.xpath('./a:BrowseNodes/a:BrowseNode')
@property
def first_browse_node(self):
return self.BrowseNode(self._first_browse_node)
# ToDo: EditorialReview
# ToDo: ItemIds
# ToDo: Accessories
# ToDo: Reviews
# ToDo: Similarities
# ToDo: Tracks
# ToDo: PromotionSummary
# ToDo: RelatedItems
# ToDo: VariationImages
# ToDo: Variations
class Small(ItemLinks, ItemAttributes):
pass
class Medium(Small, OfferSummary, SalesRank, Images):
pass
class Large(Medium, Offers, BrowseNodes):
pass
class OfferFull(Offers, OfferSummary):
"""
    Don't mix with Large; it will cause an MRO error.
    Large already contains the Offers class, which covers all response groups returned
    by OfferFull. OfferFull should only be used if you're requesting OfferFull in
    conjunction with anything other than Large.
    If requesting the Large and OfferFull response groups, just use Large.
"""
pass
|
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The module execute a test of one skill intent.
Using a mocked message bus this module is responsible for sending utterences
and testing that the intent is called.
The module runner can test:
That the expected intent in the skill is activated
That the expected parameters are extracted from the utterance
That Mycroft contexts are set or removed
That the skill speak the intended answer
The content of any message exchanged between the skill and the mycroft core
To set up a test the test runner can
Send an utterance, as the user would normally speak
Set up and remove context
Set up a custom timeout for the test runner, to allow for skills that runs
for a very long time
"""
from queue import Queue, Empty
from copy import copy
import json
import time
import os
import re
import ast
from os.path import join, isdir, basename
from pyee import EventEmitter
from numbers import Number
from mycroft.messagebus.message import Message
from mycroft.skills.core import MycroftSkill, FallbackSkill
from mycroft.skills.skill_loader import SkillLoader
from mycroft.configuration import Configuration
from mycroft.util.log import LOG
from logging import StreamHandler
from io import StringIO
from contextlib import contextmanager
from .colors import color
from .rules import (intent_type_check, play_query_check, question_check,
expected_data_check, expected_dialog_check,
changed_context_check)
MainModule = '__init__'
DEFAULT_EVALUAITON_TIMEOUT = 30
# Set a configuration value to allow skills to check if they're in a test
Configuration.get()['test_env'] = True
class SkillTestError(Exception):
pass
@contextmanager
def temporary_handler(log, handler):
"""Context manager to replace the default logger with a temporary logger.
Args:
log (LOG): mycroft LOG object
handler (logging.Handler): Handler object to use
"""
old_handler = log.handler
log.handler = handler
yield
log.handler = old_handler
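# Illustrative usage, mirroring how load_skills() captures skill logs below:
#     buf = StringIO()
#     with temporary_handler(skills_log, StreamHandler(buf)):
#         ...  # anything logged through the LOG object now ends up in buf
#     captured = buf.getvalue()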
def create_skill_descriptor(skill_path):
return {"path": skill_path}
def get_skills(skills_folder):
"""Find skills in the skill folder or sub folders.
Recursive traversal into subfolders stop when a __init__.py file
is discovered
Args:
skills_folder: Folder to start a search for skills __init__.py
files
Returns:
list: the skills
"""
skills = []
def _get_skill_descriptor(skills_folder):
if not isdir(skills_folder):
return
if MainModule + ".py" in os.listdir(skills_folder):
skills.append(create_skill_descriptor(skills_folder))
return
possible_skills = os.listdir(skills_folder)
for i in possible_skills:
_get_skill_descriptor(join(skills_folder, i))
_get_skill_descriptor(skills_folder)
skills = sorted(skills, key=lambda p: basename(p['path']))
return skills
def load_skills(emitter, skills_root):
"""Load all skills and set up emitter
Args:
        emitter: The emitter to use
skills_root: Directory of the skills __init__.py
Returns:
tuple: (list of loaded skills, dict with logs for each skill)
"""
skill_list = []
log = {}
for skill in get_skills(skills_root):
path = skill["path"]
skill_id = 'test-' + basename(path)
# Catch the logs during skill loading
from mycroft.util.log import LOG as skills_log
buf = StringIO()
with temporary_handler(skills_log, StreamHandler(buf)):
skill_loader = SkillLoader(emitter, path)
skill_loader.skill_id = skill_id
skill_loader.load()
skill_list.append(skill_loader.instance)
# Restore skill logger since it was created with the temporary handler
if skill_loader.instance:
skill_loader.instance.log = LOG.create_logger(
skill_loader.instance.name)
log[path] = buf.getvalue()
return skill_list, log
def unload_skills(skills):
for s in skills:
s.default_shutdown()
class InterceptEmitter(object):
"""
This class intercepts and allows emitting events between the
skill_tester and the skill being tested.
    When a test is running, emitted communication is intercepted for analysis.
"""
def __init__(self):
self.emitter = EventEmitter()
self.q = None
def on(self, event, f):
# run all events
print("Event: ", event)
self.emitter.on(event, f)
def emit(self, event, *args, **kwargs):
event_name = event.msg_type
if self.q:
self.q.put(event)
self.emitter.emit(event_name, event, *args, **kwargs)
def wait_for_response(self, event, reply_type=None, *args, **kwargs):
"""Simple single thread implementation of wait_for_response."""
message_type = reply_type or event.msg_type + '.response'
response = None
def response_handler(msg):
nonlocal response
response = msg
self.emitter.once(message_type, response_handler)
self.emitter.emit(event.msg_type, event)
return response
def once(self, event, f):
self.emitter.once(event, f)
def remove(self, event_name, func):
pass
def remove_all_listeners(self, event_name):
pass
class MockSkillsLoader(object):
"""Load a skill and set up emitter
"""
def __init__(self, skills_root):
self.load_log = None
self.skills_root = skills_root
self.emitter = InterceptEmitter()
from mycroft.skills.intent_service import IntentService
self.ih = IntentService(self.emitter)
self.skills = None
self.emitter.on(
'mycroft.skills.fallback',
FallbackSkill.make_intent_failure_handler(self.emitter))
def make_response(message):
skill_id = message.data.get('skill_id', '')
data = dict(result=False, skill_id=skill_id)
self.emitter.emit(Message('skill.converse.response', data))
self.emitter.on('skill.converse.request', make_response)
def load_skills(self):
skills, self.load_log = load_skills(self.emitter, self.skills_root)
self.skills = [s for s in skills if s]
self.ih.padatious_service.train(
Message('', data=dict(single_thread=True)))
return self.emitter.emitter # kick out the underlying emitter
def unload_skills(self):
unload_skills(self.skills)
def load_test_case_file(test_case_file):
"""Load a test case to run."""
print("")
print(color.HEADER + "="*20 + " RUNNING TEST " + "="*20 + color.RESET)
print('Test file: ', test_case_file)
with open(test_case_file, 'r') as f:
test_case = json.load(f)
print('Test:', json.dumps(test_case, indent=4, sort_keys=False))
return test_case
class SkillTest(object):
"""
This class is instantiated for each skill being tested. It holds the
data needed for the test, and contains the methods doing the test
"""
def __init__(self, skill, test_case_file, emitter, test_status=None):
self.skill = skill
self.test_case_file = test_case_file
self.emitter = emitter
self.dict = dict
self.output_file = None
self.returned_intent = False
self.test_status = test_status
self.failure_msg = None
self.end_of_skill = False
def run(self, loader):
""" Execute the test
Run a test for a skill. The skill, test_case_file and emitter are
already set up in the __init__ method.
This method does all the preparation and cleanup and calls
self.execute_test() to perform the actual test.
Returns:
bool: Test results -- only True if all passed
"""
self.end_of_skill = False # Reset to false at beginning of test
s = [s for s in loader.skills if s and s.root_dir == self.skill]
if s:
s = s[0]
else:
# The skill wasn't loaded, print the load log for the skill
if self.skill in loader.load_log:
print('\n {} Captured Logs from loading {}'.format('=' * 15,
'=' * 15))
print(loader.load_log.pop(self.skill))
raise SkillTestError('Skill couldn\'t be loaded')
orig_get_response = s.get_response
original_settings = s.settings
try:
return self.execute_test(s)
finally:
s.get_response = orig_get_response
s.settings = original_settings
def send_play_query(self, s, test_case):
"""Emit an event triggering the a check for playback possibilities."""
play_query = test_case['play_query']
print('PLAY QUERY', color.USER_UTT + play_query + color.RESET)
self.emitter.emit('play:query', Message('play:query:',
{'phrase': play_query}))
def send_play_start(self, s, test_case):
"""Emit an event starting playback from the skill."""
print('PLAY START')
callback_data = test_case['play_start']
callback_data['skill_id'] = s.skill_id
self.emitter.emit('play:start',
Message('play:start', callback_data))
def send_question(self, test_case):
"""Emit a Question to the loaded skills."""
print("QUESTION: {}".format(test_case['question']))
callback_data = {'phrase': test_case['question']}
self.emitter.emit('question:query',
Message('question:query', data=callback_data))
def send_utterance(self, test_case):
"""Emit an utterance to the loaded skills."""
utt = test_case['utterance']
print("UTTERANCE:", color.USER_UTT + utt + color.RESET)
self.emitter.emit('recognizer_loop:utterance',
Message('recognizer_loop:utterance',
{'utterances': [utt]}))
def apply_test_settings(self, s, test_case):
"""Replace the skills settings with settings from the test_case."""
s.settings = copy(test_case['settings'])
print(color.YELLOW, 'will run test with custom settings:',
'\n{}'.format(s.settings), color.RESET)
def setup_get_response(self, s, test_case):
"""Setup interception of get_response calls."""
def get_response(dialog='', data=None, announcement='',
validator=None, on_fail=None, num_retries=-1):
data = data or {}
utt = announcement or s.dialog_renderer.render(dialog, data)
print(color.MYCROFT + ">> " + utt + color.RESET)
s.speak(utt)
response = test_case['responses'].pop(0)
print("SENDING RESPONSE:",
color.USER_UTT + response + color.RESET)
return response
s.get_response = get_response
def remove_context(self, s, cxt):
"""remove an adapt context."""
if isinstance(cxt, list):
for x in cxt:
MycroftSkill.remove_context(s, x)
else:
MycroftSkill.remove_context(s, cxt)
def set_context(self, s, cxt):
"""Set an adapt context."""
for key, value in cxt.items():
MycroftSkill.set_context(s, key, value)
def send_test_input(self, s, test_case):
"""Emit an utterance, just like the STT engine does. This sends the
provided text to the skill engine for intent matching and it then
invokes the skill.
It also handles some special cases for common play skills and common
query skills.
"""
if 'utterance' in test_case:
self.send_utterance(test_case)
elif 'play_query' in test_case:
self.send_play_query(s, test_case)
elif 'play_start' in test_case:
self.send_play_start(s, test_case)
elif 'question' in test_case:
self.send_question(test_case)
else:
raise SkillTestError('No input provided in test case')
def execute_test(self, s):
""" Execute test case.
Args:
s (MycroftSkill): mycroft skill to test
Returns:
(bool) True if the test succeeded completely.
"""
test_case = load_test_case_file(self.test_case_file)
if 'settings' in test_case:
self.apply_test_settings(s, test_case)
if 'responses' in test_case:
self.setup_get_response(s, test_case)
# If we keep track of test status for the entire skill, then
# get all intents from the skill, and mark current intent
# tested
if self.test_status:
self.test_status.append_intent(s)
if 'intent_type' in test_case:
self.test_status.set_tested(test_case['intent_type'])
evaluation_rule = EvaluationRule(test_case, s)
# Set up queue for emitted events, because the evaluation method
# expects events to be received in order and handled one by one. We
# can't make assumptions about threading in the core or the skill.
q = Queue()
s.bus.q = q
# Set up context before calling intent
# This option makes it possible to better isolate (reduce dependence)
# between test_cases
cxt = test_case.get('remove_context', None)
if cxt:
self.remove_context(s, cxt)
cxt = test_case.get('set_context', None)
if cxt:
self.set_context(s, cxt)
self.send_test_input(s, test_case)
# Wait up to X seconds for the test_case to complete
timeout = self.get_timeout(test_case)
while not evaluation_rule.all_succeeded():
# Process the queue until a skill handler sends a complete message
if self.check_queue(q, evaluation_rule) or time.time() > timeout:
break
self.shutdown_emitter(s)
# Report test result if failed
return self.results(evaluation_rule)
def get_timeout(self, test_case):
"""Find any timeout specified in test case.
If no timeout is specified return the default.
"""
if (test_case.get('evaluation_timeout', None) and
isinstance(test_case['evaluation_timeout'], int)):
return time.time() + int(test_case.get('evaluation_timeout'))
else:
return time.time() + DEFAULT_EVALUATION_TIMEOUT
def check_queue(self, q, evaluation_rule):
"""Check the queue for events.
If an event indicating skill completion is found, returns True, else False.
"""
try:
event = q.get(timeout=1)
if ':' in event.msg_type:
event.data['__type__'] = event.msg_type.split(':')[1]
else:
event.data['__type__'] = event.msg_type
evaluation_rule.evaluate(event.data)
if event.msg_type == 'mycroft.skill.handler.complete':
self.end_of_skill = True
except Empty:
pass
if q.empty() and self.end_of_skill:
return True
else:
return False
def shutdown_emitter(self, s):
"""Shutdown the skill connection to the bus."""
# Stop emitter from sending on the queue
s.bus.q = None
# remove the skill which is not responding
self.emitter.remove_all_listeners('speak')
self.emitter.remove_all_listeners('mycroft.skill.handler.complete')
def results(self, evaluation_rule):
"""Display and report the results."""
if not evaluation_rule.all_succeeded():
self.failure_msg = str(evaluation_rule.get_failure())
print(color.FAIL + "Evaluation failed" + color.RESET)
print(color.FAIL + "Failure:", self.failure_msg + color.RESET)
return False
return True
# Messages that should not print debug info
HIDDEN_MESSAGES = ['skill.converse.request', 'skill.converse.response',
'gui.page.show', 'gui.value.set']
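# A hedged illustration of the internal rule format built by EvaluationRule.__init__
# (example values are hypothetical). A test_case such as
#     {"utterance": "what time is it", "intent_type": "TimeIntent",
#      "intent": {"TimeKeyword": "time"}, "expected_response": "the time is .*"}
# is converted into rules roughly of the form
#     [['and', <intent_type check>, ['equal', 'TimeKeyword', 'time']],
#      ['match', 'utterance', 'the time is .*']]
# Each rule starts with an operator understood by _partial_evaluate ('equal', 'lt',
# 'gt', 'notEqual', 'endsWith', 'exists', 'match', 'and', 'or'), and 'succeeded' is
# appended to a rule once some message on the bus has satisfied it.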
class EvaluationRule:
"""
This class initially converts the test_case json file to an internal rule
format, which is stored throughout the testcase run. All Messages on
the event bus can be evaluated against the rules (test_case).
This approach makes it easier to add new tests, since Message and rule
traversal is already set up for the internal rule format.
The test writer can use the internal rule format directly in the
test_case using the assert keyword, which allows for more
powerful/individual test cases than the standard dictionary
"""
def __init__(self, test_case, skill=None):
""" Convert test_case read from file to internal rule format
Args:
test_case: The loaded test case
skill: optional skill to test, used to fetch dialogs
"""
self.rule = []
_x = ['and']
if 'utterance' in test_case and 'intent_type' in test_case:
intent_type = str(test_case['intent_type'])
_x.append(intent_type_check(intent_type))
# Check for adapt intent info
if test_case.get('intent', None):
for item in test_case['intent'].items():
_x.append(['equal', str(item[0]), str(item[1])])
if 'play_query_match' in test_case:
match = test_case['play_query_match']
phrase = match.get('phrase', test_case.get('play_query'))
self.rule.append(play_query_check(skill, match, phrase))
elif 'expected_answer' in test_case:
question = test_case['question']
expected_answer = test_case['expected_answer']
self.rule.append(question_check(skill, question, expected_answer))
# Check for expected data structure
if test_case.get('expected_data'):
expected_items = test_case['expected_data'].items()
self.rule.append(expected_data_check(expected_items))
if _x != ['and']:
self.rule.append(_x)
# Add rules from expected_response
# Accepts a string or a list of multiple strings
if isinstance(test_case.get('expected_response', None), str):
self.rule.append(['match', 'utterance',
str(test_case['expected_response'])])
elif isinstance(test_case.get('expected_response', None), list):
texts = test_case['expected_response']
rules = [['match', 'utterance', str(r)] for r in texts]
self.rule.append(['or'] + rules)
# Add rules from expected_dialog
# Accepts dialog (without ".dialog"), the same way as self.speak_dialog
# as a string or a list of dialogs
if test_case.get('expected_dialog', None):
if not skill:
print(color.FAIL +
'Skill is missing, can\'t run expected_dialog test' +
color.RESET)
else:
expected_dialog = test_case['expected_dialog']
self.rule.append(['or'] +
expected_dialog_check(expected_dialog,
skill))
if test_case.get('changed_context', None):
ctx = test_case['changed_context']
for c in changed_context_check(ctx):
self.rule.append(c)
if test_case.get('assert', None):
for _x in ast.literal_eval(test_case['assert']):
self.rule.append(_x)
print("Rule created ", self.rule)
def evaluate(self, msg):
""" Main entry for evaluating a message against the rules.
The rules are prepared in the __init__
This method is usually called several times with different
messages using the same rule set. Each call contributes
to fulfilling all the rules.
Args:
msg: The message event to evaluate
"""
if msg.get('__type__', '') not in HIDDEN_MESSAGES:
print("\nEvaluating message: ", msg)
for r in self.rule:
self._partial_evaluate(r, msg)
def _get_field_value(self, rule, msg):
if isinstance(rule, list):
value = msg.get(rule[0], None)
if len(rule) > 1 and value:
for field in rule[1:]:
value = value.get(field, None)
if not value:
break
else:
value = msg.get(rule, None)
return value
def _partial_evaluate(self, rule, msg):
""" Evaluate the message against a part of the rules
Recursive over rules
Args:
rule: A rule or a part of the rules to be broken down further
msg: The message event being evaluated
Returns:
Bool: True if a partial evaluation succeeded
"""
if 'succeeded' in rule: # Rule has already succeeded, test not needed
return True
if rule[0] == 'equal':
if self._get_field_value(rule[1], msg) != rule[2]:
return False
if rule[0] == 'lt':
if not isinstance(self._get_field_value(rule[1], msg), Number):
return False
if self._get_field_value(rule[1], msg) >= rule[2]:
return False
if rule[0] == 'gt':
if not isinstance(self._get_field_value(rule[1], msg), Number):
return False
if self._get_field_value(rule[1], msg) <= rule[2]:
return False
if rule[0] == 'notEqual':
if self._get_field_value(rule[1], msg) == rule[2]:
return False
if rule[0] == 'endsWith':
if not (self._get_field_value(rule[1], msg) and
self._get_field_value(rule[1], msg).endswith(rule[2])):
return False
if rule[0] == 'exists':
if not self._get_field_value(rule[1], msg):
return False
if rule[0] == 'match':
if not (self._get_field_value(rule[1], msg) and
re.match(rule[2], self._get_field_value(rule[1], msg))):
return False
if rule[0] == 'and':
for i in rule[1:]:
if not self._partial_evaluate(i, msg):
return False
if rule[0] == 'or':
for i in rule[1:]:
if self._partial_evaluate(i, msg):
break
else:
return False
rule.append('succeeded')
return True
def get_failure(self):
""" Get the first rule which has not succeeded
Returns:
str: The failed rule
"""
for x in self.rule:
if x[-1] != 'succeeded':
return x
return None
def all_succeeded(self):
""" Test if all rules succeeded
Returns:
bool: True if all rules succeeded
"""
return len([x for x in self.rule if x[-1] != 'succeeded']) == 0
|
|
import random
import string
from copy import copy, deepcopy
import json
from time import time
from pprint import pprint
import re
from itertools import product
import numpy as np
from regex_finder import findregex
MAX_WIDTH = 13
X, Y, Z = 'x', 'y', 'z'
def main():
for i in range(10):
chars = set(random.sample(string.ascii_uppercase, random.randint(8, 16)))
grid = HexGrid(chars)
useSpecialHint = False  # random.choice([True, False])
generateSolution(grid, useSpecialHint)
hints = generateRegexHints(grid)
name = str(round(time() * 1000000))
board_data = {'size':MAX_WIDTH, 'name':name, 'x':hints[X], 'y':hints[Y], 'z':hints[Z]}
solution = {'rows':getSampleSolution(grid)}
pprint(board_data)
pprint(solution)
puzzlename = 'puzzles/' + board_data['name'] + '.json'
solutionName = 'puzzles/' + board_data['name'] + '_solution.json'
with open(puzzlename, 'w') as f:
json.dump(board_data, f)
with open(solutionName, 'w') as f:
json.dump(solution, f)
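# A note on the board shape (derived from constructGrid below): with MAX_WIDTH = 13
# each row is padded with None so the usable row lengths run 7, 8, ..., 13, ..., 8, 7,
# i.e. a hexagon whose widest row holds MAX_WIDTH cells.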
class HexGrid:
def __init__(self, chars):
self.grid = self.constructGrid()
self.chars = chars
def constructGrid(self):
grid = []
gridRow = [Cell() for i in range(1, MAX_WIDTH + 1)]
frontBuffer = list(range(MAX_WIDTH // 2, 0, -1)) + [0] * (MAX_WIDTH // 2 + 1)
backBuffer = list(reversed(frontBuffer))
for i in range(MAX_WIDTH):
rowLen = MAX_WIDTH - frontBuffer[i] - backBuffer[i]
row = [None]*frontBuffer[i] + deepcopy(gridRow[:rowLen]) + [None]*backBuffer[i]
grid.append(row)
return grid
def iterDirection(self, direction):
if direction == X:
for row in self.iterXDirection():
yield row
elif direction == Y:
for row in self.iterYDirection():
yield row
elif direction == Z:
for row in self.iterZDirection():
yield row
def iterXDirection(self):
# rows going SW->NE
ar = np.array(self.grid)
fromIdx = MAX_WIDTH // 2
toIdx = -MAX_WIDTH // 2
for i in range(fromIdx, toIdx, -1):
cellRow = []
for cell in reversed(np.fliplr(ar).diagonal(i)):
cellRow.append(cell)
yield cellRow
def iterYDirection(self):
# rows going E->W
for row in self.grid:
cellRow = []
for cell in row:
if not cell:
continue
else:
cellRow.append(cell)
yield cellRow
def iterZDirection(self):
# rows going NW->SE
for col in range(MAX_WIDTH):
cellRow = []
for row in range(MAX_WIDTH):
cell = self.grid[row][col]
if not cell:
continue
else:
cellRow.append(cell)
yield cellRow
class Cell:
def __init__(self):
self.regex = ''
self.allowed = set()
self.notAllowed = set()
def addConstraints(self, allowedChar = None, disallowedChar = None):
if allowedChar:
self.allowed.add(allowedChar)
if disallowedChar:
self.notAllowed.add(disallowedChar)
def compactRegex(self):
self.regex = self.regex.replace('|', '')
self.regex = self.regex.replace('^', '')
self.regex = self.regex.replace('$', '')
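# A small worked example for Cell.compactRegex above: findregex may return a regex
# containing alternation and anchors; compactRegex only strips the '|', '^' and '$'
# characters, so e.g. '^A|B$' is compacted to 'AB' before the per-row hints are
# joined together in generateRegexHints.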
def generateSolution(grid, useSpecialSolution = True):
for row in grid.iterYDirection():
if useSpecialSolution and len(row) == MAX_WIDTH:
insertSpecialSolution(row, grid.chars)
else:
for cell in row:
goodCount = random.randint(1,3)
#goodCount = 1 if random.random() > 0.3 else 2
badCount = random.randint(1,3)
#badCount = 1 if random.random() > 0.3 else 2
for i in range(goodCount):
goodChar = random.sample(grid.chars - cell.allowed - cell.notAllowed, 1)[0]
cell.addConstraints(allowedChar = goodChar)
for i in range(badCount):
badChar = random.sample(grid.chars - cell.allowed - cell.notAllowed, 1)[0]
cell.addConstraints(disallowedChar = badChar)
cell.regex = findregex(cell.allowed, cell.notAllowed - cell.allowed)
assert(cell.regex != '')
cell.compactRegex()
def insertSpecialSolution(row, chars):
hint = 'TEXTALGORITHM'
badChars = copy(chars)
for cell, goodChar in zip(row, hint):
cell.allowed.add(goodChar)
#badCount = random.randint(1,2)
badCount = 1 if random.random() > 0.3 else 2
for i in range(badCount):
badChar = random.sample(chars - cell.notAllowed, 1)[0]
cell.addConstraints(disallowedChar = badChar)
def generateRegexHints(grid):
hints = {X:[], Y:[], Z:[]}
for d in (X, Y, Z):
for row in grid.iterDirection(d):
components = []
regex = ''
for c in row:
components.append(c.regex)
regex = shorten('-'.join(components))
hints[d].append(regex)
return hints
def shorten(regex):
components = regex.split('-')
#print(1, 'components:', components)
orGroups = []
regex = ''
for c in components:
#print(2, 'component:', c)
if rnd(0.7):
orGroups.append(c)
else:
regex += mergeOrGroups(orGroups)
#print(3, 'regex:', regex)
regex += mergeOrGroups([c])
#print(4, 'regex:', regex)
orGroups = []
regex += mergeOrGroups(orGroups)
#print(6, 'regex:', regex)
regex = regex.replace('..*', '.*')
regex = re.sub(r'\.\*(\.\*)+', '.*', regex)
regex = regex.replace('**', '*')
#print(8, 'regex:', regex)
return regex
def mergeOrGroups(orGroups):
#print(5, 'orGroups:', orGroups)
if len(orGroups) == 0:
return ''
elif len(orGroups) == 1:
rePart = orGroups.pop()
if len(rePart) > 2:
return '.'
elif len(rePart) > 1:
return '[{}]'.format(rePart)
else:
return rePart
else:
repeatSet = set(''.join(orGroups))
if len(repeatSet) == 1:
return '{}*'.format(repeatSet.pop())
elif len(repeatSet) > 3:
return '.*'
else:
if rnd(0.2):
return '.*'
else:
repeat = '+' if rnd(0.8) else '*'
return '[{}]{}'.format(''.join(sorted(repeatSet)), repeat)
def rnd(x = 0.5):
return random.random() < x
def getAllowedStrings(row):
for cell in row:
print('cell.allowed', cell.allowed)
allowed = [cell.allowed for cell in row]
strings = set(map(''.join, product(*allowed)))
return set(strings)
def getNotAllowedStrings(row):
for cell in row:
print('cell.notAllowed', cell.notAllowed)
notAllowed = [cell.notAllowed for cell in row]
strings = set(map(''.join, product(*notAllowed)))
return set(strings)
def getSampleSolution(grid):
rowSolutions = []
for row in grid.iterDirection(Y):
rowSolution = []
for cell in row:
rowSolution.append(random.choice(list(cell.allowed)))
rowSolutions.append(rowSolution)
return rowSolutions
if __name__ == '__main__':
    main()
|
|
from __future__ import unicode_literals
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import no_oracle
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import override_settings
from django.utils import timezone
if HAS_GEOS:
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article, Event
@skipUnlessDBFeature("gis_enabled")
class RelatedGeoModelTest(TestCase):
fixtures = ['initial']
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.order_by('id')
qs2 = City.objects.order_by('id').select_related()
qs3 = City.objects.order_by('id').select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@skipUnlessDBFeature("has_transform_method")
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` to select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@skipUnlessDBFeature("supports_extent_aggr")
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e):
self.assertAlmostEqual(ref_val, e_val, tol)
@skipUnlessDBFeature("has_unionagg_method")
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
# union that is returned. Each point corresponds to a City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(
name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth'),
).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(type(u1), MultiPoint)
self.assertEqual(type(u3), MultiPoint)
# Ordering of points in the result of the union is not defined and
# implementation-dependent (DB backend, GEOS version)
self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u1})
self.assertSetEqual({p.ewkt for p in ref_u2}, {p.ewkt for p in u2})
self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u3})
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry(
'POLYGON((-97.501205 33.052520,-97.501205 33.052576,'
'-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))',
srid=4326
)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if connection.features.supports_transform:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if connection.features.supports_transform:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
# GeoQuerySet, GeoValuesQuerySet, and GeoValuesListQuerySet, respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertIsInstance(d['point'], Geometry)
self.assertIsInstance(t[1], Geometry)
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
@override_settings(USE_TZ=True)
def test_07b_values(self):
"Testing values() and values_list() with aware datetime. See #21565."
Event.objects.create(name="foo", when=timezone.now())
list(Event.objects.values_list('when'))
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of the ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
# TODO: fix on Oracle -- qs2 returns an empty result for an unknown reason
@no_oracle
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertIn('Aurora', names)
self.assertIn('Kecksburg', names)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a `GeoValuesQuerySet`, see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertIsInstance(qs[0]['point'], GEOSGeometry)
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@skipUnlessDBFeature("supports_collect_aggr")
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry(
'MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,'
'-95.363151 29.763374,-96.801611 32.782057)'
)
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
# Even though Dallas and Ft. Worth share same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertTrue(ref_geom.equals(coll))
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated as list generation swallows TypeError in CPython.
str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
|
|
import webapp2
import re
import weakref
from webapp2 import cached_property
from webapp2_extras import sessions
from google.appengine.api import users
from ferris.core.ndb import encode_key, decode_key
from ferris.core.uri import Uri
from ferris.core import inflector, auth, events, views, request_parsers, response_handlers, routing
from ferris.core.json_util import parse as json_parse, stringify as json_stringify
from bunch import Bunch
_temporary_route_storage = []
def route(f):
"""
Marks a method for automatically routing and accessible via HTTP.
See :mod:`~ferris.core.routing` for more details on how methods are auto-routed.
This decorator should always be the outermost decorator.
For example::
@route
def exterminate(self):
return 'EXTERMINAAATE!'
"""
global _temporary_route_storage
_temporary_route_storage.append((f, (), {}))
return f
def route_with(*args, **kwargs):
"""
Marks a class method to be routed similarly to :func:`route` and passes any additional arguments to the webapp2.Route
constructor.
:param template: Sets the URL template for this action
For example::
@route_with(template='/posts/archive/<year>')
def archive_by_year(self, year):
pass
"""
def inner(f):
_temporary_route_storage.append((f, args, kwargs))
return f
return inner
def add_authorizations(*args):
"""
Adds additional authorization chains to a particular action. These are executed after the
chains set in Controller.Meta.
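For example (illustrative only; ``auth.require_user`` is assumed here to be an
authorization callable from :mod:`~ferris.core.auth`)::
    @route
    @add_authorizations(auth.require_user)
    def members_only(self):
        pass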
"""
def inner(f):
setattr(f, 'authorizations', args)
return f
return inner
class Controller(webapp2.RequestHandler, Uri):
"""
Controllers allow grouping of common actions and provide them with
automatic routing, reusable components, request data parsing, and
view rendering.
"""
_controllers = []
class __metaclass__(type):
def __new__(meta, name, bases, dict):
global _temporary_route_storage
cls = type.__new__(meta, name, bases, dict)
if name != 'Controller':
# Add to the controller registry
if not cls in Controller._controllers:
Controller._controllers.append(cls)
# Make sure the Meta class has a proper inheritance chain
if not issubclass(cls.Meta, Controller.Meta):
cls.Meta = type('Meta', (cls.Meta, Controller.Meta), {})
cls._route_list = _temporary_route_storage
_temporary_route_storage = []
return cls
# The name of this class, lowercase (automatically determined)
name = 'controller'
#: The current user as determined by ``google.appengine.api.users.get_current_user()``.
user = None
#: View Context, all these variables will be passed to the view.
context = property(lambda self: self.meta.view.context)
class Meta(object):
"""
The Meta class stores configuration information for a Controller. This class is constructed
into an instance and made available at ``self.meta``. This class is optional; Controllers that
do not specify it will receive the default configuration. Additionally, you need not inherit from
this class as Controller's metaclass will ensure it.
For example::
class Posts(Controller):
class Meta: # no inheritance
prefixes = ('admin', )
# all other properties inherited from default.
"""
#: List of components.
#: When declaring a controller, this must be a list or tuple of classes.
#: When the controller is constructed, ``controller.components`` will
#: be populated with instances of these classes.
components = tuple()
#: Prefixes are added in front of the controller method (like admin_list) and will cause routing
#: to produce a url such as '/admin/name/list' and a name such as 'admin:name:list'
prefixes = tuple()
#: Authorizations control access to the controller. Each authorization is a callable.
#: Authorizations are called in order and all must return True for the request to be
#: processed. If they return False or a tuple like (False, 'message'), the request will
#: be rejected.
#: You should **always** have ``auth.require_admin_for_prefix(prefix=('admin',))`` in your
#: authorization chain.
authorizations = (auth.require_admin_for_prefix(prefix=('admin',)),)
#: Which :class:`~ferris.core.views.View` class to use by default. use :meth:`change_view` to switch views.
View = views.TemplateView
#: Which :class:`RequestParser` class to use by default. See :meth:`Controller.parse_request`.
Parser = 'Form'
def __init__(self, controller):
self._controller = controller
self.view = None
self.change_view(self.View)
def change_view(self, view, persist_context=True):
"""
Swaps the view, and by default keeps context between the two views.
:param view: View class or name.
"""
context = self.view.context if self.view else None
self.View = view if not isinstance(view, basestring) else views.factory(view)
self.view = self.View(self._controller, context)
class Util(object):
"""
Provides some basic utility functions. This class is constructed into an instance
and made available at ``controller.util``.
"""
def __init__(self, controller):
self._controller = controller
#: Decodes a urlsafe ``ndb.Key``.
decode_key = staticmethod(decode_key)
#: Encode an ``ndb.Key`` (or ``ndb.Model`` instance) into an urlsafe string.
encode_key = staticmethod(encode_key)
#: Decodes a json string.
parse_json = staticmethod(json_parse)
#: Encodes a json string.
stringify_json = staticmethod(json_stringify)
def __init__(self, *args, **kwargs):
super(Controller, self).__init__(*args, **kwargs)
self.name = inflector.underscore(self.__class__.__name__)
self.proper_name = self.__class__.__name__
self.util = self.Util(weakref.proxy(self))
self.route = None
def _build_components(self):
self.events.before_build_components(controller=self)
if hasattr(self.Meta, 'components'):
component_classes = self.Meta.components
self.components = Bunch()
for cls in component_classes:
if hasattr(cls, 'name'):
name = cls.name
else:
name = inflector.underscore(cls.__name__)
self.components[name] = cls(weakref.proxy(self))
else:
self.components = Bunch()
self.events.after_build_components(controller=self)
def _init_route(self):
action = self.request.route.handler_method
prefix = None
for possible_prefix in self.Meta.prefixes:
if action.startswith(possible_prefix):
prefix = possible_prefix
action = action.replace(prefix + '_', '')
break
self.route = Bunch(
prefix=prefix,
controller=self.name,
action=action,
name=self.request.route.name,
args=self.request.route_args,
kwargs=self.request.route_kwargs)
def _init_meta(self):
self.user = users.get_current_user()
self._init_route()
self.events = events.NamedBroadcastEvents(prefix='controller_')
self.meta = self.Meta(weakref.proxy(self))
self._build_components()
@classmethod
def _build_routes(cls, router):
"""
Called in the main app router to get all of this controller's routes.
Override to add custom/additional routes.
"""
# Route the rest methods
router.add(routing.build_scaffold_routes_for_controller(cls))
for prefix in cls.Meta.prefixes:
router.add(routing.build_scaffold_routes_for_controller(cls, prefix))
# Auto route the remaining methods
for route in routing.build_routes_for_controller(cls):
vars = re.findall(r'\[(\w+)\]', route.template)
if vars:
action = route.handler_method
split = action.split('_')
prefixed = split[0] in cls.Meta.prefixes
controller_data = {
'prefix': split[0] if prefixed else None,
'controller': inflector.underscore(cls.__name__),
'action': '_'.join(split[1:]) if prefixed else action,
}
for i in vars:
value = controller_data.get(i)
if not value:
continue
route.template = route.template.replace('['+i+']', value)
router.add(route)
events.fire('controller_build_routes', cls=cls, router=router)
def startup(self):
"""
Called when a new request is received and before authorization and dispatching.
This is the main point in which to listen for events or change dynamic configuration.
"""
pass
def _is_authorized(self):
authorizations = self.meta.authorizations
#per-handler authorizations
method = getattr(self, self.request.route.handler_method)
if hasattr(method, 'authorizations'):
authorizations = authorizations + method.authorizations
authorizations = list(authorizations) # convert to list so listeners can modify
self.events.before_authorization(controller=self, authorizations=authorizations)
auth_result = True
for chain in authorizations:
auth_result = chain(self)
if auth_result is not True:
break
if auth_result is not True:
message = u"Authorization chain rejected request"
if isinstance(auth_result, tuple):
message = auth_result[1]
self.events.authorization_failed(controller=self, message=message)
self.abort(403, message)
self.events.after_authorization(controller=self)
def _clear_redirect(self):
if self.response.status_int in [300, 301, 302]:
self.response.status = 200
del self.response.headers['Location']
def dispatch(self):
"""
Calls startup, checks authorization, and then the controller method.
If a view is set and auto rendering is enabled, then it will try to automatically
render the view if the action doesn't return anything.
If the controller method returns anything other than None, auto-rendering is skipped
and the result is transformed into a response using the :mod:`~ferris.core.response_handlers`.
"""
# Setup everything, the session, etc.
self._init_meta()
self.session_store = sessions.get_store(request=self.request)
self.context.set_dotted('this.session', self.session)
self.events.before_startup(controller=self)
self.startup()
self.events.after_startup(controller=self)
# Authorization
self._is_authorized()
# Dispatch to the method
self.events.before_dispatch(controller=self)
result = super(Controller, self).dispatch()
self.events.after_dispatch(response=result, controller=self)
# Return value handlers.
# Response has highest precedence, the view class has lowest.
response_handler = response_handlers.factory(type(result))
if response_handler:
self.response = response_handler(self, result)
# View rendering works similarly to the response handler mode above.
elif self.meta.view.auto_render:
self._clear_redirect()
self.response = self.meta.view.render()
else:
self.abort(500, 'Nothing was able to handle the response %s (%s)' % (result, type(result)))
self.events.dispatch_complete(controller=self)
self.session_store.save_sessions(self.response)
self.events.clear()
return self.response
@cached_property
def session(self):
"""
Sessions are a simple dictionary of data that's persisted across requests for a particular
browser session.
Sessions are backed by an encrypted cookie and memcache.
"""
return self.session_store.get_session(backend='memcache')
def parse_request(self, container=None, fallback=None, parser=None):
"""
Parses request data (like GET, POST, JSON, XML) into a container (like a Form or Message)
instance using a :class:`~ferris.core.request_parsers.RequestParser`. By default, it assumes
you want to process GET/POST data into a Form instance, for that simple case you can use::
data = self.parse_request()
provided you've set the Form attribute of the Meta class.
"""
parser_name = parser if parser else self.meta.Parser
parser = request_parsers.factory(parser_name)
if not container:
container_name = parser.container_name
if not hasattr(self.meta, container_name):
raise AttributeError('Meta has no %s class, can not parse request' % container_name)
container = getattr(self.meta, container_name)
return parser.process(self.request, container, fallback)
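# Illustrative sketch (hypothetical controller, not part of this module): a minimal
# controller relying on the defaults above could look like
#
#     class Posts(Controller):
#         class Meta:
#             prefixes = ('admin',)
#
#         @route
#         def list(self):
#             return 'listing posts'
#
# Per the prefix documentation in Meta, routing would then expose something like
# '/posts/list' plus a prefixed '/admin/posts/list' variant.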
|
|
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import os, sys
from glob import glob
from time import time
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.cluster import estimate_bandwidth, MeanShift
import numpy as np
import matplotlib.pyplot as plt
import cv2
from cv2 import imread, imwrite, resize
from features_utils import *
import data_utils as du
from skimage.feature import hog as skhog
import io_utils
def vis_clusters(labels, n_clusters, imgs, w=2000, h=2000, max_img_per_cluster=10, zoom=.07, padding=10, pix_w=3, pix_h=1.5):
h_cluster = int(h/float(n_clusters))
ax = plt.gca()
#images = [OffsetImage(image, zoom=zoom) for image in cvimgs]
artists = []
half_height = 100
start_x = padding
start_y = padding
for k in range(n_clusters):
my_members = labels == k
idx = np.array(range(len(labels)))
idx_for_k = idx[my_members]
y0 = start_y + (h_cluster*k*pix_h) + (half_height)
for im_i in range(max_img_per_cluster):
if im_i >= idx_for_k.shape[0]:
print "less then min imgs i:{} len:{}".format(im_i, idx_for_k.shape)
break
im_p = imgs[idx_for_k[im_i]]
img = imread(im_p)[:,:,(2,1,0)]
im0 = OffsetImage(img, zoom=zoom)
wf = img.shape[1]*zoom*pix_w
x0 = start_x + (wf*im_i) + (wf/2)
ab = AnnotationBbox(im0, (x0, y0), xycoords='data', frameon=False)
artists.append(ax.add_artist(ab))
ax.set_ylim([0,h])
ax.set_xlim([0,w])
plt.title('Estimated number of clusters: %d' % n_clusters)
plt.show()
import shutil
def save_clusters(labels, n_clusters, imgs, cls2ind=None, folder_name='cluster_result', path_save='/tmp', test=False):
counter_cls = dict()
path_ = os.path.join(
path_save,
folder_name
)
if os.path.exists(path_):
shutil.rmtree(path_)
os.mkdir(path_)
for k in range(n_clusters):
path_k = path_+ '/'+ str(k)
if not os.path.exists(path_k):
os.mkdir(path_k)
my_members = labels == k
idx = np.array(range(len(labels)))
idx_for_k = idx[my_members]
if not counter_cls.has_key(k):
counter_cls[k] = dict()
for im_i in range(idx_for_k.shape[0]):
im_p = imgs[idx_for_k[im_i]]
if test==False:
type_ = im_p.split('/')[-2]
else:
type_ = 'unknown'
if not counter_cls[k].has_key(type_):
counter_cls[k][type_] = 0
counter_cls[k][type_] += 1
img = imread(im_p)
img = resize(img, (int(img.shape[1]/3.),int(img.shape[0]/3.)), interpolation =cv2.INTER_CUBIC)
fil_n = im_p.split('/')[-1]
if cls2ind is not None:
fil_n = str(cls2ind[type_])+'_'+fil_n
else:
if test==False:
fil_n = type_+'_'+fil_n
cv2.imwrite(path_k+'/'+fil_n, img)
return counter_cls
def bandwith_and_meanshift(Data, quantile=0.2, n_samples_bdw=500):
bandwidth = estimate_bandwidth(Data, quantile, n_samples=n_samples_bdw)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(Data)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
return ms, labels, cluster_centers,n_clusters_
def kmeans(data, n_clusters=8, init='k-means++', n_init=10, max_iter=300, tol=0.0001, precompute_distances='auto', verbose=0, copy_x=True, n_jobs=1, algorithm='auto', seed=1):
np.random.seed(seed)
n_samples, n_features = data.shape
print("n_samples %d, \t n_features %d"
% (n_samples, n_features))
model = KMeans(
n_clusters=n_clusters,
max_iter=max_iter,
n_init=n_init,
init=init,
algorithm=algorithm,
random_state=seed,
verbose=verbose)
model.fit(data)
return model
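# Illustrative usage (sketch only, hypothetical data): cluster color histograms and
# dump thumbnails of each cluster for inspection with the helpers defined above:
#
#     model = kmeans(all_color_hists, n_clusters=15, seed=1)
#     save_clusters(model.labels_, model.n_clusters, imgs_list,
#                   folder_name='cluster_result', path_save='/tmp')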
def bench_cluster_model(estimator, name, data, sample_size, labels):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
def merge_cluster_by_groups(dict_groups, model=None, labels_=None, hogs=None, true_labels=None):
""" example of grouping
group2merge = {
0: [0,4,7,9,14],
1: [1,2,3,5,6,8,10,11,12,13]
}
every index is a label in cluster_model.labels_
"""
groups = dict()
if model is not None:
clust_labels = model.labels_
else:
clust_labels = labels_
# take the indices corresponding to every element of the cluster
# and insert them into the matching group
# the same indices will also be used to fetch the hogs, the images and the labels
total_el = 0
for group in dict_groups:
groups[group] = []
for clust_idx in dict_groups[group]:
my_members = clust_labels == clust_idx
idx = np.array(range(len(clust_labels)))
idx_for_k = idx[my_members]
for idx_ in idx_for_k:
groups[group].append(idx_)
total_el+=1
if hogs is None and true_labels is None:
return groups
if hogs is not None:
hogs_by_group = dict()
for g in groups:
hogs_by_group[g] = []
for idx_ in groups[g]:
hogs_by_group[g].append(hogs[idx_])
if true_labels is None:
return groups, hogs_by_group
if true_labels is not None:
labels_by_group = dict()
for g in groups:
labels_by_group[g] = []
for idx_ in groups[g]:
labels_by_group[g].append(true_labels[idx_])
if hogs is None:
return groups, labels_by_group
return groups, hogs_by_group, labels_by_group
def prepare_features_by_group_on_clustered_imgs(
path_glob, clust_model, grouping_dict, save_clusters_=False, path_save=None, test=False,
ratio_resize = .75, h_resize=100, cls2ind=None):
"""
params:
- path_glob: the directory for loading images from in glob format [folder of images is class if not test]
- clust_model: the model for predict the clusters
- grouping_dict: the grouping criteria to apply to the clustered images
- save_clusters_: if True save thumbnail copy of images in tmp folder grouped by clusters
- path_save: if not None, dump all features inside the specified path
- ratio_resize: for hog features resize images with h = h_resize and w = h*ratio_resize, default .75
- h_resize: default 100
- cls2ind: if test is False, gets the folder name of the image and sets true labels based on value in cls2ind
return:
- all_color_hists
- all_hogs
- labels
- groups
- hogs_by_group
- labels_by_group
- imgs_list
"""
# get the list of images
imgs_list = glob(path_glob)
assert len(imgs_list)>0, "no imgs found"
# extract color histogram features
# on first image so can get the feature length
im_ex = cv2.imread(imgs_list[0])
color_hists_ex = extract_color_hist(im_ex)
all_color_hists = np.empty((len(imgs_list), color_hists_ex.shape[0]))
error_imgs = 0
error_imgs_idx = []
for i,im in enumerate(imgs_list):
img = cv2.imread(im)
if img is None:
error_imgs+=1
error_imgs_idx.append(i)
continue
all_color_hists[i]= extract_color_hist(img)
if i % 100 == 0:
print "done extracting color hist {}".format(i)
print "imgs_list size: {}".format(len(imgs_list))
for index in sorted(error_imgs_idx, reverse=True):
del imgs_list[index]
all_color_hists = np.delete(all_color_hists, error_imgs_idx, 0)
print "new imgs_list size: {}, color_hist_shape {}".format(len(imgs_list), all_color_hists.shape)
labels = clust_model.predict(all_color_hists)
if save_clusters_:
save_clusters(labels, clust_model.n_clusters, imgs_list, folder_name='cluster_test', test=test)
if path_save is not None:
du.mkdirs(path_save)
io_utils.dump(labels, path_save+'/labels.pkl')
io_utils.dump(all_color_hists, path_save+'/color_hists.pkl')
# extract hog features
ratio = ratio_resize
h = h_resize
w = int(h*ratio)
imscaled = resize(im_ex, (w,h))
hog_ex = skhog(imscaled[:,:,0].transpose(1,0), visualise=False)
all_hogs = np.empty((len(imgs_list), hog_ex.shape[0]))
if test is False:
true_labels = np.empty((len(imgs_list),))
else:
true_labels = None
for i,im in enumerate(imgs_list):
if test is False:
cls = im.split('/')[-2]
true_labels[i] = cls2ind[cls]
cvim = cv2.imread(im)
imscaled = resize(cvim, (w,h))
img = imscaled[:,:,0].transpose(1,0)
#print img.shape
all_hogs[i]= skhog(img)
if i % 100 == 0:
print "done extracting hog {}".format(i)
if path_save is not None:
io_utils.dump(all_hogs, path_save+'/hogs.pkl')
# now merge by specified grouping
if test is False:
groups, hogs_by_group, labels_by_group = merge_cluster_by_groups(grouping_dict, labels_=labels, hogs=all_hogs, true_labels=true_labels)
else:
groups, hogs_by_group = merge_cluster_by_groups(grouping_dict, labels_=labels, hogs=all_hogs, true_labels=None)
labels_by_group = []
tot_el = 0
for g in groups:
tot_el += len(groups[g])
assert tot_el == len(imgs_list), 'error in grouping, number in grouping {}, in imgs list {}'.format(tot_el, len(imgs_list))
return all_color_hists, all_hogs, labels, groups, hogs_by_group, labels_by_group, imgs_list
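# Illustrative call (hypothetical paths and grouping, sketch only): predict clusters
# for unlabeled test images with a previously fitted model and merge them into two
# coarse groups before extracting HOG features:
#
#     grouping = {0: [0, 4, 7], 1: [1, 2, 3, 5, 6]}
#     (color_hists, hogs, labels, groups, hogs_by_group,
#      labels_by_group, imgs) = prepare_features_by_group_on_clustered_imgs(
#         '/data/test/*.jpg', clust_model=model, grouping_dict=grouping, test=True)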
|
|
from functools import wraps
from django.conf import settings
from django.db import transaction
from django.shortcuts import get_object_or_404, redirect
from prices import Money, TaxedMoney
from ..account.utils import store_user_address
from ..checkout import AddressType
from ..core.utils.taxes import (
ZERO_MONEY, get_tax_rate_by_name, get_taxes_for_address)
from ..core.weight import zero_weight
from ..dashboard.order.utils import get_voucher_discount_for_order
from ..discount.models import NotApplicable
from ..order import FulfillmentStatus, OrderStatus, emails
from ..order.models import Fulfillment, FulfillmentLine, Order, OrderLine
from ..payment import ChargeStatus
from ..payment.utils import gateway_refund, gateway_void
from ..product.utils import (
allocate_stock, deallocate_stock, decrease_stock, increase_stock)
from ..product.utils.digital_products import (
get_default_digital_content_settings)
def order_line_needs_automatic_fulfillment(line: OrderLine) -> bool:
"""Check if given line is digital and should be automatically fulfilled"""
digital_content_settings = get_default_digital_content_settings()
default_automatic_fulfillment = (
digital_content_settings['automatic_fulfillment'])
content = line.variant.digital_content
if default_automatic_fulfillment and content.use_default_settings:
return True
if content.automatic_fulfillment:
return True
return False
def order_needs_automatic_fullfilment(order: Order) -> bool:
"""Check if order has digital products which should be automatically
fulfilled"""
for line in order.lines.digital():
if order_line_needs_automatic_fulfillment(line):
return True
return False
def fulfill_order_line(order_line, quantity):
"""Fulfill order line with given quantity."""
if order_line.variant and order_line.variant.track_inventory:
decrease_stock(order_line.variant, quantity)
order_line.quantity_fulfilled += quantity
order_line.save(update_fields=['quantity_fulfilled'])
def automatically_fulfill_digital_lines(order: Order):
"""Fulfill all digital lines which have enabled automatic fulfillment
setting and send confirmation email."""
digital_lines = order.lines.filter(
is_shipping_required=False, variant__digital_content__isnull=False)
digital_lines = digital_lines.prefetch_related('variant__digital_content')
if not digital_lines:
return
fulfillment, _ = Fulfillment.objects.get_or_create(order=order)
for line in digital_lines:
if not order_line_needs_automatic_fulfillment(line):
continue
digital_content = line.variant.digital_content
digital_content.urls.create(line=line)
quantity = line.quantity
FulfillmentLine.objects.create(
fulfillment=fulfillment, order_line=line,
quantity=quantity)
fulfill_order_line(order_line=line, quantity=quantity)
emails.send_fulfillment_confirmation.delay(order.pk, fulfillment.pk)
def check_order_status(func):
"""Check if order meets preconditions of payment process.
Order can not have draft status or be fully paid. Billing address
must be provided.
If not, redirect to order details page.
"""
# pylint: disable=cyclic-import
from .models import Order
@wraps(func)
def decorator(*args, **kwargs):
token = kwargs.pop('token')
order = get_object_or_404(Order.objects.confirmed(), token=token)
if not order.billing_address or order.is_fully_paid():
return redirect('order:details', token=order.token)
kwargs['order'] = order
return func(*args, **kwargs)
return decorator
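# Illustrative usage (hypothetical view, not part of this module): the decorated view
# receives the resolved ``order`` in place of the raw ``token`` URL kwarg:
#
#     @check_order_status
#     def payment_choice(request, order):
#         ...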
def update_voucher_discount(func):
"""Recalculate order discount amount based on order voucher."""
@wraps(func)
def decorator(*args, **kwargs):
if kwargs.pop('update_voucher_discount', True):
order = args[0]
try:
discount_amount = get_voucher_discount_for_order(order)
except NotApplicable:
discount_amount = ZERO_MONEY
order.discount_amount = discount_amount
return func(*args, **kwargs)
return decorator
@update_voucher_discount
def recalculate_order(order, **kwargs):
"""Recalculate and assign total price of order.
Total price is a sum of items in order and order shipping price minus
discount amount.
Voucher discount amount is recalculated by default. To avoid this, pass
update_voucher_discount argument set to False.
"""
# avoid using prefetched order lines
lines = [OrderLine.objects.get(pk=line.pk) for line in order]
prices = [line.get_total() for line in lines]
total = sum(prices, order.shipping_price)
# discount amount can't be greater than order total
order.discount_amount = min(order.discount_amount, total.gross)
if order.discount_amount:
total -= order.discount_amount
order.total = total
order.save()
recalculate_order_weight(order)
def recalculate_order_weight(order):
"""Recalculate order weights."""
weight = zero_weight()
for line in order:
if line.variant:
weight += line.variant.get_weight() * line.quantity
order.weight = weight
order.save(update_fields=['weight'])
def update_order_prices(order, discounts):
"""Update prices in order with given discounts and proper taxes."""
taxes = get_taxes_for_address(order.shipping_address)
for line in order:
if line.variant:
line.unit_price = line.variant.get_price(discounts, taxes)
line.tax_rate = get_tax_rate_by_name(
line.variant.product.tax_rate, taxes)
line.save()
if order.shipping_method:
order.shipping_price = order.shipping_method.get_total(taxes)
order.save()
recalculate_order(order)
def cancel_order(order, restock):
"""Cancel order and associated fulfillments.
Return products to corresponding stocks if restock is set to True.
"""
if restock:
restock_order_lines(order)
for fulfillment in order.fulfillments.all():
fulfillment.status = FulfillmentStatus.CANCELED
fulfillment.save(update_fields=['status'])
order.status = OrderStatus.CANCELED
order.save(update_fields=['status'])
payments = order.payments.filter(is_active=True).exclude(
charge_status=ChargeStatus.FULLY_REFUNDED)
for payment in payments:
if payment.can_refund():
gateway_refund(payment)
elif payment.can_void():
gateway_void(payment)
def update_order_status(order):
"""Update order status depending on fulfillments."""
quantity_fulfilled = order.quantity_fulfilled
total_quantity = order.get_total_quantity()
if quantity_fulfilled <= 0:
status = OrderStatus.UNFULFILLED
elif quantity_fulfilled < total_quantity:
status = OrderStatus.PARTIALLY_FULFILLED
else:
status = OrderStatus.FULFILLED
if status != order.status:
order.status = status
order.save(update_fields=['status'])
def cancel_fulfillment(fulfillment, restock):
"""Cancel fulfillment.
Return products to corresponding stocks if restock is set to True.
"""
if restock:
restock_fulfillment_lines(fulfillment)
for line in fulfillment:
order_line = line.order_line
order_line.quantity_fulfilled -= line.quantity
order_line.save(update_fields=['quantity_fulfilled'])
fulfillment.status = FulfillmentStatus.CANCELED
fulfillment.save(update_fields=['status'])
update_order_status(fulfillment.order)
def attach_order_to_user(order, user):
"""Associate existing order with user account."""
order.user = user
store_user_address(user, order.billing_address, AddressType.BILLING)
if order.shipping_address:
store_user_address(user, order.shipping_address, AddressType.SHIPPING)
order.save(update_fields=['user'])
@transaction.atomic
def add_variant_to_order(
order,
variant,
quantity,
discounts=None,
taxes=None,
allow_overselling=False,
track_inventory=True):
"""Add total_quantity of variant to order.
Returns an order line the variant was added to.
By default, raises InsufficientStock exception if quantity could not be
fulfilled. This can be disabled by setting `allow_overselling` to True.
"""
if not allow_overselling:
variant.check_quantity(quantity)
try:
line = order.lines.get(variant=variant)
line.quantity += quantity
line.save(update_fields=['quantity'])
except OrderLine.DoesNotExist:
product_name = variant.display_product()
translated_product_name = variant.display_product(translated=True)
if translated_product_name == product_name:
translated_product_name = ''
line = order.lines.create(
product_name=product_name,
translated_product_name=translated_product_name,
product_sku=variant.sku,
is_shipping_required=variant.is_shipping_required(),
quantity=quantity,
variant=variant,
unit_price=variant.get_price(discounts, taxes),
tax_rate=get_tax_rate_by_name(variant.product.tax_rate, taxes))
if variant.track_inventory and track_inventory:
allocate_stock(variant, quantity)
return line
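# Usage sketch (illustrative; assumes an existing `order` and a product `variant`):
#
#   line = add_variant_to_order(order, variant, quantity=2)
#   recalculate_order(order)  # refresh totals after the lines have changed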
def change_order_line_quantity(line, new_quantity):
"""Change the quantity of ordered items in a order line."""
if new_quantity:
line.quantity = new_quantity
line.save(update_fields=['quantity'])
else:
delete_order_line(line)
def delete_order_line(line):
"""Delete an order line from an order."""
line.delete()
def restock_order_lines(order):
"""Return ordered products to corresponding stocks."""
for line in order:
if line.variant and line.variant.track_inventory:
if line.quantity_unfulfilled > 0:
deallocate_stock(line.variant, line.quantity_unfulfilled)
if line.quantity_fulfilled > 0:
increase_stock(line.variant, line.quantity_fulfilled)
if line.quantity_fulfilled > 0:
line.quantity_fulfilled = 0
line.save(update_fields=['quantity_fulfilled'])
def restock_fulfillment_lines(fulfillment):
"""Return fulfilled products to corresponding stocks."""
for line in fulfillment:
if line.order_line.variant and line.order_line.variant.track_inventory:
increase_stock(
line.order_line.variant, line.quantity, allocate=True)
def sum_order_totals(qs):
zero = Money(0, currency=settings.DEFAULT_CURRENCY)
taxed_zero = TaxedMoney(zero, zero)
return sum([order.total for order in qs], taxed_zero)
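# Usage sketch (illustrative): summing totals over a queryset of orders
#
#   totals = sum_order_totals(Order.objects.confirmed())
#   # `totals` is a TaxedMoney in settings.DEFAULT_CURRENCY; it equals the
#   # taxed zero amount when the queryset is empty.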
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return,invalid-name,len-as-condition,too-many-nested-blocks
"""
A pass for manifesting explicit memory allocations.
"""
from typing import Optional, Dict, List, Tuple
from collections import defaultdict
import attr
from ..expr_functor import ExprMutator
from .. import op, expr
from ..function import Function
from ... import register_func, ir, cpu
from ..._ffi.runtime_ctypes import Device
from ... import IRModule
from .. import transform
from . import function_pass
def is_primitive(call):
return (
hasattr(call, "op")
and hasattr(call.op, "attrs")
and hasattr(call.op.attrs, "Primitive")
and int(call.op.attrs.Primitive) == 1
)
@attr.s(auto_attribs=True)
class Region:
"""
Represents a control-free allocation region.
The below pass groups sets of allocations into regions,
then replaces the region with a single allocation.
"""
var: expr.Var
size: expr.Expr
alignment: Optional[expr.Expr]
dtype: Optional[str]
device: Device
offsets: Dict[expr.Var, Tuple[expr.Expr, expr.Expr]]
@staticmethod
def empty(region_no):
zero = expr.const(0, dtype="int64")
assert len(zero.data.shape) == 0
region_var = expr.var(f"region{region_no}")
return Region(region_var, zero, None, None, None, {})
def grow(
self,
old_storage: expr.Var,
size: expr.Expr,
alignment: expr.Expr,
dev: Device,
dtype: str,
) -> None:
"""Grow the region by a given allocation as well as track the old storage
for later rewriting the program to use the allocated region.
"""
if self.dtype:
assert self.dtype == dtype, "must have matching dtypes in a region"
else:
self.dtype = dtype
if self.alignment:
assert ir.structural_equal(
self.alignment, alignment
), "must have matching alignments in a region"
else:
self.alignment = alignment
if self.device:
assert (
self.device.device_type == dev.device_type
and self.device.device_id == dev.device_id
), "must have matching device"
else:
assert dev
self.device = dev
new_size = (
(size + self.alignment - expr.const(1, "int64")) / self.alignment * self.alignment
)
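        # Worked example (illustrative): size=100, alignment=64 gives
        # new_size = (100 + 64 - 1) / 64 * 64 = 2 * 64 = 128, i.e. the
        # request rounded up to the next multiple of the alignment
        # (the division is integer division on int64 constants).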
# Record the offset at which we allocate the storage.
offset_var: expr.RelayExpr = expr.var(f"offset{len(self.offsets)}")
self.offsets[old_storage] = (offset_var, self.size)
self.size = self.size + new_size
def offset_for(self, alloc: expr.Expr) -> expr.Expr:
return self.offsets.get(alloc, [None])[0]
def to_expr(self, body: expr.Expr) -> expr.Expr:
"""
Generate the prelude code for a region, wrapping the body in it.
The prelude contains the single allocation for a region, and
all offset computations.
"""
if self.device is None:
self.device = cpu(0)
# Generate bindings for each and every size computation
# we must do this to maintain ANF.
bindings: List[Tuple[expr.Expr, expr.Expr]] = []
# First compute the total size.
total_size = expr.var(f"total_size{hash(body)}")
bindings.append((total_size, self.size))
# Allocate the entire region with a single call.
alloc = op.memory.alloc_storage(total_size, self.alignment, self.device, self.dtype)
bindings.append((self.var, alloc))
# Generate variables which contain all of the offset math.
# Ensure we constant evaluate away all the math here.
#
# In theory we can support dynamic offsets but this
# requires another round of memory planning and
        # potentially coalescing.
for alloc in self.offsets:
(var, offset) = self.offsets[alloc]
bindings.append((var, offset))
body = mk_let(bindings, body)
return body
def iterative_let(let, each_binding, kont):
bindings = []
while isinstance(let, expr.Let):
lhs = let.var
rhs = let.value
bindings.append(each_binding(lhs, rhs))
let = let.body
return kont(bindings, let)
def mk_let(bindings, body):
for var, value in reversed(bindings):
assert var
assert value
assert body
body = expr.Let(var, value, body)
return body
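# Illustrative sketch: mk_let([(x, e1), (y, e2)], body) produces the nested
# expression `let x = e1 in let y = e2 in body`; the bindings are folded in
# reverse so the last binding ends up innermost, directly wrapping `body`.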
def const_eval(mod, exp):
mod = IRModule.from_expr(exp, type_defs=mod.type_definitions)
mod = transform.FoldConstant()(mod)
return mod["main"]
class StorageCoalesce(ExprMutator):
"""
A pass for coalescing allocations into region/arena allocations.
After this pass each allocation comes from the same backing storage,
but will never overlap even in time, i.e. the allocations are just
packed into a contiguous block of memory.
A secondary part of memory planning will perform liveness analysis to
    overlap these in time, i.e. when an early tensor dies we will attempt
to reuse its slot.
"""
def __init__(self):
super().__init__()
self.regions = []
def enter_scope(self) -> None:
region_no = len(self.regions)
self.regions.append(defaultdict(lambda: Region.empty(region_no)))
def exit_scope(self, body: expr.Expr) -> expr.Expr:
"""When leaving a scope build a region allocation for the scope."""
dtype_region = self.regions.pop()
for _, region in reversed(list(dtype_region.items())):
if len(region.offsets) != 0:
body = region.to_expr(body)
return body
def current_region(self, dtype) -> Region:
current_scope = self.regions[-1]
return current_scope[dtype]
def new_region_and_offset(self, old_storage):
for dtype_region in reversed(self.regions):
for dtype in dtype_region:
region = dtype_region[dtype]
offset = region.offset_for(old_storage)
if offset:
return region, offset
raise Exception("could not find offset in any valid region")
def visit_function(self, fn):
"""Transform the function body to use region allocation scheme."""
func = fn
if getattr(func.attrs, "Primitive", 0) == 1:
return super().visit_function(func)
else:
self.enter_scope()
body = self.visit(func.body)
body = self.exit_scope(body)
return Function(
func.params,
body,
func.ret_type,
func.type_params,
func.attrs,
)
def visit_if(self, ite):
self.enter_scope()
true_branch = self.visit(ite.true_branch)
true_branch = self.exit_scope(true_branch)
self.enter_scope()
false_branch = self.visit(ite.false_branch)
false_branch = self.exit_scope(false_branch)
return expr.If(ite.cond, true_branch, false_branch)
def mk_let(self, dynamic_regions):
"""Let bind the dynamic regions"""
def _mk_let(bindings, body):
for var, value in reversed(bindings):
assert var
assert value is not None
assert body
body = expr.Let(var, value, body)
if var in dynamic_regions:
body = self.exit_scope(body)
return body
return _mk_let
def visit_let(self, let):
dynamic_regions = []
def _each_binding(lhs, rhs):
if isinstance(rhs, expr.Call) and rhs.op == op.op.get("memory.alloc_storage"):
return self.process_alloc_storage(dynamic_regions, lhs, rhs)
elif isinstance(rhs, expr.Call) and rhs.op == op.op.get("memory.alloc_tensor"):
return self.process_alloc_tensor(lhs, rhs)
else:
return lhs, rhs
result = iterative_let(let, _each_binding, self.mk_let(dynamic_regions))
assert result
return result
def process_alloc_storage(self, dynamic_regions, lhs, call):
"""Process alloc_storage"""
size, alignment = call.args
dtype = call.attrs.dtype
dev = Device(call.attrs.device_type, call.attrs.device_id)
if not isinstance(size, expr.Constant):
self.enter_scope()
dynamic_regions.append(lhs)
else:
            # A new scope is created when entering a new region with a
            # different device.
region = self.current_region(dtype)
if region.device and region.device.device_type != dev.device_type:
self.enter_scope()
dynamic_regions.append(lhs)
region = self.current_region(dtype)
region.grow(lhs, size, alignment, dev, dtype)
return lhs, region.var
def process_alloc_tensor(self, lhs, call):
"""Process alloc tensor. Region and offset are computed"""
storage, old_offset, shape = call.args
region, offset = self.new_region_and_offset(storage)
assert old_offset.data.numpy().item() == 0, "no offsets should yet be allocated"
return (
lhs,
expr.Call(call.op, [region.var, offset, shape], call.attrs),
)
class LiftConst(ExprMutator):
"""An internal pass to lift constants to the top level of function."""
def __init__(self):
self.i = 0
self.constants = []
self.top_level = True
super().__init__()
def visit_constant(self, const):
var = expr.var(f"const{self.i}")
self.i += 1
self.constants.append((var, const))
return var
def visit_function(self, fn):
if int(getattr(fn.attrs, "Primitive", 0)) == 1:
return fn
outer_constant = self.constants
self.constants = []
# Populates self.constants.
body = self.visit(fn.body)
body = mk_let(self.constants, body)
self.constants = outer_constant
return Function(fn.params, body, fn.ret_type, fn.type_params, fn.attrs)
def visit_let(self, let):
bindings = []
while isinstance(let, expr.Let):
new_var = self.visit(let.var)
new_val = self.visit(let.value)
bindings.append((new_var, new_val))
let = let.body
new_body = self.visit(let)
return mk_let(bindings, new_body)
@function_pass(opt_level=0)
class MemoryPlan:
"""An explicit pass wrapper around StorageCoalesce."""
def transform_function(self, func, mod, _):
mod.import_from_std("core.rly")
sc = StorageCoalesce()
func = sc.visit(func)
return func
register_func("relay.transform.MemoryPlan", MemoryPlan)
@function_pass(opt_level=0)
class LiftConstants:
"""An explicit pass wrapper around LiftConst."""
def transform_function(self, func, mod, _):
mod.import_from_std("core.rly")
func = LiftConst().visit(func)
return func
register_func("relay.transform.LiftConstants", LiftConstants)
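# Usage sketch (hedged; assumes `mod` is a Relay IRModule that already contains
# explicit memory.alloc_storage/alloc_tensor calls):
#
#   mod = MemoryPlan()(mod)        # coalesce allocations into per-dtype regions
#   mod = LiftConstants()(mod)     # hoist constants into let-bindings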
|
|
#----------------------------------------------------------------------------
# Classes for new input type `desStarCatalog`; intended for use with Balrog,
# but should be extendable.
#
# Contributors:
# Spencer Everett (UCSC)
#----------------------------------------------------------------------------
import galsim
import galsim.config
import numpy as np
import os
import warnings
import logging
from past.builtins import basestring # Python 2&3 compatibility
from astropy.io import ascii
# Can use for debugging
# import pudb
class desStarCatalog(object):
""" Class that handles Sahar's star catalogs for DES. These are cvs files with names typically
of the form #TODO.
# @param base_dir Location of all tile-specific catalog directories.
# @param model_type A string of which star catalog model type to use
(e.g. 'Extra_10_percent_16.5-26.5').
# @param tile Which catalog tile to load.
# @param bands A string of the desired bands to simulate from (only griz allowed). For
# example, selecting only the 'g' and 'r' bands would be done by setting
# bands='gr'.
# @param snr_min The lower allowed bound for signal-to-noise ratio (snr). Can be any
# positive value, as long as it is smaller than `snr_max`. All objects with
# negative snr are removed by default.
    # @param file_type Which file extension is used for the star catalogs (not needed yet, is csv)
# @param data_version Specify which version of the catalog is being used if the processing
# between versions is different (not needed yet)
    # @param zeropoint Reference zeropoint used for star catalog (Default: 30)
# @param base_model Used for Balrog simulations where multiple model types will be
    # needed across many realizations of images. Must be a valid model type
# and must be a divisor of the selected model type.
# For example, a simulation with 10 realizations will need to use a model
# type of `Extra_%_percent` where % is a multiple of 10 up to 100.
# So the base_model would be 'Extra_10_percent'.
# @param _nobjects_only This is only passed if GalSim wants to know how many input objects will
    # be used without processing the whole input catalog.
"""
_req_params = { 'base_dir' : str, 'model_type' : str, 'tile' : str, 'bands': str}
# TODO: any others?
_opt_params = { 'file_type' : str, 'data_version' : str, 'zeropoint' : float,
'base_model' : str}
_single_params = []
_takes_rng = False
    # TODO: Shouldn't be hard to add fits, but currently not needed
    _valid_file_types = ['csv']
    # Data versions that currently have implementations
_valid_data_versions = ['y3v02']
# From Sahar's catalog directory, currently in `/data/des20.b/data/sallam/Yanny-balrog/5sigma/`
# NOTE: Gets set below after determining data version
_valid_model_types = []
# Only these color bands are currently supported for star injection
_valid_band_types = 'griz'
# Dictionary of color band flux to array index in star catalog
_band_index = {'g' : 0, 'r' : 1, 'i' : 2, 'z' : 3}
def __init__(self, base_dir, model_type, tile, bands, file_type=None, data_version=None,
zeropoint=None, base_model=None):
if not os.path.isdir(base_dir):
raise ValueError('{} is not a valid directory or does not exist!'.format(base_dir))
self.base_dir = base_dir
if not data_version:
warnings.warn('No data version passed - assuming `y3v02`.')
data_version = 'y3v02'
if data_version not in self._valid_data_versions:
            raise ValueError('`{}` does not have an implementation built yet!'.format(
                data_version))
self.data_version = data_version
self._set_valid_model_types()
# TODO: Would be nice to have a check for valid DES tile names!
# tile check...
self.tile = tile
if model_type not in self._valid_model_types:
raise ValueError('{} is not a valid model type! '.format(model_type) +
'Currently allowed types are {}'.format(self._valid_model_types))
self.model_type = model_type
# NOTE: This parameter is only used for Balrog simulations where multiple model types will be
        # needed across many 'realizations' of images. Must also be a valid model type, and must be a
# divisor of the selected model type.
#
# For example, a simulation with 10 realizations will need to use a model type of
# `Extra_%_percent` where % is a multiple of 10 up to 100. So the base_model would be
# `Extra_10_percent`.
if base_model:
if base_model not in self._valid_model_types:
                raise ValueError('Base model {} is not a valid model type! '.format(base_model) +
'Currently allowed types are {}'.format(self._valid_model_types))
prefix = base_model.split('_')[0]
if prefix == 'Model':
self.base_percent = 100
elif prefix == 'Extra':
self.base_percent = base_model.split('_')[1]
self.base_model = base_model
else:
self.base_model, self.base_percent = None, None
if isinstance(bands, basestring):
# Strip all whitespace
bands = bands.replace(' ', '')
# More useful as a list of individual bands
bands_list = list(bands)
if set(bands_list).issubset(self._valid_band_types):
self.bands = bands_list
else:
raise ValueError("The only valid color bands for a des star catalog are \'griz'\'!")
else:
# TODO: Wouldn't be a bad idea to allow a list of individual bands as well
raise ValueError('Must enter desired color bands as a string!' +
                             ' (For example, `bands : \'gr\'`)')
        if file_type:
            if file_type not in self._valid_file_types:
                raise ValueError('{} is not a valid file type! '.format(file_type) +
                                 'Currently allowed types are {}'.format(self._valid_file_types))
            self.file_type = file_type
        else:
            # Default is csv
            self.file_type = 'csv'
        if not zeropoint:
            if self.data_version == 'y3v02':
                # Catalogs made w/ zp of 30
                self.zeropoint = 30.0
            else:
                # In future, may be different defaults
                warnings.warn('No zeropoint passed; using default value of 30.0')
                self.zeropoint = 30.0
        else:
            self.zeropoint = zeropoint
# self._setup_files()
self._read_catalog()
return
#------------------------------------------------------------------------------------------------
def _set_valid_model_types(self):
'''
Given data version, construct the allowed model types from Sahar's star catalogs.
'''
# Uses public function to allow external use
self._valid_model_types = return_valid_model_types(data_version=self.data_version)
return
def _read_catalog(self):
'''
Setup file directory structure given base directory and model type.
Load in the star catalog for the given model type and tile name.
'''
if self.data_version == 'y3v02':
self.model_dir = os.path.join(self.base_dir, self.model_type)
filename = 'Model_{}.{}'.format(self.tile, self.file_type)
self.cat_file = os.path.join(self.model_dir, filename)
if self.file_type == 'csv':
# TODO: I believe GalSim has a built in CSV reader; worth looking at
catalog = ascii.read(self.cat_file, format='csv')
# NB: Copied due to possible memory leak issue; discussed in `scene.py`
self.catalog = np.array(catalog, copy=True)
# Total objects before cuts
self.ntotal = len(self.catalog)
# Star indices in original star catalog
self.orig_index = np.arange(self.ntotal)
# Get flags and create mask
self._set_flags()
self._make_mask()
# Do mask cut
self._mask_cut()
# pudb.set_trace()
return
#------------------------------------------------------------------------------------------------
def _set_flags(self):
if self.data_version == 'y3v02':
# No flags in current catalogs
self.flags = None
return
#------------------------------------------------------------------------------------------------
def _make_mask(self):
"""Add a masking procedure, if desired."""
# TODO: Allow multiple masking procedures
# TODO works as currently written, but should rewrite to conform to conventional masking
# definition
mask = np.ones(len(self.orig_index), dtype=bool)
if self.data_version == 'y3v02':
# No mask cuts in this version
pass
self.mask = mask
return
#------------------------------------------------------------------------------------------------
def _mask_cut(self):
"""Do mask cut defined in `makeMask()`."""
self.catalog = self.catalog[self.mask]
self.orig_index = self.orig_index[self.mask]
self.nobjects = len(self.orig_index)
# print('Ntotal (stars): {}\nNobjects (stars): {}'.format(self.ntotal,self.nobjects))
return
#------------------------------------------------------------------------------------------------
def make_stars(self, band, index=None, n_random=None, rng=None, gsparams=None):
"""
Construct GSObjects from a list of stars in the des star catalog specified by `index`
(or a randomly generated one).
NOTE: If `image` type is set to `Balrog`, then there will be different behaviour. Will
inject *all* stars whose positions are in the current image, *at* that position!
@param index Index of the desired star in the catalog for which a GSObject
should be constructed. You can also provide a list or array of
indices, in which case a list of objects is returned. If None,
then a random star (or more: see n_random kwarg) is chosen,
correcting for catalog-level selection effects if weights are
available. [default: None]
@param n_random The number of random stars to build, if 'index' is None.
[default: 1 (set below)]
@param rng A random number generator to use for selecting a random star
(may be any kind of BaseDeviate or None) and to use in generating
any noise field when padding. [default: None]
@param gsparams An optional GSParams argument. See the docstring for GSParams for
details. [default: None]
"""
if band not in self._valid_band_types:
            raise ValueError('Band {} is not a valid band type for a des star catalog!'.format(band))
# Make rng if needed
if index is None:
if rng is None:
rng = galsim.BaseDeviate()
elif not isinstance(rng, galsim.BaseDeviate):
raise TypeError("The rng provided to make_stars is not a BaseDeviate")
# Select random indices if necessary (no index given).
if index is None:
if n_random is None: n_random = 1
index = self.selectRandomIndex(n_random, rng=rng)
else:
# n_random is set to None by default instead of 1 for this check
if n_random is not None:
import warnings
warnings.warn("Ignoring input n_random, since indices were specified!")
# Format indices correctly
if hasattr(index, '__iter__'):
indices = index
else:
indices = [index]
stars = []
for index in indices:
star = self.star2gs(index, band, gsparams)
stars.append(star)
# Store the orig_index as star.index
for star, idx in zip(stars, indices):
star.index = self.orig_index[idx]
# Only return a list if there are multiple GSObjects
if hasattr(index, '__iter__'):
return stars
else:
return stars[0]
#------------------------------------------------------------------------------------------------
def star2gs(self, index, band, gsparams=None):
if (gsparams is not None) and (not isinstance(gsparams, galsim.GSParams)):
if isinstance(gsparams, dict):
# Convert to actual gsparams object
gsparams = galsim.GSParams(**gsparams)
else:
raise TypeError('Only `dict` and `galsim.GSParam` types allowed for '
'gsparams; input has type of {}.'.format(type(gsparams)))
# List of individual band GSObjects
gsobjects = []
# NOTE: Used to iterate over all desired bands here, but this feature has
        # been removed as it is not useful and led to the possibility of unintended bugs
# for band in self.bands:
if self.data_version == 'y3v02':
# Needed for all mag calculations
gmag = self.catalog['g_Corr'][index]
# Grab current band magnitude
if band == 'g':
mag = gmag
elif band == 'r':
mag = gmag - self.catalog['gr_Corr'][index]
elif band == 'i':
mag = gmag - self.catalog['gr_Corr'][index] - self.catalog['ri_Corr'][index]
elif band == 'z':
mag = gmag - self.catalog['gr_Corr'][index] - self.catalog['ri_Corr'][index] \
- self.catalog['iz_Corr'][index]
else:
raise ValueError('Band {} is not an allowed band input '.format(band) +
'for data_version of {}!'.format(self.data_version))
# Now convert to flux
flux = np.power(10.0, 0.4 * (self.zeropoint - mag))
# (star cats calibrated at zp=30)
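            # Worked example (illustrative): with zeropoint=30 and mag=20,
            # flux = 10**(0.4 * (30 - 20)) = 10**4 = 10000.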
# NOTE: Will just use `scale_flux` parameter in galsim config for now
# flux_factor = ...
# Stars are treated as a delta function, to be convolved w/ set PSF
gs_star = galsim.DeltaFunction(flux)
else:
# Should already be checked by this point, but just to be safe:
raise ValueError('There is no implementation for `star2gs` for data_version ' +
'of {}'.format(self.data_version))
return gs_star
#------------------------------------------------------------------------------------------------
@staticmethod
def _make_single_star(des_star_catalog, index, rng=None, gsparams=None):
""" A static function that mimics the functionality of make_stars() for single index.
The only point of this class is to circumvent some serialization issues. This means it can be used
through a proxy desStarCatalog object, which is needed for the config layer.
"""
# TODO: Write the static version of make_stars! (We don't need it for prototyping Balrog, however)
pass
#------------------------------------------------------------------------------------------------
def selectRandomIndex(self, n_random=1, rng=None, _n_rng_calls=False):
"""
Routine to select random indices out of the catalog. This routine does a weighted random
selection with replacement (i.e., there is no guarantee of uniqueness of the selected
indices). Weighting uses the weight factors available in the catalog, if any; these weights
are typically meant to remove any selection effects in the catalog creation process.
@param n_random Number of random indices to return. [default: 1]
@param rng A random number generator to use for selecting a random star.
(may be any kind of BaseDeviate or None). [default: None]
@returns A single index if n_random==1 or a NumPy array containing the randomly-selected
indices if n_random>1.
"""
# Set up the random number generator.
if rng is None:
rng = galsim.BaseDeviate()
# QSTN: What is the weighting scheme for des star catalogs? Will need to adjust below code
        # to match (or exclude entirely)
if hasattr(self.catalog, 'weight'):
use_weights = self.catalog.weight[self.orig_index]
else:
import warnings
warnings.warn('Selecting random object without correcting for catalog-level selection effects.')
use_weights = None
# By default, get the number of RNG calls. Then decide whether or not to return them
# based on _n_rng_calls.
index, n_rng_calls = galsim.utilities.rand_with_replacement(
n_random, self.nobjects, rng, use_weights, _n_rng_calls=True)
if n_random>1:
if _n_rng_calls:
return index, n_rng_calls
else:
return index
else:
if _n_rng_calls:
return index[0], n_rng_calls
else:
return index[0]
#------------------------------------------------------------------------------------------------
def indices_in_region(self, rlims, dlims, boundary_cross=False):
'''
Returns the indices of all stars contained within the ra/dec limits.
        `boundary_cross` should be passed as True when the tile crosses the RA=0/360 degree
        boundary (in which case r1 > r2 in `Y3A2_COADDTILE_GEOM.fits`).
'''
# pudb.set_trace()
r1, r2 = rlims[0], rlims[1]
d1, d2 = dlims[0], dlims[1]
if not boundary_cross:
assert r1 < r2
indices = np.where( (self.catalog['RA_new']>r1) &
(self.catalog['RA_new']<r2) &
(self.catalog['DEC_new']>d1) &
(self.catalog['DEC_new']<d2) )[0]
else:
            # Account for positions crossing across RA=0/360
assert r1 > r2
            indices = np.where(
                ( ( (self.catalog['RA_new']>=r1) & (self.catalog['RA_new']<360) ) |   # r1<=ra<360
                  ( (self.catalog['RA_new']>= 0) & (self.catalog['RA_new']<=r2) ) ) & # 0<=ra<=r2
                ( (self.catalog['DEC_new']>d1) & (self.catalog['DEC_new']<d2) ) )[0]
return indices
#------------------------------------------------------------------------------------------------
def getNObjects(self):
# Used by input/logger methods
return self.nobjects
def getNTot(self):
# Used by input/logger methods
return self.ntotal
def getCatalog(self, indices=None):
if indices is None:
return self.catalog
else:
return self.catalog[indices]
#------------------------------------------------------------------------------------------------
# Since make_stars is a function, not a class, it needs to use an unconventional location for defining
# certain config parameters.
make_stars._req_params = {'band' : str}
make_stars._opt_params = {'index' : int,
'n_random': int}
make_stars._single_params = []
make_stars._takes_rng = True
#####------------------------------------------------------------------------------------------------
# Helper Functions
def return_valid_model_types(data_version='y3v02'):
'''
Useful to have this separate from _set_valid_model_types() for outside use.
'''
if data_version == 'y3v02':
# There are the full-density models...
valid_model_types = ['Model', 'Model_16.5-26.5', 'Model_16.5-27.5']
# ...and the 'extra' density models, which are partitioned by percentage
percents = np.arange(10, 110, 10, dtype=int)
for per in percents:
valid_model_types.append('Extra_{}_percent'.format(per))
valid_model_types.append('Extra_{}_percent_16.5-26.5'.format(per))
valid_model_types.append('Extra_{}_percent_16.5-27.5'.format(per))
else:
raise ValueError('{} does not have an implementation built yet!'.format(data_version))
return valid_model_types
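# For example (illustrative), return_valid_model_types('y3v02') contains
# 'Model', 'Model_16.5-26.5', 'Model_16.5-27.5', 'Extra_10_percent',
# 'Extra_10_percent_16.5-26.5', ..., up to 'Extra_100_percent_16.5-27.5'.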
#####------------------------------------------------------------------------------------------------
class desStarCatalogLoader(galsim.config.InputLoader):
"""
The desStarCatalog loader doesn't need anything special other than registration as a valid input type.
These additions are only used for logging purposes.
"""
def setupImage(self, des_star_catalog, config, base, logger):
# This method is blank for a general InputLoader, and a convenient place to put the logger
if logger: # pragma: no cover
# Only report as a warning the first time. After that, use info.
first = not base.get('_desStarCatalogLoader_reported_as_warning',False)
base['_desStarCatalogLoader_reported_as_warning'] = True
if first:
log_level = logging.WARNING
else:
log_level = logging.INFO
if 'input' in base:
if 'des_star_catalog' in base['input']:
cc = base['input']['des_star_catalog']
if isinstance(cc,list): cc = cc[0]
out_str = ''
if 'base_dir' in cc:
out_str += '\n dir = %s'%cc['base_dir']
if 'model_type' in cc:
out_str += '\n model_type = %s'%cc['model_type']
if out_str != '':
logger.log(log_level, 'Using user-specified desStarCatalog: %s',out_str)
logger.info('file %d: DES star catalog has %d total objects; %d passed initial cuts.',
base['file_num'], des_star_catalog.getNTot(),des_star_catalog.getNObjects())
# Need to add the desStarCatalog class as a valid input_type.
galsim.config.RegisterInputType('des_star_catalog', desStarCatalogLoader(desStarCatalog,
has_nobj=True))
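# A hedged config sketch (illustrative field values, not taken from the source):
# in a GalSim YAML config the catalog would be declared roughly as
#
#   input:
#     des_star_catalog:
#       base_dir: /path/to/star/catalogs
#       model_type: Extra_10_percent_16.5-26.5
#       tile: DES0347-5540
#       bands: griz
#
# and stamp objects of type `desStar` (registered below) can then draw from it.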
#####------------------------------------------------------------------------------------------------
def build_desStar(config, base, ignore, gsparams, logger):
'''
Build a desStar type GSObject from user input.
NOTE: If `image` type is set to `Balrog`, then there will be different behaviour. Will
inject *all* stars whose positions are in the current image, *at* that position!
'''
des_star_cat = galsim.config.GetInputObj('des_star_catalog', config, base, 'des_star_catalog')
# If stars are selected based on index, and index is Sequence or Random, and max
# isn't set, set it to nobjects-1.
if 'index' in config:
galsim.config.SetDefaultIndex(config, des_star_cat.getNObjects())
# Grab necessary parameters
req = desStarCatalog.make_stars._req_params
opt = desStarCatalog.make_stars._opt_params
single = desStarCatalog.make_stars._single_params
ignore = ignore + ['num']
kwargs, safe = galsim.config.GetAllParams(config, base, req=req, opt=opt, single=single,
ignore=ignore)
# Guaranteed to be present as it is in _req_params
band = kwargs['band']
# Convert gsparams from a dict to an actual GSParams object
if gsparams:
kwargs['gsparams'] = galsim.GSParams(**gsparams)
else:
gsparams = None
# This handles the case of no index passed in config file
# Details are in desStarCatalog
rng = None
if 'index' not in kwargs:
rng = galsim.config.GetRNG(config, base, logger, 'DES_Star')
kwargs['index'], n_rng_calls = des_star_cat.selectRandomIndex(1, rng=rng, _n_rng_calls=True)
# Make sure this process gives consistent results regardless of the number of processes
# being used.
if not isinstance(des_star_cat, desStarCatalog) and rng is not None:
# Then des_star_cat is really a proxy, which means the rng was pickled, so we need to
# discard the same number of random calls from the one in the config dict.
rng.discard(int(n_rng_calls))
# Check that inputted/set index is valid
index = kwargs['index']
if index >= des_star_cat.getNObjects():
raise IndexError("%s index has gone past the number of entries in the catalog"%index)
logger.debug('obj %d: DES_Star kwargs = %s',base.get('obj_num',0),kwargs)
kwargs['des_star_catalog'] = des_star_cat
# NOTE: This uses a static method of desStarCatalog to save memory. Not needed for the moment, but
# worth looking into when trying to save resources for large Balrog runs
# star = des_star_catalog._make_single_star(**kwargs)
# Create GSObject stars from the star catalog
des_stars = des_star_cat.make_stars(band, index=index, gsparams=gsparams)
# The second item is "safe", a boolean that declares whether the returned value is
# safe to save and use again for later objects (which is not the case for des_stars).
return des_stars, False
# Register this builder with the config framework:
galsim.config.RegisterObjectType('desStar', build_desStar, input_type='des_star_catalog')
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Decorator that produces a callable object that executes a TensorFlow graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import tape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _default_initializer(name, shape, dtype):
"""The default initializer for variables."""
# pylint: disable=protected-access
store = variable_scope._get_default_variable_store()
initializer = store._get_default_initializer(name, shape=shape, dtype=dtype)
# pylint: enable=protected-access
return initializer[0]
class _CapturedVariable(object):
"""Variable captured by graph_callable.
Internal to the implementation of graph_callable. Created only by
_VariableCapturingScope and used only to read the variable values when calling
the function after the variables are initialized.
"""
def __init__(self, name, initializer, shape, dtype, trainable):
self.name = name
if initializer is None:
initializer = _default_initializer(name, shape, dtype)
initial_value = lambda: initializer(shape, dtype=dtype)
with context.eager_mode():
self.variable = resource_variable_ops.ResourceVariable(
initial_value=initial_value, name=name, dtype=dtype,
trainable=trainable)
self.shape = shape
self.dtype = dtype
self.placeholder = None
self.trainable = trainable
def read(self, want_gradients=True):
if want_gradients and self.trainable:
v = tape.watch_variable(self.variable)
else:
v = self.variable
return v.read_value()
class _VariableCapturingScope(object):
"""Variable-scope-like object which captures tf.get_variable calls.
This is responsible for the main difference between the initialization version
of a function object and the calling version of a function object.
capturing_scope replaces calls to tf.get_variable with placeholder tensors to
be fed the variable's current value. TODO(apassos): these placeholders should
instead be objects implementing a similar API to tf.Variable, for full
compatibility.
initializing_scope replaces calls to tf.get_variable with creation of
variables and initialization of their values. This allows eventual support of
initialized_value and friends.
TODO(apassos): once the eager mode layers API is implemented support eager
func-to-object as well.
"""
def __init__(self):
self.variables = {}
self.tf_variables = {}
@contextlib.contextmanager
def capturing_scope(self):
"""Context manager to capture variable creations.
Replaces variable accesses with placeholders.
Yields:
nothing
"""
# TODO(apassos) ignoring the regularizer and partitioner here; figure out
# how to deal with these.
def _custom_getter( # pylint: disable=missing-docstring
getter=None,
name=None,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=True,
collections=None,
caching_device=None, # pylint: disable=redefined-outer-name
partitioner=None,
validate_shape=True,
use_resource=None,
aggregation=variable_scope.VariableAggregation.NONE,
synchronization=variable_scope.VariableSynchronization.AUTO):
del getter, regularizer, partitioner, validate_shape, use_resource, dtype
del collections, initializer, trainable, reuse, caching_device, shape
del aggregation, synchronization
assert name in self.variables
v = self.variables[name]
return v.variable
scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope(scope, custom_getter=_custom_getter):
yield
@contextlib.contextmanager
def initializing_scope(self):
"""Context manager to capture variable creations.
Forcibly initializes all created variables.
Yields:
nothing
"""
# TODO(apassos) ignoring the regularizer and partitioner here; figure out
# how to deal with these.
def _custom_getter( # pylint: disable=missing-docstring
getter=None,
name=None,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=True,
collections=None,
caching_device=None, # pylint: disable=redefined-outer-name
partitioner=None,
validate_shape=True,
use_resource=None,
aggregation=variable_scope.VariableAggregation.NONE,
synchronization=variable_scope.VariableSynchronization.AUTO):
del getter, regularizer, collections, caching_device, partitioner
del use_resource, validate_shape, aggregation, synchronization
if name in self.tf_variables:
if reuse:
return self.tf_variables[name].initialized_value()
else:
raise ValueError("Specified reuse=%s but tried to reuse variables."
% reuse)
# TODO(apassos): ensure this is on the same device as above
v = _CapturedVariable(name, initializer, shape, dtype, trainable)
self.variables[name] = v
graph_mode_resource = v.variable.handle
if initializer is None:
initializer = _default_initializer(name, shape, dtype)
resource_variable_ops.shape_safe_assign_variable_handle(
graph_mode_resource, v.variable.shape, initializer(shape, dtype))
return v.variable
scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope(scope, custom_getter=_custom_getter):
yield
class _InitializingFunctionObject(object):
"""Responsible for deciding which version of func-to-object to call.
call_fn is the version which calls the function with the current values of the
variables and init_fn is the version which calls the function to initialize
all variables.
TODO(apassos): figure out a way to support initializing only _some_
variables. This requires a way to pull out a variable's initialization code
from the graph, which might not be possible in general.
"""
def __init__(self, call_fn, init_fn, shape_and_dtypes):
self._init_fn = init_fn
self._call_fn = call_fn
self.shape_and_dtypes = shape_and_dtypes
self.flattened_shapes = [tensor_shape.as_shape(sd.shape) for sd in
nest.flatten(self.shape_and_dtypes)]
@property
def variables(self):
return self._call_fn.variables
def __call__(self, *args):
nest.assert_same_structure(self.shape_and_dtypes, args, check_types=False)
if not all([
shape.is_compatible_with(arg.shape)
for shape, arg in zip(self.flattened_shapes, nest.flatten(args))
]):
raise ValueError(
"Declared shapes do not match argument shapes: Expected %s, found %s."
% (self.flattened_shapes, [arg.shape for arg in nest.flatten(args)]))
initialized = [resource_variable_ops.var_is_initialized_op(
v.handle).numpy() for v in self._call_fn.variables]
if all(x for x in initialized):
for v in self._call_fn.variables:
if v.trainable:
tape.watch_variable(v)
return self._call_fn(*args)
elif all(not x for x in initialized):
return self._init_fn(*args)
else:
raise ValueError("Some, but not all, variables are initialized.")
def _get_graph_callable_inputs(shape_and_dtypes):
"""Maps specified shape_and_dtypes to graph inputs."""
ret = []
for x in shape_and_dtypes:
if isinstance(x, ShapeAndDtype):
ret.append(array_ops.placeholder(x.dtype, x.shape))
elif isinstance(x, (tuple, list)):
ret.append(_get_graph_callable_inputs(x))
else:
raise errors.InvalidArgumentError(
None, None, "Expected the argument to @graph_callable to be a "
"(possibly nested) list or tuple of ShapeAndDtype objects, "
"but got an object of type: %s" % type(x))
return tuple(ret) if isinstance(shape_and_dtypes, tuple) else ret
def _graph_callable_internal(func, shape_and_dtypes):
"""Defines and returns a template version of func.
Under the hood we make two function objects, each wrapping a different version
of the graph-mode code. One version immediately runs variable initialization
before making the variable's Tensors available for use, while the other
version replaces the Variables with placeholders which become function
arguments and get the current variable's value.
  Some of the limitations stem from the fact that this does not implement a
  graph-mode Variable class which has a convert_to_tensor(as_ref=True) method
  and an initialized_value method. This is fixable.
Args:
func: The tfe Python function to compile.
shape_and_dtypes: A possibly nested list or tuple of ShapeAndDtype objects.
Raises:
ValueError: If any one of func's outputs is not a Tensor.
Returns:
Callable graph object.
"""
container = tf_ops.get_default_graph()._container # pylint: disable=protected-access
graph_key = tf_ops.get_default_graph()._graph_key # pylint: disable=protected-access
with context.graph_mode():
# This graph will store both the initialization and the call version of the
# wrapped function. It will later be used by the backprop code to build the
# backprop graph, if necessary.
captures = {}
tmp_graph = function.CapturingGraph(captures)
# Inherit the graph key from the original graph to ensure optimizers don't
# misbehave.
tmp_graph._container = container # pylint: disable=protected-access
tmp_graph._graph_key = graph_key # pylint: disable=protected-access
with tmp_graph.as_default():
# Placeholders for the non-variable inputs.
func_inputs = _get_graph_callable_inputs(shape_and_dtypes)
func_num_args = len(tf_inspect.getargspec(func).args)
if len(func_inputs) != func_num_args:
raise TypeError("The number of arguments accepted by the decorated "
"function `%s` (%d) must match the number of "
"ShapeAndDtype objects passed to the graph_callable() "
"decorator (%d)." %
(func.__name__, func_num_args, len(func_inputs)))
# First call the function to generate a graph which can initialize all
# variables. As a side-effect this will populate the variable capturing
# scope's view of which variables exist.
variable_captures = _VariableCapturingScope()
with variable_captures.initializing_scope(
), function.AutomaticControlDependencies() as a:
func_outputs = func(*func_inputs)
outputs_list = nest.flatten(func_outputs)
for i, x in enumerate(outputs_list):
if x is not None:
outputs_list[i] = a.mark_as_return(x)
if len(outputs_list) == 1 and outputs_list[0] is None:
outputs_list = []
output_shapes = [x.shape for x in outputs_list]
if not all(isinstance(x, tf_ops.Tensor) for x in outputs_list):
raise ValueError("Found non-tensor output in %s" % str(outputs_list))
initializing_operations = tmp_graph.get_operations()
# Call the function again, now replacing usages of variables with
# placeholders. This assumes the variable capturing scope created above
# knows about all variables.
tmp_graph.clear_resource_control_flow_state()
with variable_captures.capturing_scope(
), function.AutomaticControlDependencies() as a:
captured_outputs = func(*func_inputs)
captured_outlist = nest.flatten(captured_outputs)
for i, x in enumerate(captured_outlist):
if x is not None:
captured_outlist[i] = a.mark_as_return(x)
capturing_operations = tmp_graph.get_operations()[
len(initializing_operations):]
sorted_variables = sorted(variable_captures.variables.values(),
key=lambda x: x.name)
ids = list(sorted(captures.keys()))
if ids:
extra_inputs, extra_placeholders = zip(*[captures[x] for x in ids])
else:
extra_inputs = []
extra_placeholders = []
flat_inputs = [x for x in nest.flatten(func_inputs)
if isinstance(x, tf_ops.Tensor)]
placeholder_inputs = flat_inputs+ list(extra_placeholders)
func_def_outputs = [x for x in outputs_list if isinstance(x, tf_ops.Tensor)]
initialization_name = function._inference_name(func.__name__) # pylint: disable=protected-access
# TODO(ashankar): Oh lord, forgive me for this lint travesty.
# Also, what about the gradient registry of these functions? Those need to be
# addressed as well.
for f in tmp_graph._functions.values(): # pylint: disable=protected-access
function._register(f._c_func.func) # pylint: disable=protected-access
initializer_function = function.GraphModeFunction(
initialization_name,
placeholder_inputs,
extra_inputs,
tmp_graph,
initializing_operations,
func_def_outputs,
func_outputs,
output_shapes)
capture_func_def_outputs = [
x for x in captured_outlist if isinstance(x, tf_ops.Tensor)]
captured_function_name = function._inference_name(func.__name__) # pylint: disable=protected-access
captured_function = function.GraphModeFunction(
captured_function_name,
placeholder_inputs,
extra_inputs,
tmp_graph,
capturing_operations,
capture_func_def_outputs,
captured_outputs,
output_shapes,
variables=[x.variable for x in sorted_variables])
return _InitializingFunctionObject(captured_function, initializer_function,
shape_and_dtypes)
class ShapeAndDtype(object):
"""Data type that packages together shape and type information.
Used for arguments to graph callables. See graph_callable() for an example.
"""
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
def graph_callable(shape_and_dtypes):
"""Decorator that produces a callable that executes a TensorFlow graph.
When applied on a function that constructs a TensorFlow graph, this decorator
produces a callable object that:
1. Executes the graph when invoked. The first call will initialize any
variables defined in the graph.
2. Provides a .variables() method to return the list of TensorFlow variables
defined in the graph.
Note that the wrapped function is not allowed to change the values of the
variables, just use them.
The return value of the wrapped function must be one of the following:
(1) None, (2) a Tensor, or (3) a possibly nested sequence of Tensors.
Example:
```python
  @tfe.graph_callable([tfe.ShapeAndDtype(shape=(), dtype=dtypes.float32)])
def foo(x):
v = tf.get_variable('v', initializer=tf.ones_initializer(), shape=())
return v + x
ret = foo(tfe.Tensor(2.0)) # `ret` here is a Tensor with value 3.0.
foo.variables[0].assign(7.0) # Modify the value of variable `v`.
ret = foo(tfe.Tensor(2.0)) # `ret` here now is a Tensor with value 9.0.
```
Args:
shape_and_dtypes: A possibly nested list or tuple of ShapeAndDtype objects
that specifies shape and type information for each of the callable's
arguments. The length of this list must be equal to the number of
arguments accepted by the wrapped function.
Returns:
A callable graph object.
"""
# TODO(alive,apassos): support initialized_value and friends from tf.Variable.
assert context.executing_eagerly(), (
"graph_callable can only be used when Eager execution is enabled.")
def decorator(func):
return tf_decorator.make_decorator(func,
_graph_callable_internal(
func, shape_and_dtypes))
return decorator
|
|
# -*- coding: utf-8 -*-
'''
The rcctl service module for OpenBSD
'''
from __future__ import absolute_import
# Import python libs
import os
# Import salt libs
import salt.utils
import salt.utils.decorators as decorators
from salt.exceptions import CommandNotFoundError
__func_alias__ = {
'reload_': 'reload'
}
# Define the module's virtual name
__virtualname__ = 'service'
def __virtual__():
'''
rcctl(8) is only available on OpenBSD.
'''
if __grains__['os'] == 'OpenBSD' and os.path.exists('/usr/sbin/rcctl'):
return __virtualname__
    return (False, 'The rcctl execution module cannot be loaded: '
'only available on OpenBSD systems.')
@decorators.memoize
def _cmd():
'''
Return the full path to the rcctl(8) command.
'''
rcctl = salt.utils.which('rcctl')
if not rcctl:
raise CommandNotFoundError
return rcctl
def _get_flags(**kwargs):
'''
Return the configured service flags.
'''
flags = kwargs.get('flags',
__salt__['config.option']('service.flags',
default=''))
return flags
def available(name):
'''
Return True if the named service is available.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
'''
cmd = '{0} get {1}'.format(_cmd(), name)
if __salt__['cmd.retcode'](cmd) == 2:
return False
return True
def missing(name):
'''
The inverse of service.available.
Return True if the named service is not available.
CLI Example:
.. code-block:: bash
salt '*' service.missing sshd
'''
return not available(name)
def get_all():
'''
Return all installed services.
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
ret = []
service = _cmd()
for svc in __salt__['cmd.run']('{0} ls all'.format(service)).splitlines():
ret.append(svc)
return sorted(ret)
def get_disabled():
'''
Return what services are available but not enabled to start at boot.
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled
'''
ret = []
service = _cmd()
for svc in __salt__['cmd.run']('{0} ls off'.format(service)).splitlines():
ret.append(svc)
return sorted(ret)
def get_enabled():
'''
Return what services are set to run on boot.
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
ret = []
service = _cmd()
for svc in __salt__['cmd.run']('{0} ls on'.format(service)).splitlines():
ret.append(svc)
return sorted(ret)
def start(name):
'''
Start the named service.
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
cmd = '{0} -f start {1}'.format(_cmd(), name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the named service.
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
'''
cmd = '{0} stop {1}'.format(_cmd(), name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service.
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
'''
cmd = '{0} -f restart {1}'.format(_cmd(), name)
return not __salt__['cmd.retcode'](cmd)
def reload_(name):
'''
Reload the named service.
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
'''
cmd = '{0} reload {1}'.format(_cmd(), name)
return not __salt__['cmd.retcode'](cmd)
def status(name, sig=None):
'''
Return the status for a service, returns a bool whether the service is
running.
CLI Example:
.. code-block:: bash
salt '*' service.status <service name>
'''
if sig:
return bool(__salt__['status.pid'](sig))
cmd = '{0} check {1}'.format(_cmd(), name)
return not __salt__['cmd.retcode'](cmd)
def enable(name, **kwargs):
'''
Enable the named service to start at boot.
flags : None
Set optional flags to run the service with.
service.flags can be used to change the default flags.
CLI Example:
.. code-block:: bash
salt '*' service.enable <service name>
salt '*' service.enable <service name> flags=<flags>
'''
stat_cmd = '{0} set {1} status on'.format(_cmd(), name)
stat_retcode = __salt__['cmd.retcode'](stat_cmd)
flag_retcode = None
# only (re)set flags for services that have an rc.d(8) script
if os.path.exists('/etc/rc.d/{0}'.format(name)):
flags = _get_flags(**kwargs)
flag_cmd = '{0} set {1} flags {2}'.format(_cmd(), name, flags)
flag_retcode = __salt__['cmd.retcode'](flag_cmd)
return not any([stat_retcode, flag_retcode])
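# Illustrative sketch of the commands issued by enable('ntpd', flags='-s'),
# assuming /etc/rc.d/ntpd exists (service name and flags are hypothetical):
#
#   rcctl set ntpd status on
#   rcctl set ntpd flags -s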
def disable(name, **kwargs):
'''
    Disable the named service from starting at boot.
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name>
'''
cmd = '{0} set {1} status off'.format(_cmd(), name)
return not __salt__['cmd.retcode'](cmd)
def disabled(name):
'''
Return True if the named service is disabled at boot, False otherwise.
CLI Example:
.. code-block:: bash
salt '*' service.disabled <service name>
'''
cmd = '{0} get {1} status'.format(_cmd(), name)
    return __salt__['cmd.retcode'](cmd) != 0
def enabled(name, **kwargs):
'''
Return True if the named service is enabled at boot and the provided
flags match the configured ones (if any). Return False otherwise.
name
Service name
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name>
salt '*' service.enabled <service name> flags=<flags>
'''
cmd = '{0} get {1} status'.format(_cmd(), name)
if not __salt__['cmd.retcode'](cmd):
# also consider a service disabled if the current flags are different
# than the configured ones so we have a chance to update them
flags = _get_flags(**kwargs)
cur_flags = __salt__['cmd.run_stdout']('{0} get {1} flags'.format(_cmd(), name))
if format(flags) == format(cur_flags):
return True
if not flags:
def_flags = __salt__['cmd.run_stdout']('{0} getdef {1} flags'.format(_cmd(), name))
if format(cur_flags) == format(def_flags):
return True
return False
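# Illustrative sketch (not part of the module): assuming _get_flags() returns
# the flags passed on the CLI and ntpd is configured with flags '-s':
#
#   salt '*' service.enabled ntpd flags='-s'   -> True
#   salt '*' service.enabled ntpd flags='-x'   -> False, so a state run gets a
#                                                 chance to update the flags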
|
|
# -*- coding: utf-8 -*-
"""
Strongly connected components.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__authors__ = "\n".join(['Eben Kenah',
'Aric Hagberg (hagberg@lanl.gov)'
'Christopher Ellison',
'Ben Edwards (bedwards@cs.unm.edu)'])
__all__ = ['number_strongly_connected_components',
'strongly_connected_components',
'strongly_connected_component_subgraphs',
'is_strongly_connected',
'strongly_connected_components_recursive',
'kosaraju_strongly_connected_components',
'condensation']
def strongly_connected_components(G):
"""Return nodes in strongly connected components of graph.
Parameters
----------
G : NetworkX Graph
       A directed graph.
Returns
-------
comp : list of lists
A list of nodes for each component of G.
The list is ordered from largest connected component to smallest.
Raises
------
NetworkXError: If G is undirected.
See Also
--------
connected_components, weakly_connected_components
Notes
-----
Uses Tarjan's algorithm with Nuutila's modifications.
Nonrecursive version of algorithm.
References
----------
.. [1] Depth-first search and linear graph algorithms, R. Tarjan
SIAM Journal of Computing 1(2):146-160, (1972).
.. [2] On finding the strongly connected components in a directed graph.
E. Nuutila and E. Soisalon-Soinen
       Information Processing Letters 49(1): 9-14, (1994).
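
    Examples
    --------
    A small illustrative graph (the node order inside each component may vary):

    >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3)])
    >>> strongly_connected_components(G)
    [[0, 1, 2], [3]]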
"""
if not G.is_directed():
raise nx.NetworkXError("""Not allowed for undirected graph G.
Use connected_components() """)
preorder={}
lowlink={}
scc_found={}
scc_queue = []
scc_list=[]
i=0 # Preorder counter
for source in G:
if source not in scc_found:
queue=[source]
while queue:
v=queue[-1]
if v not in preorder:
i=i+1
preorder[v]=i
done=1
v_nbrs=G[v]
for w in v_nbrs:
if w not in preorder:
queue.append(w)
done=0
break
if done==1:
lowlink[v]=preorder[v]
for w in v_nbrs:
if w not in scc_found:
if preorder[w]>preorder[v]:
lowlink[v]=min([lowlink[v],lowlink[w]])
else:
lowlink[v]=min([lowlink[v],preorder[w]])
queue.pop()
if lowlink[v]==preorder[v]:
scc_found[v]=True
scc=[v]
while scc_queue and preorder[scc_queue[-1]]>preorder[v]:
k=scc_queue.pop()
scc_found[k]=True
scc.append(k)
scc_list.append(scc)
else:
scc_queue.append(v)
scc_list.sort(key=len,reverse=True)
return scc_list
def kosaraju_strongly_connected_components(G,source=None):
"""Return nodes in strongly connected components of graph.
Parameters
----------
G : NetworkX Graph
       A directed graph.
Returns
-------
comp : list of lists
A list of nodes for each component of G.
The list is ordered from largest connected component to smallest.
Raises
------
NetworkXError: If G is undirected
See Also
--------
connected_components
Notes
-----
Uses Kosaraju's algorithm.
"""
if not G.is_directed():
raise nx.NetworkXError("""Not allowed for undirected graph G.
Use connected_components() """)
components=[]
G=G.reverse(copy=False)
post=list(nx.dfs_postorder_nodes(G,source=source))
G=G.reverse(copy=False)
seen={}
while post:
r=post.pop()
if r in seen:
continue
c=nx.dfs_preorder_nodes(G,r)
new=[v for v in c if v not in seen]
seen.update([(u,True) for u in new])
components.append(new)
components.sort(key=len,reverse=True)
return components
def strongly_connected_components_recursive(G):
"""Return nodes in strongly connected components of graph.
Recursive version of algorithm.
Parameters
----------
G : NetworkX Graph
       A directed graph.
Returns
-------
comp : list of lists
A list of nodes for each component of G.
The list is ordered from largest connected component to smallest.
Raises
------
NetworkXError : If G is undirected
See Also
--------
connected_components
Notes
-----
Uses Tarjan's algorithm with Nuutila's modifications.
References
----------
.. [1] Depth-first search and linear graph algorithms, R. Tarjan
SIAM Journal of Computing 1(2):146-160, (1972).
.. [2] On finding the strongly connected components in a directed graph.
E. Nuutila and E. Soisalon-Soinen
       Information Processing Letters 49(1): 9-14, (1994).
"""
def visit(v,cnt):
root[v]=cnt
visited[v]=cnt
cnt+=1
stack.append(v)
for w in G[v]:
if w not in visited: visit(w,cnt)
if w not in component:
root[v]=min(root[v],root[w])
if root[v]==visited[v]:
component[v]=root[v]
tmpc=[v] # hold nodes in this component
while stack[-1]!=v:
w=stack.pop()
component[w]=root[v]
tmpc.append(w)
stack.remove(v)
scc.append(tmpc) # add to scc list
if not G.is_directed():
raise nx.NetworkXError("""Not allowed for undirected graph G.
Use connected_components() """)
scc=[]
visited={}
component={}
root={}
cnt=0
stack=[]
for source in G:
if source not in visited:
visit(source,cnt)
scc.sort(key=len,reverse=True)
return scc
def strongly_connected_component_subgraphs(G):
"""Return strongly connected components as subgraphs.
Parameters
----------
G : NetworkX Graph
       A directed graph.
Returns
-------
glist : list
A list of graphs, one for each strongly connected component of G.
See Also
--------
connected_component_subgraphs
Notes
-----
The list is ordered from largest strongly connected component to smallest.
Graph, node, and edge attributes are copied to the subgraphs.
"""
cc=strongly_connected_components(G)
graph_list=[]
for c in cc:
graph_list.append(G.subgraph(c).copy())
return graph_list
def number_strongly_connected_components(G):
"""Return number of strongly connected components in graph.
Parameters
----------
G : NetworkX graph
A directed graph.
Returns
-------
n : integer
Number of strongly connected components
See Also
--------
connected_components
Notes
-----
For directed graphs only.
"""
return len(strongly_connected_components(G))
def is_strongly_connected(G):
"""Test directed graph for strong connectivity.
Parameters
----------
G : NetworkX Graph
A directed graph.
Returns
-------
connected : bool
True if the graph is strongly connected, False otherwise.
See Also
--------
strongly_connected_components
Notes
-----
For directed graphs only.
"""
if not G.is_directed():
raise nx.NetworkXError("""Not allowed for undirected graph G.
See is_connected() for connectivity test.""")
if len(G)==0:
raise nx.NetworkXPointlessConcept(
"""Connectivity is undefined for the null graph.""")
return len(strongly_connected_components(G)[0])==len(G)
def condensation(G, scc=None):
"""Returns the condensation of G.
The condensation of G is the graph with each of the strongly connected
components contracted into a single node.
Parameters
----------
G : NetworkX DiGraph
A directed graph.
scc: list (optional, default=None)
A list of strongly connected components. If provided, the elements in
`scc` must partition the nodes in `G`. If not provided, it will be
calculated as scc=nx.strongly_connected_components(G).
Returns
-------
C : NetworkX DiGraph
The condensation of G. The node labels are integers corresponding
to the index of the component in the list of strongly connected
components.
Raises
------
NetworkXError: If G is not directed
Notes
-----
After contracting all strongly connected components to a single node,
the resulting graph is a directed acyclic graph.
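
    Examples
    --------
    Illustrative usage (component labels depend on the ordering of `scc`):

    >>> G = nx.DiGraph([(1, 2), (2, 1), (2, 3)])
    >>> C = condensation(G)
    >>> sorted(C.edges())
    [(0, 1)]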
"""
if not G.is_directed():
raise nx.NetworkXError("""Not allowed for undirected graph G.
See is_connected() for connectivity test.""")
if scc is None:
scc = nx.strongly_connected_components(G)
mapping = {}
C = nx.DiGraph()
for i,component in enumerate(scc):
for n in component:
mapping[n] = i
C.add_nodes_from(range(len(scc)))
for u,v in G.edges():
if mapping[u] != mapping[v]:
C.add_edge(mapping[u],mapping[v])
return C
|
|
# -*- test-case-name: twisted.conch.test.test_filetransfer -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import struct, errno
from twisted.internet import defer, protocol
from twisted.python import failure, log
from common import NS, getNS
from twisted.conch.interfaces import ISFTPServer, ISFTPFile
from zope import interface
class FileTransferBase(protocol.Protocol):
versions = (3, )
packetTypes = {}
def __init__(self):
self.buf = ''
self.otherVersion = None # this gets set
def sendPacket(self, kind, data):
self.transport.write(struct.pack('!LB', len(data)+1, kind) + data)
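    # Wire framing note (illustrative, not from the original source): each
    # packet is a 4-byte big-endian length covering the kind byte plus the
    # payload, then a 1-byte packet kind, then the payload. For example,
    # sendPacket(FXP_INIT, '\x00\x00\x00\x03') writes
    # '\x00\x00\x00\x05' + '\x01' + '\x00\x00\x00\x03' to the transport.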
def dataReceived(self, data):
self.buf += data
while len(self.buf) > 5:
length, kind = struct.unpack('!LB', self.buf[:5])
if len(self.buf) < 4 + length:
return
data, self.buf = self.buf[5:4+length], self.buf[4+length:]
packetType = self.packetTypes.get(kind, None)
if not packetType:
log.msg('no packet type for', kind)
continue
f = getattr(self, 'packet_%s' % packetType, None)
if not f:
log.msg('not implemented: %s' % packetType)
log.msg(repr(data[4:]))
reqId, = struct.unpack('!L', data[:4])
self._sendStatus(reqId, FX_OP_UNSUPPORTED,
"don't understand %s" % packetType)
#XXX not implemented
continue
            try:
                f(data)
            except Exception as e:
                # Log the failure and report it back to the peer rather than
                # silently dropping the request.
                log.err()
                reqId, = struct.unpack('!L', data[:4])
                self._ebStatus(failure.Failure(e), reqId)
def _parseAttributes(self, data):
flags ,= struct.unpack('!L', data[:4])
attrs = {}
data = data[4:]
if flags & FILEXFER_ATTR_SIZE == FILEXFER_ATTR_SIZE:
size ,= struct.unpack('!Q', data[:8])
attrs['size'] = size
data = data[8:]
if flags & FILEXFER_ATTR_OWNERGROUP == FILEXFER_ATTR_OWNERGROUP:
uid, gid = struct.unpack('!2L', data[:8])
attrs['uid'] = uid
attrs['gid'] = gid
data = data[8:]
if flags & FILEXFER_ATTR_PERMISSIONS == FILEXFER_ATTR_PERMISSIONS:
perms ,= struct.unpack('!L', data[:4])
attrs['permissions'] = perms
data = data[4:]
if flags & FILEXFER_ATTR_ACMODTIME == FILEXFER_ATTR_ACMODTIME:
atime, mtime = struct.unpack('!2L', data[:8])
attrs['atime'] = atime
attrs['mtime'] = mtime
data = data[8:]
if flags & FILEXFER_ATTR_EXTENDED == FILEXFER_ATTR_EXTENDED:
extended_count ,= struct.unpack('!L', data[:4])
data = data[4:]
for i in xrange(extended_count):
extended_type, data = getNS(data)
extended_data, data = getNS(data)
attrs['ext_%s' % extended_type] = extended_data
return attrs, data
def _packAttributes(self, attrs):
flags = 0
data = ''
if 'size' in attrs:
data += struct.pack('!Q', attrs['size'])
flags |= FILEXFER_ATTR_SIZE
if 'uid' in attrs and 'gid' in attrs:
data += struct.pack('!2L', attrs['uid'], attrs['gid'])
flags |= FILEXFER_ATTR_OWNERGROUP
if 'permissions' in attrs:
data += struct.pack('!L', attrs['permissions'])
flags |= FILEXFER_ATTR_PERMISSIONS
if 'atime' in attrs and 'mtime' in attrs:
data += struct.pack('!2L', attrs['atime'], attrs['mtime'])
flags |= FILEXFER_ATTR_ACMODTIME
extended = []
for k in attrs:
if k.startswith('ext_'):
ext_type = NS(k[4:])
ext_data = NS(attrs[k])
extended.append(ext_type+ext_data)
if extended:
data += struct.pack('!L', len(extended))
data += ''.join(extended)
flags |= FILEXFER_ATTR_EXTENDED
return struct.pack('!L', flags) + data
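    # Illustrative round trip (not part of the original module): packing a
    # minimal attribute dictionary and parsing it back yields the same values
    # plus an empty remainder (dict key order may differ):
    #
    #   >>> base = FileTransferBase()
    #   >>> packed = base._packAttributes({'size': 10, 'permissions': 0o644})
    #   >>> base._parseAttributes(packed)
    #   ({'size': 10, 'permissions': 420}, '')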
class FileTransferServer(FileTransferBase):
def __init__(self, data=None, avatar=None):
FileTransferBase.__init__(self)
self.client = ISFTPServer(avatar) # yay interfaces
self.openFiles = {}
self.openDirs = {}
def packet_INIT(self, data):
version ,= struct.unpack('!L', data[:4])
self.version = min(list(self.versions) + [version])
data = data[4:]
ext = {}
while data:
ext_name, data = getNS(data)
ext_data, data = getNS(data)
ext[ext_name] = ext_data
our_ext = self.client.gotVersion(version, ext)
our_ext_data = ""
for (k,v) in our_ext.items():
our_ext_data += NS(k) + NS(v)
self.sendPacket(FXP_VERSION, struct.pack('!L', self.version) + \
our_ext_data)
def packet_OPEN(self, data):
requestId = data[:4]
data = data[4:]
filename, data = getNS(data)
flags ,= struct.unpack('!L', data[:4])
data = data[4:]
attrs, data = self._parseAttributes(data)
assert data == '', 'still have data in OPEN: %s' % repr(data)
d = defer.maybeDeferred(self.client.openFile, filename, flags, attrs)
d.addCallback(self._cbOpenFile, requestId)
d.addErrback(self._ebStatus, requestId, "open failed")
def _cbOpenFile(self, fileObj, requestId):
fileId = str(hash(fileObj))
if fileId in self.openFiles:
raise KeyError, 'id already open'
self.openFiles[fileId] = fileObj
self.sendPacket(FXP_HANDLE, requestId + NS(fileId))
def packet_CLOSE(self, data):
requestId = data[:4]
data = data[4:]
handle, data = getNS(data)
assert data == '', 'still have data in CLOSE: %s' % repr(data)
if handle in self.openFiles:
fileObj = self.openFiles[handle]
d = defer.maybeDeferred(fileObj.close)
d.addCallback(self._cbClose, handle, requestId)
d.addErrback(self._ebStatus, requestId, "close failed")
elif handle in self.openDirs:
dirObj = self.openDirs[handle][0]
d = defer.maybeDeferred(dirObj.close)
d.addCallback(self._cbClose, handle, requestId, 1)
d.addErrback(self._ebStatus, requestId, "close failed")
else:
self._ebClose(failure.Failure(KeyError()), requestId)
def _cbClose(self, result, handle, requestId, isDir = 0):
if isDir:
del self.openDirs[handle]
else:
del self.openFiles[handle]
self._sendStatus(requestId, FX_OK, 'file closed')
def packet_READ(self, data):
requestId = data[:4]
data = data[4:]
handle, data = getNS(data)
(offset, length), data = struct.unpack('!QL', data[:12]), data[12:]
assert data == '', 'still have data in READ: %s' % repr(data)
if handle not in self.openFiles:
self._ebRead(failure.Failure(KeyError()), requestId)
else:
fileObj = self.openFiles[handle]
d = defer.maybeDeferred(fileObj.readChunk, offset, length)
d.addCallback(self._cbRead, requestId)
d.addErrback(self._ebStatus, requestId, "read failed")
def _cbRead(self, result, requestId):
if result == '': # python's read will return this for EOF
raise EOFError()
self.sendPacket(FXP_DATA, requestId + NS(result))
def packet_WRITE(self, data):
requestId = data[:4]
data = data[4:]
handle, data = getNS(data)
offset, = struct.unpack('!Q', data[:8])
data = data[8:]
writeData, data = getNS(data)
assert data == '', 'still have data in WRITE: %s' % repr(data)
if handle not in self.openFiles:
self._ebWrite(failure.Failure(KeyError()), requestId)
else:
fileObj = self.openFiles[handle]
d = defer.maybeDeferred(fileObj.writeChunk, offset, writeData)
d.addCallback(self._cbStatus, requestId, "write succeeded")
d.addErrback(self._ebStatus, requestId, "write failed")
def packet_REMOVE(self, data):
requestId = data[:4]
data = data[4:]
filename, data = getNS(data)
assert data == '', 'still have data in REMOVE: %s' % repr(data)
d = defer.maybeDeferred(self.client.removeFile, filename)
d.addCallback(self._cbStatus, requestId, "remove succeeded")
d.addErrback(self._ebStatus, requestId, "remove failed")
def packet_RENAME(self, data):
requestId = data[:4]
data = data[4:]
oldPath, data = getNS(data)
newPath, data = getNS(data)
assert data == '', 'still have data in RENAME: %s' % repr(data)
d = defer.maybeDeferred(self.client.renameFile, oldPath, newPath)
d.addCallback(self._cbStatus, requestId, "rename succeeded")
d.addErrback(self._ebStatus, requestId, "rename failed")
def packet_MKDIR(self, data):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
attrs, data = self._parseAttributes(data)
assert data == '', 'still have data in MKDIR: %s' % repr(data)
d = defer.maybeDeferred(self.client.makeDirectory, path, attrs)
d.addCallback(self._cbStatus, requestId, "mkdir succeeded")
d.addErrback(self._ebStatus, requestId, "mkdir failed")
def packet_RMDIR(self, data):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
assert data == '', 'still have data in RMDIR: %s' % repr(data)
d = defer.maybeDeferred(self.client.removeDirectory, path)
d.addCallback(self._cbStatus, requestId, "rmdir succeeded")
d.addErrback(self._ebStatus, requestId, "rmdir failed")
def packet_OPENDIR(self, data):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
assert data == '', 'still have data in OPENDIR: %s' % repr(data)
d = defer.maybeDeferred(self.client.openDirectory, path)
d.addCallback(self._cbOpenDirectory, requestId)
d.addErrback(self._ebStatus, requestId, "opendir failed")
def _cbOpenDirectory(self, dirObj, requestId):
handle = str(hash(dirObj))
if handle in self.openDirs:
raise KeyError, "already opened this directory"
self.openDirs[handle] = [dirObj, iter(dirObj)]
self.sendPacket(FXP_HANDLE, requestId + NS(handle))
def packet_READDIR(self, data):
requestId = data[:4]
data = data[4:]
handle, data = getNS(data)
assert data == '', 'still have data in READDIR: %s' % repr(data)
if handle not in self.openDirs:
self._ebStatus(failure.Failure(KeyError()), requestId)
else:
dirObj, dirIter = self.openDirs[handle]
d = defer.maybeDeferred(self._scanDirectory, dirIter, [])
d.addCallback(self._cbSendDirectory, requestId)
d.addErrback(self._ebStatus, requestId, "scan directory failed")
def _scanDirectory(self, dirIter, f):
while len(f) < 250:
try:
info = dirIter.next()
except StopIteration:
if not f:
raise EOFError
return f
if isinstance(info, defer.Deferred):
info.addCallback(self._cbScanDirectory, dirIter, f)
return
else:
f.append(info)
return f
def _cbScanDirectory(self, result, dirIter, f):
f.append(result)
return self._scanDirectory(dirIter, f)
def _cbSendDirectory(self, result, requestId):
data = ''
for (filename, longname, attrs) in result:
data += NS(filename)
data += NS(longname)
data += self._packAttributes(attrs)
self.sendPacket(FXP_NAME, requestId +
struct.pack('!L', len(result))+data)
def packet_STAT(self, data, followLinks = 1):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
assert data == '', 'still have data in STAT/LSTAT: %s' % repr(data)
d = defer.maybeDeferred(self.client.getAttrs, path, followLinks)
d.addCallback(self._cbStat, requestId)
d.addErrback(self._ebStatus, requestId, 'stat/lstat failed')
def packet_LSTAT(self, data):
self.packet_STAT(data, 0)
def packet_FSTAT(self, data):
requestId = data[:4]
data = data[4:]
handle, data = getNS(data)
assert data == '', 'still have data in FSTAT: %s' % repr(data)
if handle not in self.openFiles:
self._ebStatus(failure.Failure(KeyError('%s not in self.openFiles'
% handle)), requestId)
else:
fileObj = self.openFiles[handle]
d = defer.maybeDeferred(fileObj.getAttrs)
d.addCallback(self._cbStat, requestId)
d.addErrback(self._ebStatus, requestId, 'fstat failed')
def _cbStat(self, result, requestId):
data = requestId + self._packAttributes(result)
self.sendPacket(FXP_ATTRS, data)
def packet_SETSTAT(self, data):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
attrs, data = self._parseAttributes(data)
if data != '':
log.msg('WARN: still have data in SETSTAT: %s' % repr(data))
d = defer.maybeDeferred(self.client.setAttrs, path, attrs)
d.addCallback(self._cbStatus, requestId, 'setstat succeeded')
d.addErrback(self._ebStatus, requestId, 'setstat failed')
def packet_FSETSTAT(self, data):
requestId = data[:4]
data = data[4:]
handle, data = getNS(data)
attrs, data = self._parseAttributes(data)
assert data == '', 'still have data in FSETSTAT: %s' % repr(data)
if handle not in self.openFiles:
self._ebStatus(failure.Failure(KeyError()), requestId)
else:
fileObj = self.openFiles[handle]
d = defer.maybeDeferred(fileObj.setAttrs, attrs)
d.addCallback(self._cbStatus, requestId, 'fsetstat succeeded')
d.addErrback(self._ebStatus, requestId, 'fsetstat failed')
def packet_READLINK(self, data):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
assert data == '', 'still have data in READLINK: %s' % repr(data)
d = defer.maybeDeferred(self.client.readLink, path)
d.addCallback(self._cbReadLink, requestId)
d.addErrback(self._ebStatus, requestId, 'readlink failed')
def _cbReadLink(self, result, requestId):
self._cbSendDirectory([(result, '', {})], requestId)
def packet_SYMLINK(self, data):
requestId = data[:4]
data = data[4:]
linkPath, data = getNS(data)
targetPath, data = getNS(data)
d = defer.maybeDeferred(self.client.makeLink, linkPath, targetPath)
d.addCallback(self._cbStatus, requestId, 'symlink succeeded')
d.addErrback(self._ebStatus, requestId, 'symlink failed')
def packet_REALPATH(self, data):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
assert data == '', 'still have data in REALPATH: %s' % repr(data)
d = defer.maybeDeferred(self.client.realPath, path)
d.addCallback(self._cbReadLink, requestId) # same return format
d.addErrback(self._ebStatus, requestId, 'realpath failed')
def packet_EXTENDED(self, data):
requestId = data[:4]
data = data[4:]
extName, extData = getNS(data)
d = defer.maybeDeferred(self.client.extendedRequest, extName, extData)
d.addCallback(self._cbExtended, requestId)
d.addErrback(self._ebStatus, requestId, 'extended %s failed' % extName)
def _cbExtended(self, data, requestId):
self.sendPacket(FXP_EXTENDED_REPLY, requestId + data)
def _cbStatus(self, result, requestId, msg = "request succeeded"):
self._sendStatus(requestId, FX_OK, msg)
def _ebStatus(self, reason, requestId, msg = "request failed"):
code = FX_FAILURE
message = msg
if reason.type in (IOError, OSError):
if reason.value.errno == errno.ENOENT: # no such file
code = FX_NO_SUCH_FILE
message = reason.value.strerror
elif reason.value.errno == errno.EACCES: # permission denied
code = FX_PERMISSION_DENIED
message = reason.value.strerror
elif reason.value.errno == errno.EEXIST:
code = FX_FILE_ALREADY_EXISTS
else:
log.err(reason)
elif reason.type == EOFError: # EOF
code = FX_EOF
if reason.value.args:
message = reason.value.args[0]
elif reason.type == NotImplementedError:
code = FX_OP_UNSUPPORTED
if reason.value.args:
message = reason.value.args[0]
elif reason.type == SFTPError:
code = reason.value.code
message = reason.value.message
else:
log.err(reason)
self._sendStatus(requestId, code, message)
def _sendStatus(self, requestId, code, message, lang = ''):
"""
Helper method to send a FXP_STATUS message.
"""
data = requestId + struct.pack('!L', code)
data += NS(message)
data += NS(lang)
self.sendPacket(FXP_STATUS, data)
def connectionLost(self, reason):
"""
Clean all opened files and directories.
"""
for fileObj in self.openFiles.values():
fileObj.close()
self.openFiles = {}
for (dirObj, dirIter) in self.openDirs.values():
dirObj.close()
self.openDirs = {}
class FileTransferClient(FileTransferBase):
def __init__(self, extData = {}):
"""
@param extData: a dict of extended_name : extended_data items
to be sent to the server.
"""
FileTransferBase.__init__(self)
        self.extData = extData
self.counter = 0
self.openRequests = {} # id -> Deferred
self.wasAFile = {} # Deferred -> 1 TERRIBLE HACK
def connectionMade(self):
data = struct.pack('!L', max(self.versions))
        for k, v in self.extData.iteritems():
data += NS(k) + NS(v)
self.sendPacket(FXP_INIT, data)
def _sendRequest(self, msg, data):
data = struct.pack('!L', self.counter) + data
d = defer.Deferred()
self.openRequests[self.counter] = d
self.counter += 1
self.sendPacket(msg, data)
return d
def _parseRequest(self, data):
(id,) = struct.unpack('!L', data[:4])
d = self.openRequests[id]
del self.openRequests[id]
return d, data[4:]
def openFile(self, filename, flags, attrs):
"""
Open a file.
This method returns a L{Deferred} that is called back with an object
that provides the L{ISFTPFile} interface.
@param filename: a string representing the file to open.
        @param flags: an integer of the flags to open the file with, ORed together.
The flags and their values are listed at the bottom of this file.
@param attrs: a list of attributes to open the file with. It is a
dictionary, consisting of 0 or more keys. The possible keys are::
size: the size of the file in bytes
uid: the user ID of the file as an integer
gid: the group ID of the file as an integer
permissions: the permissions of the file with as an integer.
the bit representation of this field is defined by POSIX.
atime: the access time of the file as seconds since the epoch.
mtime: the modification time of the file as seconds since the epoch.
ext_*: extended attributes. The server is not required to
understand this, but it may.
        NOTE: there is no way to indicate text or binary files. It is up
        to the SFTP client to deal with this.
"""
data = NS(filename) + struct.pack('!L', flags) + self._packAttributes(attrs)
d = self._sendRequest(FXP_OPEN, data)
self.wasAFile[d] = (1, filename) # HACK
return d
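    # Illustrative usage (assumes an established session; 'motd.txt' is a
    # hypothetical path):
    #
    #   d = client.openFile('motd.txt', FXF_READ, {})
    #   d.addCallback(lambda f: f.readChunk(0, 4096))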
def removeFile(self, filename):
"""
Remove the given file.
This method returns a Deferred that is called back when it succeeds.
@param filename: the name of the file as a string.
"""
return self._sendRequest(FXP_REMOVE, NS(filename))
def renameFile(self, oldpath, newpath):
"""
Rename the given file.
This method returns a Deferred that is called back when it succeeds.
@param oldpath: the current location of the file.
@param newpath: the new file name.
"""
return self._sendRequest(FXP_RENAME, NS(oldpath)+NS(newpath))
def makeDirectory(self, path, attrs):
"""
Make a directory.
This method returns a Deferred that is called back when it is
created.
@param path: the name of the directory to create as a string.
@param attrs: a dictionary of attributes to create the directory
with. Its meaning is the same as the attrs in the openFile method.
"""
return self._sendRequest(FXP_MKDIR, NS(path)+self._packAttributes(attrs))
def removeDirectory(self, path):
"""
Remove a directory (non-recursively)
It is an error to remove a directory that has files or directories in
it.
This method returns a Deferred that is called back when it is removed.
@param path: the directory to remove.
"""
return self._sendRequest(FXP_RMDIR, NS(path))
def openDirectory(self, path):
"""
Open a directory for scanning.
This method returns a Deferred that is called back with an iterable
object that has a close() method.
The close() method is called when the client is finished reading
from the directory. At this point, the iterable will no longer
be used.
The iterable returns triples of the form (filename, longname, attrs)
or a Deferred that returns the same. The sequence must support
__getitem__, but otherwise may be any 'sequence-like' object.
filename is the name of the file relative to the directory.
        longname is an expanded format of the filename. The recommended format
is:
-rwxr-xr-x 1 mjos staff 348911 Mar 25 14:29 t-filexfer
1234567890 123 12345678 12345678 12345678 123456789012
The first line is sample output, the second is the length of the field.
The fields are: permissions, link count, user owner, group owner,
size in bytes, modification time.
attrs is a dictionary in the format of the attrs argument to openFile.
@param path: the directory to open.
"""
d = self._sendRequest(FXP_OPENDIR, NS(path))
self.wasAFile[d] = (0, path)
return d
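    # Illustrative listing (hypothetical path): the object the Deferred fires
    # with is read in batches and must be closed when finished.
    #
    #   def _cbList(dirObj):
    #       d = dirObj.read()
    #       d.addCallback(lambda entries: [name for name, longname, attrs in entries])
    #       d.addCallback(lambda names: (dirObj.close(), names)[1])
    #       return d
    #   client.openDirectory('/tmp').addCallback(_cbList)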
def getAttrs(self, path, followLinks=0):
"""
Return the attributes for the given path.
This method returns a dictionary in the same format as the attrs
argument to openFile or a Deferred that is called back with same.
@param path: the path to return attributes for as a string.
@param followLinks: a boolean. if it is True, follow symbolic links
and return attributes for the real path at the base. if it is False,
return attributes for the specified path.
"""
if followLinks: m = FXP_STAT
else: m = FXP_LSTAT
return self._sendRequest(m, NS(path))
def setAttrs(self, path, attrs):
"""
Set the attributes for the path.
This method returns when the attributes are set or a Deferred that is
called back when they are.
@param path: the path to set attributes for as a string.
@param attrs: a dictionary in the same format as the attrs argument to
openFile.
"""
data = NS(path) + self._packAttributes(attrs)
return self._sendRequest(FXP_SETSTAT, data)
def readLink(self, path):
"""
Find the root of a set of symbolic links.
This method returns the target of the link, or a Deferred that
returns the same.
@param path: the path of the symlink to read.
"""
d = self._sendRequest(FXP_READLINK, NS(path))
return d.addCallback(self._cbRealPath)
def makeLink(self, linkPath, targetPath):
"""
Create a symbolic link.
This method returns when the link is made, or a Deferred that
returns the same.
@param linkPath: the pathname of the symlink as a string
@param targetPath: the path of the target of the link as a string.
"""
return self._sendRequest(FXP_SYMLINK, NS(linkPath)+NS(targetPath))
def realPath(self, path):
"""
Convert any path to an absolute path.
This method returns the absolute path as a string, or a Deferred
that returns the same.
@param path: the path to convert as a string.
"""
d = self._sendRequest(FXP_REALPATH, NS(path))
return d.addCallback(self._cbRealPath)
def _cbRealPath(self, result):
name, longname, attrs = result[0]
return name
def extendedRequest(self, request, data):
"""
Make an extended request of the server.
The method returns a Deferred that is called back with
the result of the extended request.
@param request: the name of the extended request to make.
@param data: any other data that goes along with the request.
"""
return self._sendRequest(FXP_EXTENDED, NS(request) + data)
def packet_VERSION(self, data):
version, = struct.unpack('!L', data[:4])
data = data[4:]
d = {}
while data:
k, data = getNS(data)
v, data = getNS(data)
d[k]=v
self.version = version
self.gotServerVersion(version, d)
def packet_STATUS(self, data):
d, data = self._parseRequest(data)
code, = struct.unpack('!L', data[:4])
data = data[4:]
if len(data) >= 4:
msg, data = getNS(data)
if len(data) >= 4:
lang, data = getNS(data)
else:
lang = ''
else:
msg = ''
lang = ''
if code == FX_OK:
d.callback((msg, lang))
elif code == FX_EOF:
d.errback(EOFError(msg))
elif code == FX_OP_UNSUPPORTED:
d.errback(NotImplementedError(msg))
else:
d.errback(SFTPError(code, msg, lang))
def packet_HANDLE(self, data):
d, data = self._parseRequest(data)
isFile, name = self.wasAFile.pop(d)
if isFile:
cb = ClientFile(self, getNS(data)[0])
else:
cb = ClientDirectory(self, getNS(data)[0])
cb.name = name
d.callback(cb)
def packet_DATA(self, data):
d, data = self._parseRequest(data)
d.callback(getNS(data)[0])
def packet_NAME(self, data):
d, data = self._parseRequest(data)
count, = struct.unpack('!L', data[:4])
data = data[4:]
files = []
for i in range(count):
filename, data = getNS(data)
longname, data = getNS(data)
attrs, data = self._parseAttributes(data)
files.append((filename, longname, attrs))
d.callback(files)
def packet_ATTRS(self, data):
d, data = self._parseRequest(data)
d.callback(self._parseAttributes(data)[0])
def packet_EXTENDED_REPLY(self, data):
d, data = self._parseRequest(data)
d.callback(data)
def gotServerVersion(self, serverVersion, extData):
"""
        Called when the server sends its version info.
        @param serverVersion: an integer representing the version of the SFTP
        protocol the server is claiming.
        @param extData: a dictionary of extended_name : extended_data items.
        These items are sent by the server to indicate additional features.
"""
class ClientFile:
interface.implements(ISFTPFile)
def __init__(self, parent, handle):
self.parent = parent
self.handle = NS(handle)
def close(self):
return self.parent._sendRequest(FXP_CLOSE, self.handle)
def readChunk(self, offset, length):
data = self.handle + struct.pack("!QL", offset, length)
return self.parent._sendRequest(FXP_READ, data)
def writeChunk(self, offset, chunk):
data = self.handle + struct.pack("!Q", offset) + NS(chunk)
return self.parent._sendRequest(FXP_WRITE, data)
def getAttrs(self):
return self.parent._sendRequest(FXP_FSTAT, self.handle)
def setAttrs(self, attrs):
data = self.handle + self.parent._packAttributes(attrs)
        return self.parent._sendRequest(FXP_FSETSTAT, data)
class ClientDirectory:
def __init__(self, parent, handle):
self.parent = parent
self.handle = NS(handle)
self.filesCache = []
def read(self):
d = self.parent._sendRequest(FXP_READDIR, self.handle)
return d
def close(self):
return self.parent._sendRequest(FXP_CLOSE, self.handle)
def __iter__(self):
return self
def next(self):
if self.filesCache:
return self.filesCache.pop(0)
d = self.read()
d.addCallback(self._cbReadDir)
d.addErrback(self._ebReadDir)
return d
def _cbReadDir(self, names):
self.filesCache = names[1:]
return names[0]
def _ebReadDir(self, reason):
reason.trap(EOFError)
def _():
raise StopIteration
self.next = _
return reason
class SFTPError(Exception):
def __init__(self, errorCode, errorMessage, lang = ''):
Exception.__init__(self)
self.code = errorCode
self._message = errorMessage
self.lang = lang
def message(self):
"""
A string received over the network that explains the error to a human.
"""
# Python 2.6 deprecates assigning to the 'message' attribute of an
# exception. We define this read-only property here in order to
# prevent the warning about deprecation while maintaining backwards
        # compatibility with older clients that rely on the 'message'
# attribute being set correctly. See bug #3897.
return self._message
message = property(message)
def __str__(self):
return 'SFTPError %s: %s' % (self.code, self.message)
FXP_INIT = 1
FXP_VERSION = 2
FXP_OPEN = 3
FXP_CLOSE = 4
FXP_READ = 5
FXP_WRITE = 6
FXP_LSTAT = 7
FXP_FSTAT = 8
FXP_SETSTAT = 9
FXP_FSETSTAT = 10
FXP_OPENDIR = 11
FXP_READDIR = 12
FXP_REMOVE = 13
FXP_MKDIR = 14
FXP_RMDIR = 15
FXP_REALPATH = 16
FXP_STAT = 17
FXP_RENAME = 18
FXP_READLINK = 19
FXP_SYMLINK = 20
FXP_STATUS = 101
FXP_HANDLE = 102
FXP_DATA = 103
FXP_NAME = 104
FXP_ATTRS = 105
FXP_EXTENDED = 200
FXP_EXTENDED_REPLY = 201
FILEXFER_ATTR_SIZE = 0x00000001
FILEXFER_ATTR_UIDGID = 0x00000002
FILEXFER_ATTR_OWNERGROUP = FILEXFER_ATTR_UIDGID
FILEXFER_ATTR_PERMISSIONS = 0x00000004
FILEXFER_ATTR_ACMODTIME = 0x00000008
FILEXFER_ATTR_EXTENDED = 0x80000000L
FILEXFER_TYPE_REGULAR = 1
FILEXFER_TYPE_DIRECTORY = 2
FILEXFER_TYPE_SYMLINK = 3
FILEXFER_TYPE_SPECIAL = 4
FILEXFER_TYPE_UNKNOWN = 5
FXF_READ = 0x00000001
FXF_WRITE = 0x00000002
FXF_APPEND = 0x00000004
FXF_CREAT = 0x00000008
FXF_TRUNC = 0x00000010
FXF_EXCL = 0x00000020
FXF_TEXT = 0x00000040
FX_OK = 0
FX_EOF = 1
FX_NO_SUCH_FILE = 2
FX_PERMISSION_DENIED = 3
FX_FAILURE = 4
FX_BAD_MESSAGE = 5
FX_NO_CONNECTION = 6
FX_CONNECTION_LOST = 7
FX_OP_UNSUPPORTED = 8
FX_FILE_ALREADY_EXISTS = 11
# http://tools.ietf.org/wg/secsh/draft-ietf-secsh-filexfer/ defines more
# useful error codes, but so far OpenSSH doesn't implement them. We use them
# internally for clarity, but for now define them all as FX_FAILURE to be
# compatible with existing software.
FX_NOT_A_DIRECTORY = FX_FAILURE
FX_FILE_IS_A_DIRECTORY = FX_FAILURE
# initialize FileTransferBase.packetTypes:
g = globals()
for name in g.keys():
if name.startswith('FXP_'):
value = g[name]
FileTransferBase.packetTypes[value] = name[4:]
del g, name, value
|
|
"""
Filename: plot_water_budget.py
Author: Damien Irving, irving.damien@gmail.com
Description: Plot climatology and trends in precipitation and evaporation
Input: List of netCDF files to plot
Output: An image in either bitmap (e.g. .png) or vector (e.g. .svg, .eps) format
"""
# Import general Python modules
import sys, os, pdb
import argparse
import numpy
import iris
import iris.plot as iplt
from iris.experimental.equalise_cubes import equalise_attributes
import matplotlib.pyplot as plt
from matplotlib import gridspec
import seaborn
# Import my modules
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
repo_dir = os.path.join(repo_dir, directory)
if directory == 'ocean-analysis':
break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
import general_io as gio
import timeseries
import grids
import convenient_universal as uconv
import spatial_weights
except ImportError:
raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions
line_characteristics = {'pr': ('precipitation', 'blue', 'dotted'),
'evspsbl': ('evaporation', 'orange', 'dotted'),
'pe': ('P-E', 'green', 'solid')
}
plot_order = ['pr', 'evspsbl', 'pe']
def get_data(filenames, var, metadata_dict, time_constraint, area=False, invert_evap=False):
"""Read, merge, temporally aggregate and calculate zonal mean."""
if filenames:
with iris.FUTURE.context(cell_datetime_objects=True):
cube = iris.load(filenames, gio.check_iris_var(var))
metadata_dict[filenames[0]] = cube[0].attributes['history']
equalise_attributes(cube)
iris.util.unify_time_units(cube)
cube = cube.concatenate_cube()
cube = gio.check_time_units(cube)
cube = iris.util.squeeze(cube)
cube = cube.extract(time_constraint)
cube = timeseries.convert_to_annual(cube, full_months=True)
cube, coord_names, regrid_status = grids.curvilinear_to_rectilinear(cube)
assert cube.units == 'kg m-2 s-1'
cube.data = cube.data * 86400
            cube.units = 'mm/day'
if invert_evap and (var == 'water_evaporation_flux'):
cube.data = cube.data * -1
if area:
cube = spatial_weights.multiply_by_area(cube)
zonal_mean = cube.collapsed('longitude', iris.analysis.MEAN)
zonal_mean.remove_coord('longitude')
else:
zonal_mean = None
return zonal_mean, metadata_dict
def calc_trend_cube(cube):
"""Calculate trend and put into appropriate cube."""
trend_array = timeseries.calc_trend(cube, per_yr=True)
new_cube = cube[0,:].copy()
new_cube.remove_coord('time')
new_cube.data = trend_array
return new_cube
def climatology_plot(cube_dict, gs, plotnum, area_scaled=False):
"""Plot the climatology """
ax = plt.subplot(gs[plotnum])
plt.sca(ax)
for var in plot_order:
if cube_dict[var]:
climatology_cube = cube_dict[var].collapsed('time', iris.analysis.MEAN)
label, color, style = line_characteristics[var]
iplt.plot(climatology_cube, label=label, color=color, linestyle=style)
ax.axhline(y=0, color='0.5', linestyle='--', linewidth=0.5)
ax.legend()
ax.set_title('climatology')
if area_scaled:
ax.set_ylabel('$mm \: day^{-1} \: m^2$')
else:
ax.set_ylabel('$mm \: day^{-1}$')
def trend_plot(cube_dict, gs, plotnum, area_scaled=False):
"""Plot the trends"""
ax = plt.subplot(gs[plotnum])
plt.sca(ax)
for var in plot_order:
if cube_dict[var]:
trend_cube = calc_trend_cube(cube_dict[var])
label, color, style = line_characteristics[var]
            iplt.plot(trend_cube, label=label, color=color, linestyle=style)
ax.axhline(y=0, color='0.5', linestyle='--', linewidth=0.5)
ax.legend()
ax.set_title('trends')
if area_scaled:
ax.set_ylabel('$mm \: day^{-1} \: m^2 \: yr^{-1}$')
else:
ax.set_ylabel('$mm \: day^{-1} \: yr^{-1}$')
ax.set_xlabel('latitude')
def get_title(cube_dict):
"""Get the plot title."""
for cube in cube_dict.values():
if cube:
run = 'r%si%sp%s' %(cube.attributes['realization'], cube.attributes['initialization_method'], cube.attributes['physics_version'])
title = 'Surface water budget for global ocean surface \n %s, %s, %s' %(cube.attributes['model_id'], cube.attributes['experiment'], run)
break
return title
def main(inargs):
"""Run the program."""
cube_dict = {}
metadata_dict = {}
try:
time_constraint = gio.get_time_constraint(inargs.time)
except AttributeError:
time_constraint = iris.Constraint()
cube_dict['pr'], metadata_dict = get_data(inargs.pr_files, 'precipitation_flux', metadata_dict, time_constraint, area=inargs.area)
cube_dict['evspsbl'], metadata_dict = get_data(inargs.evspsbl_files, 'water_evaporation_flux', metadata_dict, time_constraint,
area=inargs.area, invert_evap=inargs.invert_evap)
cube_dict['pe'], metadata_dict = get_data(inargs.pe_files, 'precipitation_minus_evaporation_flux', metadata_dict, time_constraint, area=inargs.area)
fig = plt.figure(figsize=[12, 14])
gs = gridspec.GridSpec(2, 1)
climatology_plot(cube_dict, gs, 0, area_scaled=inargs.area)
trend_plot(cube_dict, gs, 1, area_scaled=inargs.area)
title = get_title(cube_dict)
plt.suptitle(title)
plt.savefig(inargs.outfile, bbox_inches='tight')
gio.write_metadata(inargs.outfile, file_info=metadata_dict)
if __name__ == '__main__':
extra_info ="""
author:
Damien Irving, irving.damien@gmail.com
"""
description = 'Plot climatology and trends in precipitation and evaporation fluxes'
parser = argparse.ArgumentParser(description=description,
epilog=extra_info,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("outfile", type=str, help="Output file")
parser.add_argument("--pr_files", type=str, nargs='*', default=None, required=True,
help="precipitation flux files")
parser.add_argument("--evspsbl_files", type=str, nargs='*', default=None, required=True,
help="precipitation flux files")
parser.add_argument("--pe_files", type=str, nargs='*', default=None, required=True,
help="P-E flux files")
parser.add_argument("--time", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'),
help="Time period [default = entire]")
parser.add_argument("--area", action="store_true", default=False,
help="Multiple data by area")
parser.add_argument("--invert_evap", action="store_true", default=False,
help="Multiply evap by -1")
args = parser.parse_args()
main(args)
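# Example invocation (illustrative file names):
#
#   python plot_water_budget.py water_budget.png \
#       --pr_files pr_*.nc --evspsbl_files evspsbl_*.nc --pe_files pe_*.nc \
#       --time 1950-01-01 2000-12-31 --area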
|
|
from collections import Iterable, MutableSequence, Mapping
from numbers import Real, Integral
import warnings
from xml.etree import ElementTree as ET
import sys
from six import string_types
import numpy as np
from openmc.clean_xml import clean_xml_indentation
import openmc.checkvalue as cv
from openmc import Nuclide, VolumeCalculation, Source, Mesh
_RUN_MODES = ['eigenvalue', 'fixed source', 'plot', 'volume', 'particle restart']
_RES_SCAT_METHODS = ['dbrc', 'wcm', 'ares']
class Settings(object):
"""Settings used for an OpenMC simulation.
Attributes
----------
batches : int
Number of batches to simulate
confidence_intervals : bool
If True, uncertainties on tally results will be reported as the
half-width of the 95% two-sided confidence interval. If False,
uncertainties on tally results will be reported as the sample standard
deviation.
create_fission_neutrons : bool
Indicate whether fission neutrons should be created or not.
cross_sections : str
Indicates the path to an XML cross section listing file (usually named
cross_sections.xml). If it is not set, the
:envvar:`OPENMC_CROSS_SECTIONS` environment variable will be used for
continuous-energy calculations and
:envvar:`OPENMC_MG_CROSS_SECTIONS` will be used for multi-group
calculations to find the path to the XML cross section file.
cutoff : dict
Dictionary defining weight cutoff and energy cutoff. The dictionary may
        have three keys, 'weight', 'weight_avg' and 'energy'. The value for
        'weight' should be a float indicating the weight cutoff below which
        particles undergo Russian roulette. The value for 'weight_avg' should
        be a float indicating the weight assigned to particles that are not
        killed after Russian roulette. The value for 'energy' should be a
        float indicating the energy in eV below which particles will be
        killed.
energy_mode : {'continuous-energy', 'multi-group'}
Set whether the calculation should be continuous-energy or multi-group.
entropy_mesh : openmc.Mesh
        Mesh to be used to calculate Shannon entropy. If the mesh dimensions
        are not specified, OpenMC assigns a mesh such that 20 source sites per
        mesh cell are to be expected on average.
generations_per_batch : int
Number of generations per batch
inactive : int
Number of inactive batches
keff_trigger : dict
Dictionary defining a trigger on eigenvalue. The dictionary must have
two keys, 'type' and 'threshold'. Acceptable values corresponding to
type are 'variance', 'std_dev', and 'rel_err'. The threshold value
should be a float indicating the variance, standard deviation, or
relative error used.
max_order : None or int
Maximum scattering order to apply globally when in multi-group mode.
multipole_library : str
Indicates the path to a directory containing a windowed multipole
cross section library. If it is not set, the
:envvar:`OPENMC_MULTIPOLE_LIBRARY` environment variable will be used. A
multipole library is optional.
no_reduce : bool
Indicate that all user-defined and global tallies should not be reduced
across processes in a parallel calculation.
output : dict
Dictionary indicating what files to output. Valid keys are 'summary',
'cross_sections', 'tallies', and 'distribmats'. Values corresponding to
each key should be given as a boolean value.
output_path : str
Path to write output to
particles : int
Number of particles per generation
ptables : bool
Determine whether probability tables are used.
resonance_scattering : dict
Settings for resonance elastic scattering. Accepted keys are 'enable'
(bool), 'method' (str), 'energy_min' (float), 'energy_max' (float), and
'nuclides' (list). The 'method' can be set to 'dbrc' (Doppler broadening
rejection correction), 'wcm' (weight correction method), and 'ares'
(accelerated resonance elastic scattering). If not specified, 'ares' is
the default method. The 'energy_min' and 'energy_max' values indicate
the minimum and maximum energies above and below which the resonance
elastic scattering method is to be applied. The 'nuclides' list
indicates what nuclides the method should be applied to. In its absence,
the method will be applied to all nuclides with 0 K elastic scattering
data present.
run_cmfd : bool
Indicate if coarse mesh finite difference acceleration is to be used
run_mode : {'eigenvalue', 'fixed source', 'plot', 'volume', 'particle restart'}
The type of calculation to perform (default is 'eigenvalue')
seed : int
Seed for the linear congruential pseudorandom number generator
source : Iterable of openmc.Source
Distribution of source sites in space, angle, and energy
sourcepoint : dict
Options for writing source points. Acceptable keys are:
:batches: list of batches at which to write source
:overwrite: bool indicating whether to overwrite
:separate: bool indicating whether the source should be written as a
separate file
:write: bool indicating whether or not to write the source
statepoint : dict
Options for writing state points. Acceptable keys are:
:batches: list of batches at which to write source
survival_biasing : bool
Indicate whether survival biasing is to be used
tabular_legendre : dict
Determines if a multi-group scattering moment kernel expanded via
Legendre polynomials is to be converted to a tabular distribution or
not. Accepted keys are 'enable' and 'num_points'. The value for
'enable' is a bool stating whether the conversion to tabular is
performed; the value for 'num_points' sets the number of points to use
in the tabular distribution, should 'enable' be True.
temperature : dict
Defines a default temperature and method for treating intermediate
temperatures at which nuclear data doesn't exist. Accepted keys are
'default', 'method', 'tolerance', and 'multipole'. The value for
'default' should be a float representing the default temperature in
Kelvin. The value for 'method' should be 'nearest' or 'interpolation'.
If the method is 'nearest', 'tolerance' indicates a range of temperature
within which cross sections may be used. 'multipole' is a boolean
indicating whether or not the windowed multipole method should be used
to evaluate resolved resonance cross sections.
threads : int
Number of OpenMP threads
trace : tuple or list
Show detailed information about a single particle, indicated by three
integers: the batch number, generation number, and particle number
track : tuple or list
Specify particles for which track files should be written. Each particle
is identified by a triplet with the batch number, generation number, and
particle number.
trigger_active : bool
Indicate whether tally triggers are used
trigger_batch_interval : int
Number of batches in between convergence checks
trigger_max_batches : int
Maximum number of batches simulated. If this is set, the number of
batches specified via ``batches`` is interpreted as the minimum number
of batches
ufs_mesh : openmc.Mesh
        Mesh to be used for redistributing source sites via the uniform
        fission site (UFS) method.
verbosity : int
Verbosity during simulation between 1 and 10. Verbosity levels are
described in :ref:`verbosity`.
volume_calculations : VolumeCalculation or iterable of VolumeCalculation
Stochastic volume calculation specifications
"""
def __init__(self):
# Run mode subelement (default is 'eigenvalue')
self._run_mode = 'eigenvalue'
self._batches = None
self._generations_per_batch = None
self._inactive = None
self._particles = None
self._keff_trigger = None
# Energy mode subelement
self._energy_mode = None
self._max_order = None
# Source subelement
self._source = cv.CheckedList(Source, 'source distributions')
self._confidence_intervals = None
self._cross_sections = None
self._multipole_library = None
self._ptables = None
self._run_cmfd = None
self._seed = None
self._survival_biasing = None
# Shannon entropy mesh
self._entropy_mesh = None
# Trigger subelement
self._trigger_active = None
self._trigger_max_batches = None
self._trigger_batch_interval = None
self._output = None
self._output_path = None
# Output options
self._statepoint = {}
self._sourcepoint = {}
self._threads = None
self._no_reduce = None
self._verbosity = None
self._trace = None
self._track = None
self._tabular_legendre = {}
self._temperature = {}
# Cutoff subelement
self._cutoff = None
# Uniform fission source subelement
self._ufs_mesh = None
# Domain decomposition subelement
self._dd_mesh_dimension = None
self._dd_mesh_lower_left = None
self._dd_mesh_upper_right = None
self._dd_nodemap = None
self._dd_allow_leakage = False
self._dd_count_interactions = False
self._resonance_scattering = {}
self._volume_calculations = cv.CheckedList(
VolumeCalculation, 'volume calculations')
self._create_fission_neutrons = None
@property
def run_mode(self):
return self._run_mode
@property
def batches(self):
return self._batches
@property
def generations_per_batch(self):
return self._generations_per_batch
@property
def inactive(self):
return self._inactive
@property
def particles(self):
return self._particles
@property
def keff_trigger(self):
return self._keff_trigger
@property
def energy_mode(self):
return self._energy_mode
@property
def max_order(self):
return self._max_order
@property
def source(self):
return self._source
@property
def confidence_intervals(self):
return self._confidence_intervals
@property
def cross_sections(self):
return self._cross_sections
@property
def multipole_library(self):
return self._multipole_library
@property
def ptables(self):
return self._ptables
@property
def run_cmfd(self):
return self._run_cmfd
@property
def seed(self):
return self._seed
@property
def survival_biasing(self):
return self._survival_biasing
@property
def entropy_mesh(self):
return self._entropy_mesh
@property
def trigger_active(self):
return self._trigger_active
@property
def trigger_max_batches(self):
return self._trigger_max_batches
@property
def trigger_batch_interval(self):
return self._trigger_batch_interval
@property
def output(self):
return self._output
@property
def output_path(self):
return self._output_path
@property
def sourcepoint(self):
return self._sourcepoint
@property
def statepoint(self):
return self._statepoint
@property
def threads(self):
return self._threads
@property
def no_reduce(self):
return self._no_reduce
@property
def verbosity(self):
return self._verbosity
@property
def tabular_legendre(self):
return self._tabular_legendre
@property
def temperature(self):
return self._temperature
@property
def trace(self):
return self._trace
@property
def track(self):
return self._track
@property
def cutoff(self):
return self._cutoff
@property
def ufs_mesh(self):
return self._ufs_mesh
@property
def dd_mesh_dimension(self):
return self._dd_mesh_dimension
@property
def dd_mesh_lower_left(self):
return self._dd_mesh_lower_left
@property
def dd_mesh_upper_right(self):
return self._dd_mesh_upper_right
@property
def dd_nodemap(self):
return self._dd_nodemap
@property
def dd_allow_leakage(self):
return self._dd_allow_leakage
@property
def dd_count_interactions(self):
return self._dd_count_interactions
@property
def resonance_scattering(self):
return self._resonance_scattering
@property
def volume_calculations(self):
return self._volume_calculations
@property
def create_fission_neutrons(self):
return self._create_fission_neutrons
@run_mode.setter
def run_mode(self, run_mode):
cv.check_value('run mode', run_mode, _RUN_MODES)
self._run_mode = run_mode
@batches.setter
def batches(self, batches):
cv.check_type('batches', batches, Integral)
cv.check_greater_than('batches', batches, 0)
self._batches = batches
@generations_per_batch.setter
def generations_per_batch(self, generations_per_batch):
        cv.check_type('generations per batch', generations_per_batch, Integral)
cv.check_greater_than('generations per batch', generations_per_batch, 0)
self._generations_per_batch = generations_per_batch
@inactive.setter
def inactive(self, inactive):
cv.check_type('inactive batches', inactive, Integral)
cv.check_greater_than('inactive batches', inactive, 0, True)
self._inactive = inactive
@particles.setter
def particles(self, particles):
cv.check_type('particles', particles, Integral)
cv.check_greater_than('particles', particles, 0)
self._particles = particles
@keff_trigger.setter
def keff_trigger(self, keff_trigger):
if not isinstance(keff_trigger, dict):
msg = 'Unable to set a trigger on keff from "{0}" which ' \
'is not a Python dictionary'.format(keff_trigger)
raise ValueError(msg)
elif 'type' not in keff_trigger:
msg = 'Unable to set a trigger on keff from "{0}" which ' \
'does not have a "type" key'.format(keff_trigger)
raise ValueError(msg)
elif keff_trigger['type'] not in ['variance', 'std_dev', 'rel_err']:
msg = 'Unable to set a trigger on keff with ' \
'type "{0}"'.format(keff_trigger['type'])
raise ValueError(msg)
elif 'threshold' not in keff_trigger:
msg = 'Unable to set a trigger on keff from "{0}" which ' \
'does not have a "threshold" key'.format(keff_trigger)
raise ValueError(msg)
elif not isinstance(keff_trigger['threshold'], Real):
msg = 'Unable to set a trigger on keff with ' \
'threshold "{0}"'.format(keff_trigger['threshold'])
raise ValueError(msg)
self._keff_trigger = keff_trigger
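    # Illustrative trigger (arbitrary threshold): stop once the standard
    # deviation of k-effective falls below 5 pcm.
    #
    #   settings.keff_trigger = {'type': 'std_dev', 'threshold': 5e-5}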
@energy_mode.setter
def energy_mode(self, energy_mode):
cv.check_value('energy mode', energy_mode,
['continuous-energy', 'multi-group'])
self._energy_mode = energy_mode
@max_order.setter
def max_order(self, max_order):
if max_order is not None:
cv.check_type('maximum scattering order', max_order, Integral)
cv.check_greater_than('maximum scattering order', max_order, 0,
True)
self._max_order = max_order
@source.setter
def source(self, source):
if not isinstance(source, MutableSequence):
source = [source]
self._source = cv.CheckedList(Source, 'source distributions', source)
@output.setter
def output(self, output):
if not isinstance(output, dict):
msg = 'Unable to set output to "{0}" which is not a Python ' \
'dictionary of string keys and boolean values'.format(output)
raise ValueError(msg)
for element in output:
keys = ['summary', 'cross_sections', 'tallies', 'distribmats']
if element not in keys:
msg = 'Unable to set output to "{0}" which is unsupported by ' \
'OpenMC'.format(element)
raise ValueError(msg)
if not isinstance(output[element], (bool, np.bool)):
msg = 'Unable to set output for "{0}" to a non-boolean ' \
'value "{1}"'.format(element, output[element])
raise ValueError(msg)
self._output = output
@output_path.setter
def output_path(self, output_path):
cv.check_type('output path', output_path, string_types)
self._output_path = output_path
@verbosity.setter
def verbosity(self, verbosity):
cv.check_type('verbosity', verbosity, Integral)
cv.check_greater_than('verbosity', verbosity, 1, True)
cv.check_less_than('verbosity', verbosity, 10, True)
self._verbosity = verbosity
@sourcepoint.setter
def sourcepoint(self, sourcepoint):
cv.check_type('sourcepoint options', sourcepoint, Mapping)
for key, value in sourcepoint.items():
if key == 'batches':
cv.check_type('sourcepoint batches', value, Iterable, Integral)
for batch in value:
cv.check_greater_than('sourcepoint batch', batch, 0)
elif key == 'separate':
cv.check_type('sourcepoint separate', value, bool)
elif key == 'write':
cv.check_type('sourcepoint write', value, bool)
elif key == 'overwrite':
cv.check_type('sourcepoint overwrite', value, bool)
else:
raise ValueError("Unknown key '{}' encountered when setting "
"sourcepoint options.".format(key))
self._sourcepoint = sourcepoint
@statepoint.setter
def statepoint(self, statepoint):
cv.check_type('statepoint options', statepoint, Mapping)
for key, value in statepoint.items():
if key == 'batches':
cv.check_type('statepoint batches', value, Iterable, Integral)
for batch in value:
cv.check_greater_than('statepoint batch', batch, 0)
else:
raise ValueError("Unknown key '{}' encountered when setting "
"statepoint options.".format(key))
self._statepoint = statepoint
@confidence_intervals.setter
def confidence_intervals(self, confidence_intervals):
cv.check_type('confidence interval', confidence_intervals, bool)
self._confidence_intervals = confidence_intervals
@cross_sections.setter
def cross_sections(self, cross_sections):
        warnings.warn('Settings.cross_sections has been deprecated and will be '
                      'removed in a future version. Materials.cross_sections '
                      'should be defined instead.', DeprecationWarning)
cv.check_type('cross sections', cross_sections, string_types)
self._cross_sections = cross_sections
@multipole_library.setter
def multipole_library(self, multipole_library):
warnings.warn('Settings.multipole_library has been deprecated and will '
'be removed in a future version. '
                      'Materials.multipole_library should be defined instead.',
DeprecationWarning)
cv.check_type('multipole library', multipole_library, string_types)
self._multipole_library = multipole_library
@ptables.setter
def ptables(self, ptables):
cv.check_type('probability tables', ptables, bool)
self._ptables = ptables
@run_cmfd.setter
def run_cmfd(self, run_cmfd):
cv.check_type('run_cmfd', run_cmfd, bool)
self._run_cmfd = run_cmfd
@seed.setter
def seed(self, seed):
cv.check_type('random number generator seed', seed, Integral)
cv.check_greater_than('random number generator seed', seed, 0)
self._seed = seed
@survival_biasing.setter
def survival_biasing(self, survival_biasing):
cv.check_type('survival biasing', survival_biasing, bool)
self._survival_biasing = survival_biasing
@cutoff.setter
def cutoff(self, cutoff):
if not isinstance(cutoff, Mapping):
msg = 'Unable to set cutoff from "{0}" which is not a '\
                  'Python dictionary'.format(cutoff)
raise ValueError(msg)
for key in cutoff:
if key == 'weight':
cv.check_type('weight cutoff', cutoff['weight'], Real)
cv.check_greater_than('weight cutoff', cutoff['weight'], 0.0)
elif key == 'weight_avg':
cv.check_type('average survival weight', cutoff['weight_avg'],
Real)
cv.check_greater_than('average survival weight',
cutoff['weight_avg'], 0.0)
elif key == 'energy':
cv.check_type('energy cutoff', cutoff['energy'], Real)
cv.check_greater_than('energy cutoff', cutoff['energy'], 0.0)
            else:
                msg = 'Unable to set cutoff to "{0}" which is unsupported by '\
                      'OpenMC'.format(key)
                raise ValueError(msg)
self._cutoff = cutoff
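    # Illustrative example of an accepted mapping (values are arbitrary):
    #     settings.cutoff = {'weight': 0.25, 'weight_avg': 1.0, 'energy': 1e-5}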
@entropy_mesh.setter
def entropy_mesh(self, entropy):
cv.check_type('entropy mesh', entropy, Mesh)
cv.check_length('entropy mesh dimension', entropy.dimension, 3)
cv.check_length('entropy mesh lower-left corner', entropy.lower_left, 3)
cv.check_length('entropy mesh upper-right corner', entropy.upper_right, 3)
self._entropy_mesh = entropy
@trigger_active.setter
def trigger_active(self, trigger_active):
cv.check_type('trigger active', trigger_active, bool)
self._trigger_active = trigger_active
@trigger_max_batches.setter
def trigger_max_batches(self, trigger_max_batches):
cv.check_type('trigger maximum batches', trigger_max_batches, Integral)
cv.check_greater_than('trigger maximum batches', trigger_max_batches, 0)
self._trigger_max_batches = trigger_max_batches
@trigger_batch_interval.setter
def trigger_batch_interval(self, trigger_batch_interval):
cv.check_type('trigger batch interval', trigger_batch_interval, Integral)
cv.check_greater_than('trigger batch interval', trigger_batch_interval, 0)
self._trigger_batch_interval = trigger_batch_interval
@no_reduce.setter
def no_reduce(self, no_reduce):
cv.check_type('no reduction option', no_reduce, bool)
self._no_reduce = no_reduce
@tabular_legendre.setter
def tabular_legendre(self, tabular_legendre):
cv.check_type('tabular_legendre settings', tabular_legendre, Mapping)
for key, value in tabular_legendre.items():
cv.check_value('tabular_legendre key', key,
['enable', 'num_points'])
if key == 'enable':
cv.check_type('enable tabular_legendre', value, bool)
elif key == 'num_points':
cv.check_type('num_points tabular_legendre', value, Integral)
cv.check_greater_than('num_points tabular_legendre', value, 0)
self._tabular_legendre = tabular_legendre
@temperature.setter
def temperature(self, temperature):
cv.check_type('temperature settings', temperature, Mapping)
for key, value in temperature.items():
cv.check_value('temperature key', key,
['default', 'method', 'tolerance', 'multipole'])
if key == 'default':
cv.check_type('default temperature', value, Real)
elif key == 'method':
cv.check_value('temperature method', value,
['nearest', 'interpolation'])
elif key == 'tolerance':
cv.check_type('temperature tolerance', value, Real)
elif key == 'multipole':
cv.check_type('temperature multipole', value, bool)
self._temperature = temperature
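    # Illustrative example of an accepted mapping (values are arbitrary); each
    # key is written out as a <temperature_{key}> element of settings.xml:
    #     settings.temperature = {'default': 293.6, 'method': 'interpolation',
    #                             'tolerance': 10.0, 'multipole': True}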
@threads.setter
def threads(self, threads):
cv.check_type('number of threads', threads, Integral)
cv.check_greater_than('number of threads', threads, 0)
self._threads = threads
@trace.setter
def trace(self, trace):
cv.check_type('trace', trace, Iterable, Integral)
cv.check_length('trace', trace, 3)
cv.check_greater_than('trace batch', trace[0], 0)
cv.check_greater_than('trace generation', trace[1], 0)
cv.check_greater_than('trace particle', trace[2], 0)
self._trace = trace
@track.setter
def track(self, track):
cv.check_type('track', track, Iterable, Integral)
if len(track) % 3 != 0:
msg = 'Unable to set the track to "{0}" since its length is ' \
'not a multiple of 3'.format(track)
raise ValueError(msg)
for t in zip(track[::3], track[1::3], track[2::3]):
cv.check_greater_than('track batch', t[0], 0)
            cv.check_greater_than('track generation', t[1], 0)
            cv.check_greater_than('track particle', t[2], 0)
self._track = track
@ufs_mesh.setter
def ufs_mesh(self, ufs_mesh):
cv.check_type('UFS mesh', ufs_mesh, Mesh)
cv.check_length('UFS mesh dimension', ufs_mesh.dimension, 3)
cv.check_length('UFS mesh lower-left corner', ufs_mesh.lower_left, 3)
cv.check_length('UFS mesh upper-right corner', ufs_mesh.upper_right, 3)
self._ufs_mesh = ufs_mesh
@dd_mesh_dimension.setter
def dd_mesh_dimension(self, dimension):
# TODO: remove this when domain decomposition is merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
cv.check_type('DD mesh dimension', dimension, Iterable, Integral)
cv.check_length('DD mesh dimension', dimension, 3)
self._dd_mesh_dimension = dimension
@dd_mesh_lower_left.setter
def dd_mesh_lower_left(self, lower_left):
# TODO: remove this when domain decomposition is merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
cv.check_type('DD mesh lower left corner', lower_left, Iterable, Real)
cv.check_length('DD mesh lower left corner', lower_left, 3)
self._dd_mesh_lower_left = lower_left
@dd_mesh_upper_right.setter
def dd_mesh_upper_right(self, upper_right):
# TODO: remove this when domain decomposition is merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
cv.check_type('DD mesh upper right corner', upper_right, Iterable, Real)
cv.check_length('DD mesh upper right corner', upper_right, 3)
self._dd_mesh_upper_right = upper_right
@dd_nodemap.setter
def dd_nodemap(self, nodemap):
# TODO: remove this when domain decomposition is merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
cv.check_type('DD nodemap', nodemap, Iterable)
nodemap = np.array(nodemap).flatten()
if self._dd_mesh_dimension is None:
msg = 'Must set DD mesh dimension before setting the nodemap'
raise ValueError(msg)
else:
len_nodemap = np.prod(self._dd_mesh_dimension)
            if len(nodemap) != len_nodemap:
msg = 'Unable to set DD nodemap with length "{0}" which ' \
'does not have the same dimensionality as the domain ' \
'mesh'.format(len(nodemap))
raise ValueError(msg)
self._dd_nodemap = nodemap
@dd_allow_leakage.setter
def dd_allow_leakage(self, allow):
# TODO: remove this when domain decomposition is merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
cv.check_type('DD allow leakage', allow, bool)
self._dd_allow_leakage = allow
@dd_count_interactions.setter
def dd_count_interactions(self, interactions):
# TODO: remove this when domain decomposition is merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
cv.check_type('DD count interactions', interactions, bool)
self._dd_count_interactions = interactions
@resonance_scattering.setter
def resonance_scattering(self, res):
cv.check_type('resonance scattering settings', res, Mapping)
keys = ('enable', 'method', 'energy_min', 'energy_max', 'nuclides')
for key, value in res.items():
cv.check_value('resonance scattering dictionary key', key, keys)
if key == 'enable':
cv.check_type('resonance scattering enable', value, bool)
elif key == 'method':
cv.check_value('resonance scattering method', value,
_RES_SCAT_METHODS)
elif key == 'energy_min':
name = 'resonance scattering minimum energy'
cv.check_type(name, value, Real)
cv.check_greater_than(name, value, 0)
elif key == 'energy_max':
                name = 'resonance scattering maximum energy'
cv.check_type(name, value, Real)
cv.check_greater_than(name, value, 0)
elif key == 'nuclides':
cv.check_type('resonance scattering nuclides', value,
Iterable, string_types)
self._resonance_scattering = res
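    # Illustrative example of an accepted mapping (values are arbitrary; the
    # optional 'method' key, omitted here, must be one of _RES_SCAT_METHODS):
    #     settings.resonance_scattering = {'enable': True, 'energy_min': 0.01,
    #                                      'energy_max': 1000.0,
    #                                      'nuclides': ['U238', 'U235']}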
@volume_calculations.setter
def volume_calculations(self, vol_calcs):
if not isinstance(vol_calcs, MutableSequence):
vol_calcs = [vol_calcs]
self._volume_calculations = cv.CheckedList(
VolumeCalculation, 'stochastic volume calculations', vol_calcs)
@create_fission_neutrons.setter
def create_fission_neutrons(self, create_fission_neutrons):
        cv.check_type('whether to create fission neutrons',
create_fission_neutrons, bool)
self._create_fission_neutrons = create_fission_neutrons
def _create_run_mode_subelement(self, root):
elem = ET.SubElement(root, "run_mode")
elem.text = self._run_mode
def _create_batches_subelement(self, run_mode_element):
if self._batches is not None:
element = ET.SubElement(run_mode_element, "batches")
element.text = str(self._batches)
def _create_generations_per_batch_subelement(self, run_mode_element):
if self._generations_per_batch is not None:
element = ET.SubElement(run_mode_element, "generations_per_batch")
element.text = str(self._generations_per_batch)
def _create_inactive_subelement(self, run_mode_element):
if self._inactive is not None:
element = ET.SubElement(run_mode_element, "inactive")
element.text = str(self._inactive)
def _create_particles_subelement(self, run_mode_element):
if self._particles is not None:
element = ET.SubElement(run_mode_element, "particles")
element.text = str(self._particles)
def _create_keff_trigger_subelement(self, run_mode_element):
if self._keff_trigger is not None:
element = ET.SubElement(run_mode_element, "keff_trigger")
for key in self._keff_trigger:
subelement = ET.SubElement(element, key)
subelement.text = str(self._keff_trigger[key]).lower()
def _create_energy_mode_subelement(self, root):
if self._energy_mode is not None:
element = ET.SubElement(root, "energy_mode")
element.text = str(self._energy_mode)
def _create_max_order_subelement(self, root):
if self._max_order is not None:
element = ET.SubElement(root, "max_order")
element.text = str(self._max_order)
def _create_source_subelement(self, root):
for source in self.source:
root.append(source.to_xml_element())
def _create_volume_calcs_subelement(self, root):
for calc in self.volume_calculations:
root.append(calc.to_xml_element())
def _create_output_subelement(self, root):
if self._output is not None:
element = ET.SubElement(root, "output")
for key in self._output:
subelement = ET.SubElement(element, key)
subelement.text = str(self._output[key]).lower()
if self._output_path is not None:
element = ET.SubElement(root, "output_path")
element.text = self._output_path
def _create_verbosity_subelement(self, root):
if self._verbosity is not None:
element = ET.SubElement(root, "verbosity")
element.text = str(self._verbosity)
def _create_statepoint_subelement(self, root):
if self._statepoint:
element = ET.SubElement(root, "state_point")
if 'batches' in self._statepoint:
subelement = ET.SubElement(element, "batches")
subelement.text = ' '.join(
str(x) for x in self._statepoint['batches'])
def _create_sourcepoint_subelement(self, root):
if self._sourcepoint:
element = ET.SubElement(root, "source_point")
if 'batches' in self._sourcepoint:
subelement = ET.SubElement(element, "batches")
subelement.text = ' '.join(
str(x) for x in self._sourcepoint['batches'])
if 'separate' in self._sourcepoint:
subelement = ET.SubElement(element, "separate")
subelement.text = str(self._sourcepoint['separate']).lower()
if 'write' in self._sourcepoint:
subelement = ET.SubElement(element, "write")
subelement.text = str(self._sourcepoint['write']).lower()
# Overwrite latest subelement
if 'overwrite' in self._sourcepoint:
subelement = ET.SubElement(element, "overwrite_latest")
subelement.text = str(self._sourcepoint['overwrite']).lower()
def _create_confidence_intervals(self, root):
if self._confidence_intervals is not None:
element = ET.SubElement(root, "confidence_intervals")
element.text = str(self._confidence_intervals).lower()
def _create_cross_sections_subelement(self, root):
if self._cross_sections is not None:
element = ET.SubElement(root, "cross_sections")
element.text = str(self._cross_sections)
def _create_multipole_library_subelement(self, root):
if self._multipole_library is not None:
element = ET.SubElement(root, "multipole_library")
element.text = str(self._multipole_library)
def _create_ptables_subelement(self, root):
if self._ptables is not None:
element = ET.SubElement(root, "ptables")
element.text = str(self._ptables).lower()
def _create_run_cmfd_subelement(self, root):
if self._run_cmfd is not None:
element = ET.SubElement(root, "run_cmfd")
element.text = str(self._run_cmfd).lower()
def _create_seed_subelement(self, root):
if self._seed is not None:
element = ET.SubElement(root, "seed")
element.text = str(self._seed)
def _create_survival_biasing_subelement(self, root):
if self._survival_biasing is not None:
element = ET.SubElement(root, "survival_biasing")
element.text = str(self._survival_biasing).lower()
def _create_cutoff_subelement(self, root):
if self._cutoff is not None:
element = ET.SubElement(root, "cutoff")
if 'weight' in self._cutoff:
subelement = ET.SubElement(element, "weight")
subelement.text = str(self._cutoff['weight'])
if 'weight_avg' in self._cutoff:
subelement = ET.SubElement(element, "weight_avg")
subelement.text = str(self._cutoff['weight_avg'])
if 'energy' in self._cutoff:
subelement = ET.SubElement(element, "energy")
subelement.text = str(self._cutoff['energy'])
def _create_entropy_subelement(self, root):
if self._entropy_mesh is not None:
element = ET.SubElement(root, "entropy")
if self._entropy_mesh.dimension is not None:
subelement = ET.SubElement(element, "dimension")
subelement.text = ' '.join(
str(x) for x in self._entropy_mesh.dimension)
subelement = ET.SubElement(element, "lower_left")
subelement.text = ' '.join(
str(x) for x in self._entropy_mesh.lower_left)
subelement = ET.SubElement(element, "upper_right")
subelement.text = ' '.join(
str(x) for x in self._entropy_mesh.upper_right)
def _create_trigger_subelement(self, root):
if self._trigger_active is not None:
trigger_element = ET.SubElement(root, "trigger")
element = ET.SubElement(trigger_element, "active")
element.text = str(self._trigger_active).lower()
if self._trigger_max_batches is not None:
element = ET.SubElement(trigger_element, "max_batches")
element.text = str(self._trigger_max_batches)
if self._trigger_batch_interval is not None:
element = ET.SubElement(trigger_element, "batch_interval")
element.text = str(self._trigger_batch_interval)
def _create_no_reduce_subelement(self, root):
if self._no_reduce is not None:
element = ET.SubElement(root, "no_reduce")
element.text = str(self._no_reduce).lower()
def _create_tabular_legendre_subelements(self, root):
if self.tabular_legendre:
element = ET.SubElement(root, "tabular_legendre")
subelement = ET.SubElement(element, "enable")
subelement.text = str(self._tabular_legendre['enable']).lower()
if 'num_points' in self._tabular_legendre:
subelement = ET.SubElement(element, "num_points")
subelement.text = str(self._tabular_legendre['num_points'])
def _create_temperature_subelements(self, root):
if self.temperature:
for key, value in sorted(self.temperature.items()):
element = ET.SubElement(root,
"temperature_{}".format(key))
element.text = str(value)
def _create_threads_subelement(self, root):
if self._threads is not None:
element = ET.SubElement(root, "threads")
element.text = str(self._threads)
def _create_trace_subelement(self, root):
if self._trace is not None:
element = ET.SubElement(root, "trace")
element.text = ' '.join(map(str, self._trace))
def _create_track_subelement(self, root):
if self._track is not None:
element = ET.SubElement(root, "track")
element.text = ' '.join(map(str, self._track))
def _create_ufs_subelement(self, root):
if self._ufs_mesh is not None:
element = ET.SubElement(root, "uniform_fs")
subelement = ET.SubElement(element, "dimension")
subelement.text = ' '.join(str(x) for x in
self._ufs_mesh.dimension)
subelement = ET.SubElement(element, "lower_left")
subelement.text = ' '.join(str(x) for x in
self._ufs_mesh.lower_left)
subelement = ET.SubElement(element, "upper_right")
subelement.text = ' '.join(str(x) for x in
self._ufs_mesh.upper_right)
def _create_dd_subelement(self, root):
if self._dd_mesh_lower_left is not None and \
self._dd_mesh_upper_right is not None and \
self._dd_mesh_dimension is not None:
element = ET.SubElement(root, "domain_decomposition")
subelement = ET.SubElement(element, "mesh")
subsubelement = ET.SubElement(subelement, "dimension")
subsubelement.text = ' '.join(map(str, self._dd_mesh_dimension))
subsubelement = ET.SubElement(subelement, "lower_left")
subsubelement.text = ' '.join(map(str, self._dd_mesh_lower_left))
subsubelement = ET.SubElement(subelement, "upper_right")
subsubelement.text = ' '.join(map(str, self._dd_mesh_upper_right))
if self._dd_nodemap is not None:
subelement = ET.SubElement(element, "nodemap")
subelement.text = ' '.join(map(str, self._dd_nodemap))
subelement = ET.SubElement(element, "allow_leakage")
subelement.text = str(self._dd_allow_leakage).lower()
subelement = ET.SubElement(element, "count_interactions")
subelement.text = str(self._dd_count_interactions).lower()
def _create_resonance_scattering_subelement(self, root):
res = self.resonance_scattering
if res:
elem = ET.SubElement(root, 'resonance_scattering')
if 'enable' in res:
subelem = ET.SubElement(elem, 'enable')
subelem.text = str(res['enable']).lower()
if 'method' in res:
subelem = ET.SubElement(elem, 'method')
subelem.text = res['method']
if 'energy_min' in res:
subelem = ET.SubElement(elem, 'energy_min')
subelem.text = str(res['energy_min'])
if 'energy_max' in res:
subelem = ET.SubElement(elem, 'energy_max')
subelem.text = str(res['energy_max'])
if 'nuclides' in res:
subelem = ET.SubElement(elem, 'nuclides')
subelem.text = ' '.join(res['nuclides'])
def _create_create_fission_neutrons_subelement(self, root):
if self._create_fission_neutrons is not None:
elem = ET.SubElement(root, "create_fission_neutrons")
elem.text = str(self._create_fission_neutrons).lower()
def export_to_xml(self, path='settings.xml'):
"""Export simulation settings to an XML file.
Parameters
----------
path : str
Path to file to write. Defaults to 'settings.xml'.
"""
# Reset xml element tree
root_element = ET.Element("settings")
self._create_run_mode_subelement(root_element)
self._create_particles_subelement(root_element)
self._create_batches_subelement(root_element)
self._create_inactive_subelement(root_element)
self._create_generations_per_batch_subelement(root_element)
self._create_keff_trigger_subelement(root_element)
self._create_source_subelement(root_element)
self._create_output_subelement(root_element)
self._create_statepoint_subelement(root_element)
self._create_sourcepoint_subelement(root_element)
self._create_confidence_intervals(root_element)
self._create_cross_sections_subelement(root_element)
self._create_multipole_library_subelement(root_element)
self._create_energy_mode_subelement(root_element)
self._create_max_order_subelement(root_element)
self._create_ptables_subelement(root_element)
self._create_run_cmfd_subelement(root_element)
self._create_seed_subelement(root_element)
self._create_survival_biasing_subelement(root_element)
self._create_cutoff_subelement(root_element)
self._create_entropy_subelement(root_element)
self._create_trigger_subelement(root_element)
self._create_no_reduce_subelement(root_element)
self._create_threads_subelement(root_element)
self._create_verbosity_subelement(root_element)
self._create_tabular_legendre_subelements(root_element)
self._create_temperature_subelements(root_element)
self._create_trace_subelement(root_element)
self._create_track_subelement(root_element)
self._create_ufs_subelement(root_element)
self._create_dd_subelement(root_element)
self._create_resonance_scattering_subelement(root_element)
self._create_volume_calcs_subelement(root_element)
self._create_create_fission_neutrons_subelement(root_element)
# Clean the indentation in the file to be user-readable
clean_xml_indentation(root_element)
# Write the XML Tree to the settings.xml file
tree = ET.ElementTree(root_element)
tree.write(path, xml_declaration=True, encoding='utf-8', method="xml")
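# Illustrative usage sketch (not part of the class above).  It assumes the
# class defined in this module is instantiated as Settings() with no required
# arguments and that the batch/particle setters referenced earlier in the
# class exist; all numeric values are arbitrary.
def _example_export_settings():
    settings = Settings()
    settings.batches = 100
    settings.inactive = 10
    settings.particles = 1000
    settings.sourcepoint = {'batches': [50, 100], 'separate': True}
    settings.export_to_xml('settings.xml')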
|
|
# -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
# Copyright (c) 2015 Angelos Tzotsos
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
from pycsw.core import util
from pycsw.core.etree import etree
from pycsw.ogc.gml import gml3
LOGGER = logging.getLogger(__name__)
MODEL = {
'Conformance': {
'values': [
'ImplementsQuery',
'ImplementsAdHocQuery',
'ImplementsFunctions',
            'ImplementsResourceId',
'ImplementsMinStandardFilter',
'ImplementsStandardFilter',
'ImplementsMinSpatialFilter',
'ImplementsSpatialFilter',
'ImplementsMinTemporalFilter',
'ImplementsTemporalFilter',
'ImplementsVersionNav',
'ImplementsSorting',
'ImplementsExtendedOperators',
'ImplementsMinimumXPath',
'ImplementsSchemaElementFunc'
]
},
'GeometryOperands': {
'values': gml3.TYPES
},
'SpatialOperators': {
'values': ['BBOX', 'Beyond', 'Contains', 'Crosses', 'Disjoint',
'DWithin', 'Equals', 'Intersects', 'Overlaps', 'Touches', 'Within']
},
'ComparisonOperators': {
'fes20:PropertyIsBetween': {'opname': 'PropertyIsBetween', 'opvalue': 'and'},
'fes20:PropertyIsEqualTo': {'opname': 'PropertyIsEqualTo', 'opvalue': '='},
'fes20:PropertyIsGreaterThan': {'opname': 'PropertyIsGreaterThan', 'opvalue': '>'},
'fes20:PropertyIsGreaterThanOrEqualTo': {
'opname': 'PropertyIsGreaterThanOrEqualTo', 'opvalue': '>='},
'fes20:PropertyIsLessThan': {'opname': 'PropertyIsLessThan', 'opvalue': '<'},
'fes20:PropertyIsLessThanOrEqualTo': {
'opname': 'PropertyIsLessThanOrEqualTo', 'opvalue': '<='},
'fes20:PropertyIsLike': {'opname': 'PropertyIsLike', 'opvalue': 'like'},
'fes20:PropertyIsNotEqualTo': {'opname': 'PropertyIsNotEqualTo', 'opvalue': '!='},
'fes20:PropertyIsNull': {'opname': 'PropertyIsNull', 'opvalue': 'is null'},
},
'Functions': {
'length': {'returns': 'xs:string'},
'lower': {'returns': 'xs:string'},
'ltrim': {'returns': 'xs:string'},
'rtrim': {'returns': 'xs:string'},
'trim': {'returns': 'xs:string'},
'upper': {'returns': 'xs:string'},
},
'Ids': {
'values': ['csw30:id']
}
}
def parse(element, queryables, dbtype, nsmap, orm='sqlalchemy', language='english', fts=False):
"""OGC Filter object support"""
boq = None
is_pg = dbtype.startswith('postgresql')
tmp = element.xpath('fes20:And|fes20:Or|fes20:Not', namespaces=nsmap)
if len(tmp) > 0: # this is binary logic query
element_name = etree.QName(tmp[0]).localname
boq = ' %s ' % element_name.lower()
LOGGER.debug('Binary logic detected; operator=%s', boq)
tmp = tmp[0]
else:
tmp = element
pvalue_serial = [0]
def assign_param():
if orm == 'django':
return '%s'
param = ':pvalue%d' % pvalue_serial[0]
pvalue_serial[0] += 1
return param
def _get_comparison_expression(elem):
"""return the SQL expression based on Filter query"""
fname = None
matchcase = elem.attrib.get('matchCase')
wildcard = elem.attrib.get('wildCard')
singlechar = elem.attrib.get('singleChar')
expression = None
if wildcard is None:
wildcard = '%'
if singlechar is None:
singlechar = '_'
if (elem.xpath('child::*')[0].tag ==
util.nspath_eval('fes20:Function', nsmap)):
LOGGER.debug('fes20:Function detected')
if (elem.xpath('child::*')[0].attrib['name'] not in
MODEL['Functions']):
raise RuntimeError('Invalid fes20:Function: %s' %
(elem.xpath('child::*')[0].attrib['name']))
fname = elem.xpath('child::*')[0].attrib['name']
try:
LOGGER.debug('Testing existence of fes20:ValueReference')
pname = queryables[elem.find(util.nspath_eval('fes20:Function/fes20:ValueReference', nsmap)).text]['dbcol']
except Exception as err:
raise RuntimeError('Invalid PropertyName: %s. %s' % (elem.find(util.nspath_eval('fes20:Function/fes20:ValueReference', nsmap)).text, str(err))) from err
else:
try:
LOGGER.debug('Testing existence of fes20:ValueReference')
pname = queryables[elem.find(
util.nspath_eval('fes20:ValueReference', nsmap)).text]['dbcol']
except Exception as err:
raise RuntimeError('Invalid PropertyName: %s. %s' %
(elem.find(util.nspath_eval('fes20:ValueReference',
nsmap)).text, str(err))) from err
if (elem.tag != util.nspath_eval('fes20:PropertyIsBetween', nsmap)):
if elem.tag in [util.nspath_eval('fes20:%s' % n, nsmap) for n in
MODEL['SpatialOperators']['values']]:
boolean_true = '\'true\''
boolean_false = '\'false\''
if dbtype == 'mysql':
boolean_true = 'true'
boolean_false = 'false'
return "%s = %s" % (_get_spatial_operator(queryables['pycsw:BoundingBox'], elem, dbtype, nsmap), boolean_true)
else:
pval = elem.find(util.nspath_eval('fes20:Literal', nsmap)).text
com_op = _get_comparison_operator(elem)
LOGGER.debug('Comparison operator: %s', com_op)
# if this is a case insensitive search
# then set the DB-specific LIKE comparison operator
LOGGER.debug('Setting csw:AnyText property')
anytext = queryables['csw:AnyText']['dbcol']
if ((matchcase is not None and matchcase == 'false') or
pname == anytext):
com_op = 'ilike' if is_pg else 'like'
if (elem.tag == util.nspath_eval('fes20:PropertyIsBetween', nsmap)):
com_op = 'between'
lower_boundary = elem.find(
util.nspath_eval('fes20:LowerBoundary/fes20:Literal',
nsmap)).text
upper_boundary = elem.find(
util.nspath_eval('fes20:UpperBoundary/fes20:Literal',
nsmap)).text
expression = "%s %s %s and %s" % \
(pname, com_op, assign_param(), assign_param())
values.append(lower_boundary)
values.append(upper_boundary)
else:
if pname == anytext and is_pg and fts:
LOGGER.debug('PostgreSQL FTS specific search')
# do nothing, let FTS do conversion (#212)
pvalue = pval
else:
LOGGER.debug('PostgreSQL non-FTS specific search')
pvalue = pval.replace(wildcard, '%').replace(singlechar, '_')
if pname == anytext: # pad anytext with wildcards
LOGGER.debug('PostgreSQL non-FTS specific anytext search')
LOGGER.debug('old value: %s', pval)
pvalue = '%%%s%%' % pvalue.rstrip('%').lstrip('%')
LOGGER.debug('new value: %s', pvalue)
values.append(pvalue)
if boq == ' not ':
if fname is not None:
expression = "%s is null or not %s(%s) %s %s" % \
(pname, fname, pname, com_op, assign_param())
elif pname == anytext and is_pg and fts:
LOGGER.debug('PostgreSQL FTS specific search')
expression = ("%s is null or not plainto_tsquery('%s', %s) @@ anytext_tsvector" %
(anytext, language, assign_param()))
else:
LOGGER.debug('PostgreSQL non-FTS specific search')
expression = "%s is null or not %s %s %s" % \
(pname, pname, com_op, assign_param())
else:
if fname is not None:
expression = "%s(%s) %s %s" % \
(fname, pname, com_op, assign_param())
elif pname == anytext and is_pg and fts:
LOGGER.debug('PostgreSQL FTS specific search')
expression = ("plainto_tsquery('%s', %s) @@ anytext_tsvector" %
(language, assign_param()))
else:
LOGGER.debug('PostgreSQL non-FTS specific search')
expression = "%s %s %s" % (pname, com_op, assign_param())
return expression
queries = []
queries_nested = []
values = []
LOGGER.debug('Scanning children elements')
for child in tmp.xpath('child::*'):
com_op = ''
boolean_true = '\'true\''
boolean_false = '\'false\''
if dbtype == 'mysql':
boolean_true = 'true'
boolean_false = 'false'
if child.tag == util.nspath_eval('fes20:Not', nsmap):
LOGGER.debug('fes20:Not query detected')
child_not = child.xpath('child::*')[0]
if child_not.tag in \
[util.nspath_eval('fes20:%s' % n, nsmap) for n in
MODEL['SpatialOperators']['values']]:
LOGGER.debug('fes20:Not / spatial operator detected: %s', child.tag)
queries.append("%s = %s" %
(_get_spatial_operator(
queryables['pycsw:BoundingBox'],
child.xpath('child::*')[0], dbtype, nsmap),
boolean_false))
else:
LOGGER.debug('fes20:Not / comparison operator detected: %s', child.tag)
queries.append('not %s' % _get_comparison_expression(child_not))
elif child.tag in \
[util.nspath_eval('fes20:%s' % n, nsmap) for n in
MODEL['SpatialOperators']['values']]:
LOGGER.debug('spatial operator detected: %s', child.tag)
if boq is not None and boq == ' not ':
            # for fes20:Not spatial queries in PostGIS we must explicitly
            # test that pycsw:BoundingBox is null as well
# TODO: Do we need the same for 'postgresql+postgis+native'???
if dbtype == 'postgresql+postgis+wkt':
LOGGER.debug('Setting bbox is null test in PostgreSQL')
queries.append("%s = %s or %s is null" %
(_get_spatial_operator(
queryables['pycsw:BoundingBox'],
child, dbtype, nsmap), boolean_false,
queryables['pycsw:BoundingBox']))
else:
queries.append("%s = %s" %
(_get_spatial_operator(
queryables['pycsw:BoundingBox'],
child, dbtype, nsmap), boolean_false))
else:
queries.append("%s = %s" %
(_get_spatial_operator(
queryables['pycsw:BoundingBox'],
child, dbtype, nsmap), boolean_true))
elif child.tag == util.nspath_eval('fes20:FeatureId', nsmap):
LOGGER.debug('fes20:FeatureId filter detected')
queries.append("%s = %s" % (queryables['pycsw:Identifier'], assign_param()))
values.append(child.attrib.get('fid'))
else: # comparison operator
LOGGER.debug('Comparison operator processing')
child_tag_name = etree.QName(child).localname
tagname = ' %s ' % child_tag_name.lower()
if tagname in [' or ', ' and ']: # this is a nested binary logic query
LOGGER.debug('Nested binary logic detected; operator=%s', tagname)
for child2 in child.xpath('child::*'):
queries_nested.append(_get_comparison_expression(child2))
queries.append('(%s)' % tagname.join(queries_nested))
else:
queries.append(_get_comparison_expression(child))
where = boq.join(queries) if (boq is not None and boq != ' not ') \
else queries[0]
return where, values
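# Illustrative sketch (not part of pycsw proper): roughly how parse() can be
# driven with a minimal fes20:Filter.  The namespace URI and the queryables
# mapping below are assumptions for demonstration; real callers pass the
# repository's queryable definitions.
def _example_parse_filter():
    nsmap = {'fes20': 'http://www.opengis.net/fes/2.0'}
    xml = ('<fes20:Filter xmlns:fes20="http://www.opengis.net/fes/2.0">'
           '<fes20:PropertyIsEqualTo>'
           '<fes20:ValueReference>dc:title</fes20:ValueReference>'
           '<fes20:Literal>foo</fes20:Literal>'
           '</fes20:PropertyIsEqualTo>'
           '</fes20:Filter>')
    queryables = {
        'dc:title': {'dbcol': 'title'},
        'csw:AnyText': {'dbcol': 'anytext'},
        'pycsw:BoundingBox': {'dbcol': 'wkt_geometry'},
        'pycsw:Identifier': {'dbcol': 'identifier'},
    }
    where, values = parse(etree.fromstring(xml), queryables, 'sqlite', nsmap)
    # where == 'title = :pvalue0', values == ['foo']
    return where, values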
def _get_spatial_operator(geomattr, element, dbtype, nsmap, postgis_geometry_column='wkb_geometry'):
"""return the spatial predicate function"""
property_name = element.find(util.nspath_eval('fes20:ValueReference', nsmap))
distance = element.find(util.nspath_eval('fes20:Distance', nsmap))
distance = 'false' if distance is None else distance.text
LOGGER.debug('Scanning for spatial property name')
if property_name is None:
raise RuntimeError('Missing fes20:ValueReference in spatial filter')
if (property_name.text.find('BoundingBox') == -1 and
property_name.text.find('Envelope') == -1):
raise RuntimeError('Invalid fes20:ValueReference in spatial filter: %s' %
property_name.text)
geometry = gml3.Geometry(element, nsmap)
    # decide whether to apply spatial ranking to the results
set_spatial_ranking(geometry)
spatial_predicate = etree.QName(element).localname.lower()
LOGGER.debug('Spatial predicate: %s', spatial_predicate)
if dbtype == 'mysql': # adjust spatial query for MySQL
LOGGER.debug('Adjusting spatial query for MySQL')
if spatial_predicate == 'bbox':
spatial_predicate = 'intersects'
if spatial_predicate == 'beyond':
spatial_query = "ifnull(distance(geomfromtext(%s), \
geomfromtext('%s')) > convert(%s, signed),false)" % \
(geomattr, geometry.wkt, distance)
elif spatial_predicate == 'dwithin':
spatial_query = "ifnull(distance(geomfromtext(%s), \
geomfromtext('%s')) <= convert(%s, signed),false)" % \
(geomattr, geometry.wkt, distance)
else:
spatial_query = "ifnull(%s(geomfromtext(%s), \
geomfromtext('%s')),false)" % \
(spatial_predicate, geomattr, geometry.wkt)
elif dbtype == 'postgresql+postgis+wkt': # adjust spatial query for PostGIS with WKT geometry column
LOGGER.debug('Adjusting spatial query for PostgreSQL+PostGIS+WKT')
if spatial_predicate == 'bbox':
spatial_predicate = 'intersects'
if spatial_predicate == 'beyond':
spatial_query = "not st_dwithin(st_geomfromtext(%s), \
st_geomfromtext('%s'), %f)" % \
(geomattr, geometry.wkt, float(distance))
elif spatial_predicate == 'dwithin':
spatial_query = "st_dwithin(st_geomfromtext(%s), \
st_geomfromtext('%s'), %f)" % \
(geomattr, geometry.wkt, float(distance))
else:
spatial_query = "st_%s(st_geomfromtext(%s), \
st_geomfromtext('%s'))" % \
(spatial_predicate, geomattr, geometry.wkt)
elif dbtype == 'postgresql+postgis+native': # adjust spatial query for PostGIS with native geometry
LOGGER.debug('Adjusting spatial query for PostgreSQL+PostGIS+native')
if spatial_predicate == 'bbox':
spatial_predicate = 'intersects'
if spatial_predicate == 'beyond':
spatial_query = "not st_dwithin(%s, \
st_geomfromtext('%s',4326), %f)" % \
(postgis_geometry_column, geometry.wkt, float(distance))
elif spatial_predicate == 'dwithin':
spatial_query = "st_dwithin(%s, \
st_geomfromtext('%s',4326), %f)" % \
(postgis_geometry_column, geometry.wkt, float(distance))
else:
spatial_query = "st_%s(%s, \
st_geomfromtext('%s',4326))" % \
(spatial_predicate, postgis_geometry_column, geometry.wkt)
else:
LOGGER.debug('Adjusting spatial query')
spatial_query = "query_spatial(%s,'%s','%s','%s')" % \
(geomattr, geometry.wkt, spatial_predicate, distance)
return spatial_query
def _get_comparison_operator(element):
"""return the SQL operator based on Filter query"""
element_name = etree.QName(element).localname
return MODEL['ComparisonOperators']['fes20:%s' % element_name]['opvalue']
def set_spatial_ranking(geometry):
"""Given that we have a spatial query in fes20:Filter we check the type of geometry
and set the ranking variables"""
if util.ranking_enabled:
if geometry.type in ['Polygon', 'Envelope']:
util.ranking_pass = True
util.ranking_query_geometry = geometry.wkt
elif geometry.type in ['LineString', 'Point']:
from shapely.geometry.base import BaseGeometry
from shapely.geometry import box
from shapely.wkt import loads,dumps
ls = loads(geometry.wkt)
b = ls.bounds
if geometry.type == 'LineString':
tmp_box = box(b[0],b[1],b[2],b[3])
tmp_wkt = dumps(tmp_box)
if tmp_box.area > 0:
util.ranking_pass = True
util.ranking_query_geometry = tmp_wkt
elif geometry.type == 'Point':
tmp_box = box((float(b[0])-1.0),(float(b[1])-1.0),(float(b[2])+1.0),(float(b[3])+1.0))
tmp_wkt = dumps(tmp_box)
util.ranking_pass = True
util.ranking_query_geometry = tmp_wkt
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General IAM utilities used by the Cloud SDK."""
from apitools.base.protorpclite import messages as apitools_messages
from apitools.base.py import encoding
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions as gcloud_exceptions
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import yaml
msgs = core_apis.GetMessagesModule('iam', 'v1')
MANAGED_BY = (msgs.IamProjectsServiceAccountsKeysListRequest
.KeyTypesValueValuesEnum)
CREATE_KEY_TYPES = (msgs.CreateServiceAccountKeyRequest
.PrivateKeyTypeValueValuesEnum)
KEY_TYPES = (msgs.ServiceAccountKey.PrivateKeyTypeValueValuesEnum)
PUBLIC_KEY_TYPES = (
msgs.IamProjectsServiceAccountsKeysGetRequest.PublicKeyTypeValueValuesEnum)
class IamEtagReadError(core_exceptions.Error):
"""IamEtagReadError is raised when etag is badly formatted."""
def _AddRoleArgument(
parser, help_text, completion_resource_arg, completion_resource_collection):
"""Helper function to add the --role flag with remote completion."""
def CompletionCallback(parsed_args):
resource_ref = resources.REGISTRY.Parse(
getattr(parsed_args, completion_resource_arg),
collection=completion_resource_collection)
resource_uri = resource_ref.SelfLink()
return ['beta', 'iam', 'list-grantable-roles', '--format=value(name)',
resource_uri]
have_completion = (completion_resource_arg and completion_resource_collection)
parser.add_argument(
'--role', required=True,
completion_resource='iam.roles' if have_completion else None,
list_command_callback_fn=CompletionCallback if have_completion else None,
help=help_text)
def AddArgsForAddIamPolicyBinding(
parser, completion_resource_arg=None, completion_resource_collection=None):
"""Adds the IAM policy binding arguments for role and members.
Args:
    parser: An argparse.ArgumentParser-like object to which we add the args.
completion_resource_arg: str, Name of the argument that holds the resource
      to which the policy is applied.
completion_resource_collection: str, Collection of the resource.
completion_resource_arg and completion_resource_collection are optional,
but role tab completion is not possible without specifying them.
Raises:
ArgumentError if one of the arguments is already defined in the parser.
"""
_AddRoleArgument(parser, 'Define the role of the member.',
completion_resource_arg, completion_resource_collection)
parser.add_argument(
'--member', required=True,
help='The member to add to the binding.')
def AddArgsForRemoveIamPolicyBinding(
parser, completion_resource_arg=None, completion_resource_collection=None):
"""Adds the IAM policy binding arguments for role and members.
Args:
    parser: An argparse.ArgumentParser-like object to which we add the args.
    completion_resource_arg: str, Name of the argument that holds the resource
      to which the policy is applied.
completion_resource_collection: str, Collection of the resource.
completion_resource_arg and completion_resource_collection are optional,
but role tab completion is not possible without specifying them.
Raises:
ArgumentError if one of the arguments is already defined in the parser.
"""
_AddRoleArgument(parser, 'The role to remove the member from.',
completion_resource_arg, completion_resource_collection)
parser.add_argument(
'--member', required=True,
help='The member to remove from the binding.')
def AddBindingToIamPolicy(messages, policy, member, role):
"""Given an IAM policy, add new bindings as specified by args.
An IAM binding is a pair of role and member. Check if the arguments passed
define both the role and member attribute, create a binding out of their
values, and append it to the policy.
Args:
messages: ToolResults API message classes generated by apitools.
Required to create new bindings of the proper type.
policy: IAM policy to which we want to add the bindings.
member: The member to add to IAM policy.
role: The role the member should have.
"""
# First check all bindings to see if the member is already in a binding with
# the same role.
# A policy can have multiple bindings with the same role. This is why we need
# to explicitly do this as a separate, first, step and check all bindings.
for binding in policy.bindings:
if binding.role == role:
if member in binding.members:
return # Nothing to do. Member already has the role.
# Second step: check to see if a binding already exists with the same role and
# add the member to this binding. This is to not create new bindings with
# the same role.
for binding in policy.bindings:
if binding.role == role:
binding.members.append(member)
return
# Third step: no binding was found that has the same role. Create a new one.
policy.bindings.append(messages.Binding(
members=[member], role='{0}'.format(role)))
def RemoveBindingFromIamPolicy(policy, member, role):
"""Given an IAM policy, add remove bindings as specified by the args.
An IAM binding is a pair of role and member. Check if the arguments passed
define both the role and member attribute, search the policy for a binding
that contains this role and member, and remove it from the policy.
Args:
policy: IAM policy from which we want to remove bindings.
member: The member to remove from the IAM policy.
role: The role the member should be removed from.
"""
# First, remove the member from any binding that has the given role.
# A server policy can have duplicates.
for binding in policy.bindings:
if binding.role == role and member in binding.members:
binding.members.remove(member)
# Second, remove any empty bindings.
policy.bindings[:] = [b for b in policy.bindings if b.members]
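# Illustrative sketch (not part of the SDK): exercising the two helpers above.
# The member and role strings are hypothetical; 'msgs' is the iam v1 message
# module loaded at import time.
def _ExampleModifyBindings():
  policy = msgs.Policy(bindings=[])
  AddBindingToIamPolicy(msgs, policy, 'user:alice@example.com', 'roles/editor')
  # Re-adding the same member/role pair is a no-op (the first loop returns).
  AddBindingToIamPolicy(msgs, policy, 'user:alice@example.com', 'roles/editor')
  RemoveBindingFromIamPolicy(policy, 'user:alice@example.com', 'roles/editor')
  return policy  # the bindings list is empty again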
def ParsePolicyFile(policy_file_path, policy_message_type):
"""Construct an IAM Policy protorpc.Message from a JSON or YAML formated file.
Args:
policy_file_path: Path to the JSON or YAML IAM policy file.
policy_message_type: Policy message type to convert JSON or YAML to.
Returns:
a protorpc.Message of type policy_message_type filled in from the JSON or
YAML policy file.
Raises:
BadFileException if the JSON or YAML file is malformed.
"""
try:
policy = ParseJsonPolicyFile(policy_file_path, policy_message_type)
except gcloud_exceptions.BadFileException:
try:
policy = ParseYamlPolicyFile(policy_file_path, policy_message_type)
except gcloud_exceptions.BadFileException:
raise gcloud_exceptions.BadFileException(
'Policy file {0} is not a properly formatted JSON or YAML policy file'
'.'.format(policy_file_path))
if not policy.etag:
msg = ('The specified policy does not contain an "etag" field '
'identifying a specific version to replace. Changing a '
'policy without an "etag" can overwrite concurrent policy '
'changes.')
console_io.PromptContinue(
message=msg, prompt_string='Replace existing policy', cancel_on_no=True)
return policy
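# Illustrative sketch (not part of the SDK): read a policy file into the iam
# v1 Policy message type.  'policy.json' is a hypothetical path.
def _ExampleParsePolicy():
  return ParsePolicyFile('policy.json', msgs.Policy)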
def ParseJsonPolicyFile(policy_file_path, policy_message_type):
"""Construct an IAM Policy protorpc.Message from a JSON formated file.
Args:
policy_file_path: Path to the JSON IAM policy file.
policy_message_type: Policy message type to convert JSON to.
Returns:
a protorpc.Message of type policy_message_type filled in from the JSON
policy file.
Raises:
BadFileException if the JSON file is malformed.
IamEtagReadError if the etag is badly formatted.
"""
try:
with open(policy_file_path) as policy_file:
policy_json = policy_file.read()
except EnvironmentError:
    # EnvironmentError is parent of IOError, OSError and WindowsError.
# Raised when file does not exist or can't be opened/read.
raise core_exceptions.Error(
'Unable to read policy file {0}'.format(policy_file_path))
try:
policy = encoding.JsonToMessage(policy_message_type, policy_json)
except (ValueError) as e:
# ValueError is raised when JSON is badly formatted
raise gcloud_exceptions.BadFileException(
'Policy file {0} is not a properly formatted JSON policy file. {1}'
.format(policy_file_path, str(e)))
except (apitools_messages.DecodeError) as e:
# DecodeError is raised when etag is badly formatted (not proper Base64)
raise IamEtagReadError(
'The etag of policy file {0} is not properly formatted. {1}'
.format(policy_file_path, str(e)))
return policy
def ParseYamlPolicyFile(policy_file_path, policy_message_type):
"""Construct an IAM Policy protorpc.Message from a YAML formatted file.
Args:
policy_file_path: Path to the YAML IAM policy file.
policy_message_type: Policy message type to convert YAML to.
Returns:
a protorpc.Message of type policy_message_type filled in from the YAML
policy file.
Raises:
BadFileException if the YAML file is malformed.
IamEtagReadError if the etag is badly formatted.
"""
try:
with open(policy_file_path) as policy_file:
policy_to_parse = yaml.safe_load(policy_file)
except EnvironmentError:
    # EnvironmentError is parent of IOError, OSError and WindowsError.
# Raised when file does not exist or can't be opened/read.
raise core_exceptions.Error('Unable to read policy file {0}'.format(
policy_file_path))
except (yaml.scanner.ScannerError, yaml.parser.ParserError) as e:
# Raised when the YAML file is not properly formatted.
raise gcloud_exceptions.BadFileException(
'Policy file {0} is not a properly formatted YAML policy file. {1}'
.format(policy_file_path, str(e)))
try:
policy = encoding.PyValueToMessage(policy_message_type, policy_to_parse)
except (AttributeError) as e:
    # Raised when the YAML file is not a properly formatted YAML policy file.
raise gcloud_exceptions.BadFileException(
'Policy file {0} is not a properly formatted YAML policy file. {1}'
.format(policy_file_path, str(e)))
except (apitools_messages.DecodeError) as e:
# DecodeError is raised when etag is badly formatted (not proper Base64)
raise IamEtagReadError(
'The etag of policy file {0} is not properly formatted. {1}'
.format(policy_file_path, str(e)))
return policy
def GetDetailedHelpForSetIamPolicy(collection, example_id, example_see_more=''):
"""Returns a detailed_help for a set-iam-policy command.
Args:
collection: Name of the command collection (ex: "project", "dataset")
example_id: Collection identifier to display in a sample command
(ex: "my-project", '1234')
example_see_more: Optional "See ... for details" message. If not specified,
includes a default reference to IAM managing-policies documentation
Returns:
a dict with boilerplate help text for the set-iam-policy command
"""
if not example_see_more:
example_see_more = """
See https://cloud.google.com/iam/docs/managing-policies for details
of the policy file format and contents."""
return {
'brief': 'Set IAM policy for a {0}.'.format(collection),
'DESCRIPTION': '{description}',
'EXAMPLES': """\
The following command will read an IAM policy defined in a JSON file
'policy.json' and set it for a {0} with identifier '{1}'
$ {{command}} {1} policy.json
{2}""".format(collection, example_id, example_see_more)
}
def GetDetailedHelpForAddIamPolicyBinding(collection, example_id,
role='roles/editor'):
"""Returns a detailed_help for an add-iam-policy-binding command.
Args:
collection: Name of the command collection (ex: "project", "dataset")
example_id: Collection identifier to display in a sample command
(ex: "my-project", '1234')
role: The sample role to use in the documentation. The default of
'roles/editor' is usually sufficient, but if your command group's
users would more likely use a different role, you can override it here.
Returns:
a dict with boilerplate help text for the add-iam-policy-binding command
"""
return {
'brief': 'Add IAM policy binding for a {0}.'.format(collection),
'DESCRIPTION': '{description}',
'EXAMPLES': """\
The following command will add an IAM policy binding for the role
of '{role}' for the user 'test-user@gmail.com' on a {collection} with
identifier '{example_id}'
$ {{command}} {example_id} --member='user:test-user@gmail.com' --role='{role}'
See https://cloud.google.com/iam/docs/managing-policies for details
of policy role and member types.
""".format(collection=collection, example_id=example_id, role=role)
}
def GetDetailedHelpForRemoveIamPolicyBinding(collection, example_id,
role='roles/editor'):
"""Returns a detailed_help for a remove-iam-policy-binding command.
Args:
collection: Name of the command collection (ex: "project", "dataset")
example_id: Collection identifier to display in a sample command
(ex: "my-project", '1234')
role: The sample role to use in the documentation. The default of
'roles/editor' is usually sufficient, but if your command group's
users would more likely use a different role, you can override it here.
Returns:
a dict with boilerplate help text for the remove-iam-policy-binding command
"""
return {
'brief': 'Remove IAM policy binding for a {0}.'.format(collection),
'DESCRIPTION': '{description}',
'EXAMPLES': """\
          The following command will remove an IAM policy binding for the role
          of '{role}' for the user 'test-user@gmail.com' on a {collection} with
identifier '{example_id}'
$ {{command}} {example_id} --member='user:test-user@gmail.com' --role='{role}'
See https://cloud.google.com/iam/docs/managing-policies for details
of policy role and member types.
""".format(collection=collection, example_id=example_id, role=role)
}
def ManagedByFromString(managed_by):
"""Parses a string into a MANAGED_BY enum.
MANAGED_BY is an enum of who manages a service account key resource. IAM
will rotate any SYSTEM_MANAGED keys by default.
Args:
managed_by: A string representation of a MANAGED_BY. Can be one of *user*,
*system* or *any*.
Returns:
A KeyTypeValueValuesEnum (MANAGED_BY) value.
"""
if managed_by == 'user':
return [MANAGED_BY.USER_MANAGED]
elif managed_by == 'system':
return [MANAGED_BY.SYSTEM_MANAGED]
elif managed_by == 'any':
return []
else:
return [MANAGED_BY.KEY_TYPE_UNSPECIFIED]
def KeyTypeFromString(key_str):
"""Parses a string into a KeyType enum.
Args:
key_str: A string representation of a KeyType. Can be either *p12* or
*json*.
Returns:
A PrivateKeyTypeValueValuesEnum value.
"""
if key_str == 'p12':
return KEY_TYPES.TYPE_PKCS12_FILE
elif key_str == 'json':
return KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE
else:
return KEY_TYPES.TYPE_UNSPECIFIED
def KeyTypeToString(key_type):
"""Get a string version of a KeyType enum.
Args:
key_type: An enum of either KEY_TYPES or CREATE_KEY_TYPES.
Returns:
The string representation of the key_type, such that
parseKeyType(keyTypeToString(x)) is a no-op.
"""
if (key_type == KEY_TYPES.TYPE_PKCS12_FILE or
key_type == CREATE_KEY_TYPES.TYPE_PKCS12_FILE):
return 'p12'
elif (key_type == KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE or
key_type == CREATE_KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE):
return 'json'
else:
return 'unspecified'
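# Illustrative sketch of the round-trip property documented above:
# KeyTypeFromString(KeyTypeToString(x)) returns x for the concrete key types.
def _ExampleKeyTypeRoundTrip():
  key_type = KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE
  assert KeyTypeToString(key_type) == 'json'
  assert KeyTypeFromString(KeyTypeToString(key_type)) == key_type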
def KeyTypeToCreateKeyType(key_type):
"""Transforms between instances of KeyType enums.
Transforms KeyTypes into CreateKeyTypes.
Args:
key_type: A ServiceAccountKey.PrivateKeyTypeValueValuesEnum value.
Returns:
A IamProjectsServiceAccountKeysCreateRequest.PrivateKeyTypeValueValuesEnum
value.
"""
  # The apitools-generated messages define a distinct enum type for each
  # occurrence of the enum in the proto definition, and those enum values do
  # not compare equal to one another, so an explicit mapping is needed.
if key_type == KEY_TYPES.TYPE_PKCS12_FILE:
return CREATE_KEY_TYPES.TYPE_PKCS12_FILE
elif key_type == KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE:
return CREATE_KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE
else:
return CREATE_KEY_TYPES.TYPE_UNSPECIFIED
def KeyTypeFromCreateKeyType(key_type):
"""The inverse of *toCreateKeyType*."""
if key_type == CREATE_KEY_TYPES.TYPE_PKCS12_FILE:
return KEY_TYPES.TYPE_PKCS12_FILE
elif key_type == CREATE_KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE:
return KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE
else:
return KEY_TYPES.TYPE_UNSPECIFIED
def AccountNameValidator():
# https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/create
return arg_parsers.RegexpValidator(
r'[a-z][a-z0-9\-]{4,28}[a-z0-9]',
'Service account name must be between 6 and 30 characters (inclusive), '
'must begin with a lowercase letter, and consist of alphanumeric '
'characters that can be separated by hyphens.')
def ProjectToProjectResourceName(project):
"""Turns a project id into a project resource name."""
return 'projects/{0}'.format(project)
def EmailToAccountResourceName(email):
"""Turns an email into a service account resource name."""
return 'projects/-/serviceAccounts/{0}'.format(email)
def EmailAndKeyToResourceName(email, key):
"""Turns an email and key id into a key resource name."""
return 'projects/-/serviceAccounts/{0}/keys/{1}'.format(email, key)
def GetKeyIdFromResourceName(name):
"""Gets the key id from a resource name. No validation is done."""
return name.split('/')[5]
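# Illustrative sketch: the key id is the sixth path segment of the resource
# name 'projects/-/serviceAccounts/<email>/keys/<key-id>'.  The email and key
# id below are hypothetical.
def _ExampleKeyResourceName():
  name = EmailAndKeyToResourceName(
      'sa@my-project.iam.gserviceaccount.com', 'abc123')
  return GetKeyIdFromResourceName(name)  # -> 'abc123'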
def PublicKeyTypeFromString(key_str):
"""Parses a string into a PublicKeyType enum.
Args:
key_str: A string representation of a PublicKeyType. Can be either *pem* or
*raw*.
Returns:
A PublicKeyTypeValueValuesEnum value.
"""
if key_str == 'pem':
return PUBLIC_KEY_TYPES.TYPE_X509_PEM_FILE
return PUBLIC_KEY_TYPES.TYPE_RAW_PUBLIC_KEY
|
|
from reviewboard.diffviewer.chunk_generator import RawDiffChunkGenerator
from reviewboard.testing import TestCase
class RawDiffChunkGeneratorTests(TestCase):
"""Unit tests for RawDiffChunkGenerator."""
@property
def generator(self):
"""Create a dummy generator for tests that need it.
This generator will be void of any content. It's intended for
use in tests that need to operate on its utility functions.
"""
return RawDiffChunkGenerator(old=b'',
new=b'',
orig_filename='',
modified_filename='')
def test_get_chunks(self):
"""Testing RawDiffChunkGenerator.get_chunks"""
old = (
b'This is line 1\n'
b'Another line\n'
b'Line 3.\n'
b'la de da.\n'
)
new = (
b'This is line 1\n'
b'Line 3.\n'
b'la de doo.\n'
)
generator = RawDiffChunkGenerator(old, new, 'file1', 'file2')
chunks = list(generator.get_chunks())
self.assertEqual(len(chunks), 4)
self.assertEqual(
chunks[0],
{
'change': 'equal',
'collapsable': False,
'index': 0,
'lines': [
[
1,
1,
'This is line 1',
[],
1,
'This is line 1',
[],
False,
],
],
'meta': {
'left_headers': [],
'right_headers': [],
'whitespace_chunk': False,
'whitespace_lines': [],
},
'numlines': 1,
})
self.assertEqual(
chunks[1],
{
'change': 'delete',
'collapsable': False,
'index': 1,
'lines': [
[
2,
2,
'Another line',
[],
'',
'',
[],
False,
],
],
'meta': {
'left_headers': [],
'right_headers': [],
'whitespace_chunk': False,
'whitespace_lines': [],
},
'numlines': 1,
})
self.assertEqual(
chunks[2],
{
'change': 'equal',
'collapsable': False,
'index': 2,
'lines': [
[
3,
3,
'Line 3.',
[],
2,
'Line 3.',
[],
False,
],
],
'meta': {
'left_headers': [],
'right_headers': [],
'whitespace_chunk': False,
'whitespace_lines': [],
},
'numlines': 1,
})
self.assertEqual(
chunks[3],
{
'change': 'replace',
'collapsable': False,
'index': 3,
'lines': [
[
4,
4,
'la de da.',
[(7, 8)],
3,
'la de doo.',
[(7, 9)],
False,
],
],
'meta': {
'left_headers': [],
'right_headers': [],
'whitespace_chunk': False,
'whitespace_lines': [],
},
'numlines': 1,
})
def test_get_chunks_with_enable_syntax_highlighting_true(self):
"""Testing RawDiffChunkGenerator.get_chunks with
enable_syntax_highlighting=True and syntax highlighting
available for file
"""
old = b'This is **bold**'
new = b'This is *italic*'
generator = RawDiffChunkGenerator(old=old,
new=new,
orig_filename='file1.md',
modified_filename='file2.md')
chunks = list(generator.get_chunks())
self.assertEqual(len(chunks), 1)
self.assertEqual(
chunks[0],
{
'change': 'replace',
'collapsable': False,
'index': 0,
'lines': [
[
1,
1,
'This is <span class="gs">**bold**</span>',
[(9, 16)],
1,
'This is <span class="ge">*italic*</span>',
[(9, 16)],
False,
],
],
'meta': {
'left_headers': [],
'right_headers': [],
'whitespace_chunk': False,
'whitespace_lines': [],
},
'numlines': 1,
}
)
def test_get_chunks_with_enable_syntax_highlighting_false(self):
"""Testing RawDiffChunkGenerator.get_chunks with
enable_syntax_highlighting=False
"""
old = b'This is **bold**'
new = b'This is *italic*'
generator = RawDiffChunkGenerator(old=old,
new=new,
orig_filename='file1.md',
modified_filename='file2.md',
enable_syntax_highlighting=False)
chunks = list(generator.get_chunks())
self.assertEqual(len(chunks), 1)
self.assertEqual(
chunks[0],
{
'change': 'replace',
'collapsable': False,
'index': 0,
'lines': [
[
1,
1,
'This is **bold**',
[(9, 16)],
1,
'This is *italic*',
[(9, 16)],
False,
],
],
'meta': {
'left_headers': [],
'right_headers': [],
'whitespace_chunk': False,
'whitespace_lines': [],
},
'numlines': 1,
}
)
def test_get_chunks_with_syntax_highlighting_blacklisted(self):
"""Testing RawDiffChunkGenerator.get_chunks with syntax highlighting
blacklisted for file
"""
class MyRawDiffChunkGenerator(RawDiffChunkGenerator):
STYLED_EXT_BLACKLIST = (
'.md',
)
old = b'This is **bold**'
new = b'This is *italic*'
generator = MyRawDiffChunkGenerator(old=old,
new=new,
orig_filename='file1.md',
modified_filename='file2.md')
chunks = list(generator.get_chunks())
self.assertEqual(len(chunks), 1)
self.assertEqual(
chunks[0],
{
'change': 'replace',
'collapsable': False,
'index': 0,
'lines': [
[
1,
1,
'This is **bold**',
[(9, 16)],
1,
'This is *italic*',
[(9, 16)],
False,
],
],
'meta': {
'left_headers': [],
'right_headers': [],
'whitespace_chunk': False,
'whitespace_lines': [],
},
'numlines': 1,
}
)
def test_generate_chunks_with_encodings(self):
"""Testing RawDiffChunkGenerator.generate_chunks with explicit
encodings for old and new
"""
old = (
'This is line 1\n'
'Another line\n'
'Line 3.\n'
'la de da.\n'
).encode('utf-8')
new = (
'This is line 1\n'
'Line 3.\n'
'la de doo.\n'
).encode('utf-16')
generator = RawDiffChunkGenerator(old=old,
new=new,
orig_filename='file1',
modified_filename='file2')
chunks = list(generator.generate_chunks(
old=old,
new=new,
old_encoding_list=['utf-8'],
new_encoding_list=['utf-16']
))
self.assertEqual(len(chunks), 4)
self.assertEqual(
chunks[0],
{
'change': 'equal',
'collapsable': False,
'index': 0,
'lines': [
[
1,
1,
'This is line 1',
[],
1,
'This is line 1',
[],
False,
],
],
'meta': {
'left_headers': [],
'right_headers': [],
'whitespace_chunk': False,
'whitespace_lines': [],
},
'numlines': 1,
})
self.assertEqual(
chunks[1],
{
'change': 'delete',
'collapsable': False,
'index': 1,
'lines': [
[
2,
2,
'Another line',
[],
'',
'',
[],
False,
],
],
'meta': {
'left_headers': [],
'right_headers': [],
'whitespace_chunk': False,
'whitespace_lines': [],
},
'numlines': 1,
})
self.assertEqual(
chunks[2],
{
'change': 'equal',
'collapsable': False,
'index': 2,
'lines': [
[
3,
3,
'Line 3.',
[],
2,
'Line 3.',
[],
False,
],
],
'meta': {
'left_headers': [],
'right_headers': [],
'whitespace_chunk': False,
'whitespace_lines': [],
},
'numlines': 1,
})
self.assertEqual(
chunks[3],
{
'change': 'replace',
'collapsable': False,
'index': 3,
'lines': [
[
4,
4,
'la de da.',
[(7, 8)],
3,
'la de doo.',
[(7, 9)],
False,
],
],
'meta': {
'left_headers': [],
'right_headers': [],
'whitespace_chunk': False,
'whitespace_lines': [],
},
'numlines': 1,
})
def test_apply_pygments_with_lexer(self):
"""Testing RawDiffChunkGenerator._apply_pygments with valid lexer"""
chunk_generator = RawDiffChunkGenerator(old=[],
new=[],
orig_filename='file1',
modified_filename='file2')
self.assertEqual(
chunk_generator._apply_pygments(data='This is **bold**\n',
filename='test.md'),
['This is <span class="gs">**bold**</span>'])
def test_apply_pygments_without_lexer(self):
"""Testing RawDiffChunkGenerator._apply_pygments without valid lexer"""
chunk_generator = RawDiffChunkGenerator(old=[],
new=[],
orig_filename='file1',
modified_filename='file2')
self.assertIsNone(
chunk_generator._apply_pygments(data='This is **bold**',
filename='test'))
def test_apply_pygments_with_blacklisted_file(self):
"""Testing RawDiffChunkGenerator._apply_pygments with blacklisted
file extension
"""
class MyRawDiffChunkGenerator(RawDiffChunkGenerator):
STYLED_EXT_BLACKLIST = (
'.md',
)
chunk_generator = MyRawDiffChunkGenerator(old=[],
new=[],
orig_filename='file1',
modified_filename='file2')
self.assertIsNone(
chunk_generator._apply_pygments(data='This is **bold**',
filename='test.md'))
def test_get_move_info_with_new_range_no_preceding(self):
"""Testing RawDiffChunkGenerator._get_move_info with new move range and
no adjacent preceding move range
"""
generator = RawDiffChunkGenerator([], [], 'file1', 'file2')
self.assertEqual(
generator._get_move_info(10, {
8: 100,
10: 200,
11: 201,
}),
(200, True))
def test_get_move_info_with_new_range_preceding(self):
"""Testing RawDiffChunkGenerator._get_move_info with new move range and
adjacent preceding move range
"""
generator = RawDiffChunkGenerator([], [], 'file1', 'file2')
self.assertEqual(
generator._get_move_info(10, {
8: 100,
9: 101,
10: 200,
11: 201,
}),
(200, True))
def test_get_move_info_with_existing_range(self):
"""Testing RawDiffChunkGenerator._get_move_info with existing move
range
"""
generator = RawDiffChunkGenerator([], [], 'file1', 'file2')
self.assertEqual(
generator._get_move_info(11, {
8: 100,
9: 101,
10: 200,
11: 201,
}),
(201, False))
def test_get_move_info_with_no_move(self):
"""Testing RawDiffChunkGenerator._get_move_info with no move range"""
generator = RawDiffChunkGenerator([], [], 'file1', 'file2')
self.assertIsNone(generator._get_move_info(500, {
8: 100,
9: 101,
10: 200,
11: 201,
}))
def test_indent_spaces(self):
"""Testing RawDiffChunkGenerator._serialize_indentation with spaces"""
self.assertEqual(
self.generator._serialize_indentation(' ', 4),
('>>>>', ''))
def test_indent_tabs(self):
"""Testing RawDiffChunkGenerator._serialize_indentation with tabs"""
self.assertEqual(
self.generator._serialize_indentation('\t', 8),
('——————>|', ''))
def test_indent_spaces_and_tabs(self):
"""Testing RawDiffChunkGenerator._serialize_indentation
with spaces and tabs
"""
self.assertEqual(
self.generator._serialize_indentation(' \t', 8),
('>>>———>|', ''))
def test_indent_tabs_and_spaces(self):
"""Testing RawDiffChunkGenerator._serialize_indentation
with tabs and spaces
"""
self.assertEqual(
self.generator._serialize_indentation('\t ', 11),
('——————>|>>>',
''))
def test_indent_9_spaces_and_tab(self):
"""Testing RawDiffChunkGenerator._serialize_indentation
with 9 spaces and tab
"""
self.assertEqual(
self.generator._serialize_indentation(' \t', 8),
('>>>>>>>|', ''))
def test_indent_8_spaces_and_tab(self):
"""Testing RawDiffChunkGenerator._serialize_indentation
with 8 spaces and tab
"""
self.assertEqual(
self.generator._serialize_indentation(' \t', 8),
('>>>>>>>|', ''))
def test_indent_7_spaces_and_tab(self):
"""Testing RawDiffChunkGenerator._serialize_indentation
with 7 spaces and tab
"""
self.assertEqual(
self.generator._serialize_indentation(' \t', 8),
('>>>>>—>|', ''))
def test_unindent_spaces(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation with spaces
"""
self.assertEqual(
self.generator._serialize_unindentation(' ', 4),
('<<<<', ''))
def test_unindent_tabs(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation with tabs"""
self.assertEqual(
self.generator._serialize_unindentation('\t', 8),
('|<——————', ''))
def test_unindent_spaces_and_tabs(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation
with spaces and tabs
"""
self.assertEqual(
self.generator._serialize_unindentation(' \t', 8),
('<<<|<———', ''))
def test_unindent_tabs_and_spaces(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation
with tabs and spaces
"""
self.assertEqual(
self.generator._serialize_unindentation('\t ', 11),
('|<——————<<<',
''))
def test_unindent_9_spaces_and_tab(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation
with 9 spaces and tab
"""
self.assertEqual(
self.generator._serialize_unindentation(' \t', 8),
('<<<<<<<|', ''))
def test_unindent_8_spaces_and_tab(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation
with 8 spaces and tab
"""
self.assertEqual(
self.generator._serialize_unindentation(' \t', 8),
('<<<<<<|<', ''))
def test_unindent_7_spaces_and_tab(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation
with 7 spaces and tab
"""
self.assertEqual(
self.generator._serialize_unindentation(' \t', 8),
('<<<<<|<—', ''))
def test_highlight_indent(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with indentation
"""
self.assertEqual(
self.generator._highlight_indentation(
'',
' foo',
True, 4, 4),
('', '<span class="indent">>>>></span> foo'))
def test_highlight_indent_with_adjacent_tag(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with indentation and adjacent tag wrapping whitespace
"""
self.assertEqual(
self.generator._highlight_indentation(
'',
'<span class="s"> </span>foo',
True, 1, 1),
('',
'<span class="s"><span class="indent">></span></span>foo'))
def test_highlight_indent_with_unexpected_chars(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with indentation and unexpected markup chars
"""
self.assertEqual(
self.generator._highlight_indentation(
'',
' <span> </span> foo',
True, 4, 2),
('', ' <span> </span> foo'))
def test_highlight_unindent(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with unindentation
"""
self.assertEqual(
self.generator._highlight_indentation(
' foo',
'',
False, 4, 4),
('<span class="unindent"><<<<</span> foo', ''))
def test_highlight_unindent_with_adjacent_tag(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with unindentation and adjacent tag wrapping whitespace
"""
self.assertEqual(
self.generator._highlight_indentation(
'<span class="s"> </span>foo',
'',
False, 1, 1),
('<span class="s"><span class="unindent"><</span></span>foo',
''))
def test_highlight_unindent_with_unexpected_chars(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with unindentation and unexpected markup chars
"""
self.assertEqual(
self.generator._highlight_indentation(
' <span> </span> foo',
'',
False, 4, 2),
(' <span> </span> foo', ''))
def test_highlight_unindent_with_replacing_last_tab_with_spaces(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with unindentation and replacing last tab with spaces
"""
self.assertEqual(
self.generator._highlight_indentation(
'<span>\t\t </span> foo',
'',
False, 2, 16),
('<span><span class="unindent">'
'|<——————'
'|<——————'
'</span> </span> foo', ''))
def test_highlight_unindent_with_replacing_3_tabs_with_tab_spaces(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with unindentation and replacing 3 tabs with 1 tab and 8 spaces
"""
self.assertEqual(
self.generator._highlight_indentation(
'<span>\t </span> foo',
'',
False, 1, 24),
('<span><span class="unindent">'
'|<——————'
'</span> </span> foo', ''))
|
|
import pygame
import sys, os, random
import numpy as np
import cv2
##
sys.path.append('/usr/local/lib/python2.7/dist-packages/')
import freenect
import imutils
from collections import deque
# Global constants
# Colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
# Screen dimensions
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
####################################################################
class kinect_cam():
def __init__(self):
self.isopen = True
self.rgb = np.array([])
self.depth = np.array([])
self.convert_rgb = True
self.convert_depth = True
self.window = None
self.trh = 100
self.filterSize = 10
self.ap_mask = True
self.filter = True
self.m = 1.0
self.b = 0.0
self.mask = 255
self.kernel = np.ones((5, 5), np.uint8)
self.x = 0
self.y = 0
self.up = False
self.left = False
self.right = False
self.down = False
self.on_move = False
self.buffer = 15
self.pts = deque(maxlen=self.buffer)
self.counter = 0
self.direction = ""
self.epsilon = 40
#function to get RGB image from kinect
    def get_video(self):
        self.rgb, _ = freenect.sync_get_video()
        if self.rgb is None:
            self.rgb = np.array([])
        elif self.convert_rgb:
            # freenect returns RGB; convert to BGR so OpenCV handles colors correctly
            self.rgb = cv2.cvtColor(self.rgb, cv2.COLOR_RGB2BGR)
        return
#function to get depth image from kinect
    def get_depth(self):
        self.depth, _ = freenect.sync_get_depth()
        if self.depth is None:
            # no depth frame available; skip processing this cycle
            self.depth = np.array([])
            return
        if self.convert_depth:
            # clip max depth to 1023 (the Kinect's 10-bit range), convert to 8-bit grayscale
            np.clip(self.depth, 0, 2**10 - 1, self.depth)
            self.depth >>= 2
            self.depth = self.depth.astype(np.uint8)
        self.process()
        return
    # estimate the movement direction from the tracked centroid history
def movement(self):
if (len(self.pts) == self.buffer):
# compute the difference between the x and y
# coordinates and re-initialize the direction
# text variables
dX = self.pts[-self.buffer][0] - self.pts[-1][0]
dY = self.pts[-self.buffer][1] - self.pts[-1][1]
(dirX, dirY) = ("", "")
# ensure there is significant movement in the
# x-direction
if np.abs(dX) > self.epsilon:
self.left = np.sign(dX)==-1
self.right = np.sign(dX)==1
dirX = "Izquierda" if self.left else "Derecha"
else:
self.left = False
self.right = False
# ensure there is significant movement in the
# y-direction
if np.abs(dY) > self.epsilon:
self.up = np.sign(dY)==-1
self.down = np.sign(dY)==1
dirY = "Arriba" if np.sign(dY) == -1 else "Abajo"
else:
self.up = False
self.down = False
# handle when both directions are non-empty
if dirX != "" and dirY != "":
self.direction = "{}-{}".format(dirY, dirX)
# otherwise, only one direction is non-empty
else:
self.direction = dirX if dirX != "" else dirY
self.counter+=1
self.on_move = self.up or self.down or self.left or self.right
return
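    # Sketch of the sign convention implemented above: dX/dY compare the newest
    # tracked centroid (pts[0], added with appendleft) against the oldest one in
    # the window, so a drop in x larger than `epsilon` sets self.left, a rise
    # sets self.right, and dY maps to self.up / self.down the same way.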
def process(self):
if self.ap_mask:
self.depth = self.mask - self.depth
#self.depth_p = self.depth*self.m+self.b
self.img_g = cv2.flip( self.depth, 1 ) #cv2.cvtColor(self.depth, cv2.COLOR_BGR2GRAY);
if self.filter:
self.img_g = cv2.blur(self.img_g,(self.filterSize,self.filterSize))
self.img_wb = cv2.threshold(self.img_g, self.trh, 255, cv2.THRESH_BINARY)[1]
#self.img_wb = cv2.cvtColor(self.img_wb, cv2.COLOR_BGR2GRAY);
self.img_wb = cv2.morphologyEx(self.img_wb, cv2.MORPH_CLOSE, self.kernel)
cnts = cv2.findContours(self.img_wb.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
# ensure that at least one contour was found
self.x0=self.x
self.y0=self.y
if len(cnts) > 0:
# sort the contours according to their size in
# descending order
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
# for c in cnts:
c = cnts[0]
# compute the center of the contour
M = cv2.moments(c)
try:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
self.x=cX
self.y=cY
self.pts.appendleft((cX,cY))
                if self.window is not None:
# draw the contour and center of the shape on the image
cv2.drawContours(self.img_g, [c], -1, (0, 255, 0), 2)
cv2.circle(self.img_g, (cX, cY), 7, (255, 255, 255), -1)
cv2.putText(self.img_g, "center", (cX - 20, cY - 20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (100, 100, 100), 2)
self.window.on_draw(self.img_g)
            except (ZeroDivisionError, cv2.error):
                # zero-area contour (m00 == 0) or a drawing failure; skip this frame
                pass
self.movement()
return
####################################################################
class Player(pygame.sprite.Sprite):
""" This class represents the bar at the bottom that the player
controls. """
# -- Methods
def __init__(self):
""" Constructor function """
# Call the parent's constructor
pygame.sprite.Sprite.__init__(self)
# Create an image of the block, and fill it with a color.
# This could also be an image loaded from the disk.
width = 40
height = 60
self.image = pygame.Surface([width, height])
self.image.fill(RED)
        # Set a reference to the image rect.
self.rect = self.image.get_rect()
# Set speed vector of player
self.change_x = 0
self.change_y = 0
# List of sprites we can bump against
self.level = None
def update(self):
""" Move the player. """
# Gravity
self.calc_grav()
# Move left/right
self.rect.x += self.change_x
# See if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# If we are moving right,
# set our right side to the left side of the item we hit
if self.change_x > 0:
self.rect.right = block.rect.left
elif self.change_x < 0:
# Otherwise if we are moving left, do the opposite.
self.rect.left = block.rect.right
# Move up/down
self.rect.y += self.change_y
# Check and see if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# Reset our position based on the top/bottom of the object.
if self.change_y > 0:
self.rect.bottom = block.rect.top
elif self.change_y < 0:
self.rect.top = block.rect.bottom
# Stop our vertical movement
self.change_y = 0
def calc_grav(self):
""" Calculate effect of gravity. """
if self.change_y == 0:
self.change_y = 1
else:
self.change_y += .35
# See if we are on the ground.
if self.rect.y >= SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:
self.change_y = 0
self.rect.y = SCREEN_HEIGHT - self.rect.height
def jump(self):
""" Called when user hits 'jump' button. """
# move down a bit and see if there is a platform below us.
# Move down 2 pixels because it doesn't work well if we only move down
# 1 when working with a platform moving down.
self.rect.y += 2
platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
self.rect.y -= 2
# If it is ok to jump, set our speed upwards
if len(platform_hit_list) > 0 or self.rect.bottom >= SCREEN_HEIGHT:
self.change_y = -10
# Player-controlled movement:
def go_left(self):
""" Called when the user hits the left arrow. """
self.change_x = -6
def go_right(self):
""" Called when the user hits the right arrow. """
self.change_x = 6
def stop(self):
""" Called when the user lets off the keyboard. """
self.change_x = 0
class Platform(pygame.sprite.Sprite):
""" Platform the user can jump on """
def __init__(self, width, height):
""" Platform constructor. Assumes constructed with user passing in
an array of 5 numbers like what's defined at the top of this
code. """
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([width, height])
self.image.fill(GREEN)
self.rect = self.image.get_rect()
class Level(object):
""" This is a generic super-class used to define a level.
Create a child class for each level with level-specific
info. """
def __init__(self, player):
""" Constructor. Pass in a handle to player. Needed for when moving platforms
collide with the player. """
self.platform_list = pygame.sprite.Group()
self.enemy_list = pygame.sprite.Group()
self.player = player
# Background image
self.background = None
    # Update everything in this level
def update(self):
""" Update everything in this level."""
self.platform_list.update()
self.enemy_list.update()
def draw(self, screen):
""" Draw everything on this level. """
# Draw the background
screen.fill(BLUE)
# Draw all the sprite lists that we have
self.platform_list.draw(screen)
self.enemy_list.draw(screen)
# Create platforms for the level
class Level_01(Level):
""" Definition for level 1. """
def __init__(self, player):
""" Create level 1. """
# Call the parent constructor
Level.__init__(self, player)
# Array with width, height, x, and y of platform
level = [[210, 70, 500, 500],
[210, 70, 200, 400],
[210, 70, 600, 300],
]
# Go through the array above and add platforms
for platform in level:
block = Platform(platform[0], platform[1])
block.rect.x = platform[2]
block.rect.y = platform[3]
block.player = self.player
self.platform_list.add(block)
def main():
""" Main Program """
pygame.init()
# Set the height and width of the screen
size = [SCREEN_WIDTH, SCREEN_HEIGHT]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Saltar entre plataformas")
# Create the player
player = Player()
# Create all the levels
level_list = []
level_list.append( Level_01(player) )
# Set the current level
current_level_no = 0
current_level = level_list[current_level_no]
active_sprite_list = pygame.sprite.Group()
player.level = current_level
player.rect.x = 340
player.rect.y = SCREEN_HEIGHT - player.rect.height
active_sprite_list.add(player)
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
cam=kinect_cam()
# -------- Main Program Loop -----------
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
cam.get_depth()
if cam.on_move and not(cam.down):
if cam.left:
player.go_left()
if cam.right:
player.go_right()
if cam.up:
player.jump()
if not(cam.on_move) or cam.down:
player.stop()
# Update the player.
active_sprite_list.update()
# Update items in the level
current_level.update()
# If the player gets near the right side, shift the world left (-x)
if player.rect.right > SCREEN_WIDTH:
player.rect.right = SCREEN_WIDTH
# If the player gets near the left side, shift the world right (+x)
if player.rect.left < 0:
player.rect.left = 0
# ALL CODE TO DRAW SHOULD GO BELOW THIS COMMENT
current_level.draw(screen)
active_sprite_list.draw(screen)
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
# Limit to 60 frames per second
clock.tick(60)
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Be IDLE friendly. If you forget this line, the program will 'hang'
# on exit.
pygame.quit()
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
from python_utils.upload_test_results import upload_results_to_bq
except ImportError:
pass # It's ok to not import because this is only necessary to upload results to BQ.
gcp_utils_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {
'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
'mac': ['poll'],
}
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
def get_bqtest_data(limit=None):
import big_query_utils
bq = big_query_utils.create_big_query()
query = """
SELECT
filtered_test_name,
SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
MAX(cpu_measured) + 0.01 as cpu
FROM (
SELECT
REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
result, cpu_measured
FROM
[grpc-testing:jenkins_test_results.aggregate_results]
WHERE
timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
AND platform = '""" + platform_string() + """'
AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
filtered_test_name"""
if limit:
query += " limit {}".format(limit)
query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
page = bq.jobs().getQueryResults(
pageToken=None, **query_job['jobReference']).execute(num_retries=3)
test_data = [
BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
float(row['f'][2]['v'])) for row in page['rows']
]
return test_data
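# Illustrative result shape (test names and numbers below are hypothetical,
# not real BigQuery data):
#   [BigQueryTestData(name='h2_full_test', flaky=False, cpu=0.51),
#    BigQueryTestData(name='end2end_test', flaky=True, cpu=1.26), ...]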
def platform_string():
return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
try:
subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
except subprocess.CalledProcessError as e:
logging.exception(
"Error while running command '%s'. Exit status %d. Output:\n%s",
e.cmd, e.returncode, e.output)
raise
def max_parallel_tests_for_current_platform():
    # So far, excessive test parallelization has only been observed to be a
    # problem on Windows.
if jobset.platform_string() == 'windows':
return 64
return 1024
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
def __init__(self,
config,
environ=None,
timeout_multiplier=1,
tool_prefix=[],
iomgr_platform='native'):
if environ is None:
environ = {}
self.build_config = config
self.environ = environ
self.environ['CONFIG'] = config
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_multiplier
self.iomgr_platform = iomgr_platform
def job_spec(self,
cmdline,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
shortname=None,
environ={},
cpu_cost=1.0,
flaky=False):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
actual_environ = self.environ.copy()
for k, v in environ.items():
actual_environ[k] = v
if not flaky and shortname and shortname in flaky_tests:
flaky = True
if shortname in shortname_to_cpu:
cpu_cost = shortname_to_cpu[shortname]
return jobset.JobSpec(
cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(self.timeout_multiplier * timeout_seconds
if timeout_seconds else None),
flake_retries=4 if flaky or args.allow_flakes else 0,
timeout_retries=1 if flaky or args.allow_flakes else 0)
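# For example (hypothetical numbers): a Config constructed with
# timeout_multiplier=3 turns the default 300s timeout into a 900s JobSpec
# timeout, and any shortname found in flaky_tests gets flake_retries=4 and
# timeout_retries=1 even if the test target itself wasn't marked flaky.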
def get_c_tests(travis, test_lang):
out = []
platforms_str = 'ci_platforms' if travis else 'platforms'
with open('tools/run_tests/generated/tests.json') as f:
js = json.load(f)
return [
tgt for tgt in js
if tgt['language'] == test_lang and platform_string() in tgt[
platforms_str] and not (travis and tgt['flaky'])
]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' %
compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
_PythonConfigVars = collections.namedtuple('_ConfigVars', [
'shell', 'builder', 'builder_prefix_arguments', 'venv_relative_python',
'toolchain', 'runner'
])
def _python_config_generator(name, major, minor, bits, config_vars):
return PythonConfig(
name, config_vars.shell + config_vars.builder +
config_vars.builder_prefix_arguments + [
_python_pattern_function(major=major, minor=minor, bits=bits)
] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner +
[os.path.join(name, config_vars.venv_relative_python[0])])
def _pypy_config_generator(name, major, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder +
config_vars.builder_prefix_arguments + [
_pypy_pattern_function(major=major)
] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner +
[os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
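# Illustrative outputs of the pattern function above: on POSIX it returns e.g.
# 'python2.7' or 'python3.6'; on Windows (os.name == 'nt') it returns a path
# such as '/c/Python36/python.exe' for 64-bit builds or
# '/c/Python36_32bits/python.exe' otherwise.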
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
def __init__(self, make_target, test_lang):
self.make_target = make_target
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
_check_compiler(self.args.compiler, [
'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
])
_check_arch(self.args.arch, ['default', 'x64', 'x86'])
self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
self._use_cmake = True
self._make_options = []
elif self.args.compiler == 'cmake':
_check_arch(self.args.arch, ['default'])
self._use_cmake = True
self._docker_distro = 'jessie'
self._make_options = []
else:
self._use_cmake = False
self._docker_distro, self._make_options = self._compiler_options(
self.args.use_docker, self.args.compiler)
if args.iomgr_platform == "uv":
cflags = '-DGRPC_UV -DGRPC_UV_THREAD_CHECK'
try:
cflags += subprocess.check_output(
['pkg-config', '--cflags', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
pass
try:
ldflags = subprocess.check_output(
['pkg-config', '--libs', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
ldflags = '-luv '
self._make_options += [
'EXTRA_CPPFLAGS={}'.format(cflags),
'EXTRA_LDLIBS={}'.format(ldflags)
]
def test_specs(self):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
if self._use_cmake and target.get('boringssl', False):
# cmake doesn't build boringssl tests
continue
auto_timeout_scaling = target.get('auto_timeout_scaling', True)
polling_strategies = (
_POLLING_STRATEGIES.get(self.platform, ['all'])
if target.get('uses_polling', True) else ['none'])
if self.args.iomgr_platform == 'uv':
polling_strategies = ['all']
for polling_strategy in polling_strategies:
env = {
'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
_ROOT + '/src/core/tsi/test_creds/ca.pem',
'GRPC_POLL_STRATEGY':
polling_strategy,
'GRPC_VERBOSITY':
'DEBUG'
}
resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
if resolver:
env['GRPC_DNS_RESOLVER'] = resolver
shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
if polling_strategy in target.get('excluded_poll_engines', []):
continue
timeout_scaling = 1
if auto_timeout_scaling:
config = self.args.config
if ('asan' in config or config == 'msan' or
config == 'tsan' or config == 'ubsan' or
config == 'helgrind' or config == 'memcheck'):
# Scale overall test timeout if running under various sanitizers.
# scaling value is based on historical data analysis
timeout_scaling *= 3
elif polling_strategy == 'poll-cv':
# scale test timeout if running with poll-cv
# sanitizer and poll-cv scaling is not cumulative to ensure
# reasonable timeout values.
# TODO(jtattermusch): based on historical data and 5min default
# test timeout poll-cv scaling is currently not useful.
# Leaving here so it can be reintroduced if the default test timeout
# is decreased in the future.
timeout_scaling *= 1
if self.config.build_config in target['exclude_configs']:
continue
if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
continue
if self.platform == 'windows':
binary = 'cmake/build/%s/%s.exe' % (
_MSBUILD_CONFIG[self.config.build_config],
target['name'])
else:
if self._use_cmake:
binary = 'cmake/build/%s' % target['name']
else:
binary = 'bins/%s/%s' % (self.config.build_config,
target['name'])
cpu_cost = target['cpu_cost']
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
list_test_command = None
filter_test_command = None
                # These are the flags defined by the gtest and benchmark
                # frameworks to list and filter test runs. We use them to
                # split each individual test into its own JobSpec, and thus
                # into its own process.
if 'benchmark' in target and target['benchmark']:
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output(
[binary, '--benchmark_list_tests'],
stderr=fnull)
for line in tests.split('\n'):
test = line.strip()
if not test: continue
cmdline = [binary, '--benchmark_filter=%s$' % test
] + target['args']
out.append(
self.config.job_spec(
cmdline,
shortname='%s %s' % (' '.join(cmdline),
shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS *
timeout_scaling,
environ=env))
elif 'gtest' in target and target['gtest']:
                    # Here we parse the output of --gtest_list_tests to build
                    # up a complete list of the tests contained in a binary;
                    # for each test, we then add a job to run, filtering for
                    # just that test.
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output(
[binary, '--gtest_list_tests'], stderr=fnull)
base = None
for line in tests.split('\n'):
i = line.find('#')
if i >= 0: line = line[:i]
if not line: continue
if line[0] != ' ':
base = line.strip()
else:
assert base is not None
assert line[1] == ' '
test = base + line.strip()
cmdline = [binary, '--gtest_filter=%s' % test
] + target['args']
out.append(
self.config.job_spec(
cmdline,
shortname='%s %s' % (' '.join(cmdline),
shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=target.get(
'timeout_seconds',
_DEFAULT_TIMEOUT_SECONDS) *
timeout_scaling,
environ=env))
else:
cmdline = [binary] + target['args']
shortname = target.get('shortname', ' '.join(
pipes.quote(arg) for arg in cmdline))
shortname += shortname_ext
out.append(
self.config.job_spec(
cmdline,
shortname=shortname,
cpu_cost=cpu_cost,
flaky=target.get('flaky', False),
timeout_seconds=target.get(
'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
* timeout_scaling,
environ=env))
elif self.args.regex == '.*' or self.platform == 'windows':
print('\nWARNING: binary not found, skipping', binary)
return sorted(out)
def make_targets(self):
if self.platform == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
return [
'buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
'check_epollexclusive'
]
def make_options(self):
return self._make_options
def pre_build_steps(self):
if self.platform == 'windows':
return [[
'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
self._cmake_generator_option, self._cmake_arch_option
]]
elif self._use_cmake:
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
else:
return []
def build_steps(self):
return []
def post_tests_steps(self):
if self.platform == 'windows':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
if self._use_cmake:
return 'cmake/build/Makefile'
else:
return 'Makefile'
def _clang_make_options(self, version_suffix=''):
return [
'CC=clang%s' % version_suffix, 'CXX=clang++%s' % version_suffix,
'LD=clang%s' % version_suffix, 'LDXX=clang++%s' % version_suffix
]
def _gcc_make_options(self, version_suffix):
return [
'CC=gcc%s' % version_suffix, 'CXX=g++%s' % version_suffix,
'LD=gcc%s' % version_suffix, 'LDXX=g++%s' % version_suffix
]
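    # For instance, _clang_make_options(version_suffix='-3.6') yields
    # ['CC=clang-3.6', 'CXX=clang++-3.6', 'LD=clang-3.6', 'LDXX=clang++-3.6'];
    # _gcc_make_options(version_suffix='-4.8') does the same with gcc/g++.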
def _compiler_options(self, use_docker, compiler):
"""Returns docker distro and make options to use for given compiler."""
if not use_docker and not _is_use_docker_child():
_check_compiler(compiler, ['default'])
if compiler == 'gcc4.9' or compiler == 'default':
return ('jessie', [])
elif compiler == 'gcc4.8':
return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
elif compiler == 'gcc_musl':
return ('alpine', [])
elif compiler == 'clang3.4':
# on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
return ('ubuntu1404', self._clang_make_options())
elif compiler == 'clang3.5':
return ('jessie', self._clang_make_options(version_suffix='-3.5'))
elif compiler == 'clang3.6':
return ('ubuntu1604',
self._clang_make_options(version_suffix='-3.6'))
elif compiler == 'clang3.7':
return ('ubuntu1604',
self._clang_make_options(version_suffix='-3.7'))
else:
raise Exception('Compiler %s not supported.' % compiler)
def dockerfile_dir(self):
return 'tools/dockerfile/test/cxx_%s_%s' % (
self._docker_distro, _docker_arch_suffix(self.args.arch))
def __str__(self):
return self.make_target
# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
# Note: electron ABI only depends on major and minor version, so that's all
# we should specify in the compiler argument
_check_compiler(self.args.compiler, [
'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
'electron1.3', 'electron1.6'
])
if self.args.compiler == 'default':
self.runtime = 'node'
self.node_version = '8'
else:
if self.args.compiler.startswith('electron'):
self.runtime = 'electron'
self.node_version = self.args.compiler[8:]
else:
self.runtime = 'node'
# Take off the word "node"
self.node_version = self.args.compiler[4:]
# TODO: update with Windows/electron scripts when available for grpc/grpc-node
def test_specs(self):
if self.platform == 'windows':
return [
self.config.job_spec(
['tools\\run_tests\\helper_scripts\\run_node.bat'])
]
else:
return [
self.config.job_spec(
['tools/run_tests/helper_scripts/run_grpc-node.sh'],
None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)
]
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
self.args.arch)
def __str__(self):
return 'grpc-node'
class PhpLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
return [
self.config.job_spec(
['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return self._make_options
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
self.args.arch)
def __str__(self):
return 'php'
class Php7Language(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
return [
self.config.job_spec(
['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return self._make_options
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
self.args.arch)
def __str__(self):
return 'php7'
class PythonConfig(
collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
"""Tuple of commands (named s.t. 'what it says on the tin' applies)"""
class PythonLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
self.pythons = self._get_pythons(self.args)
def test_specs(self):
# load list of known test suites
with open(
'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
tests_json = json.load(tests_json_file)
environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
return [
self.config.job_spec(
config.run,
timeout_seconds=5 * 60,
environ=dict(
list(environment.items()) + [(
'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
shortname='%s.test.%s' % (config.name, suite_name),)
for suite_name in tests_json for config in self.pythons
]
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [config.build for config in self.pythons]
def post_tests_steps(self):
if self.config.build_config != 'gcov':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/python_%s_%s' % (
self.python_manager_name(), _docker_arch_suffix(self.args.arch))
def python_manager_name(self):
if self.args.compiler in ['python3.5', 'python3.6']:
return 'pyenv'
elif self.args.compiler == 'python_alpine':
return 'alpine'
else:
return 'jessie'
def _get_pythons(self, args):
if args.arch == 'x86':
bits = '32'
else:
bits = '64'
if os.name == 'nt':
shell = ['bash']
builder = [
os.path.abspath(
'tools/run_tests/helper_scripts/build_python_msys2.sh')
]
builder_prefix_arguments = ['MINGW{}'.format(bits)]
venv_relative_python = ['Scripts/python.exe']
toolchain = ['mingw32']
else:
shell = []
builder = [
os.path.abspath(
'tools/run_tests/helper_scripts/build_python.sh')
]
builder_prefix_arguments = []
venv_relative_python = ['bin/python']
toolchain = ['unix']
runner = [
os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
]
config_vars = _PythonConfigVars(shell, builder,
builder_prefix_arguments,
venv_relative_python, toolchain, runner)
python27_config = _python_config_generator(
name='py27',
major='2',
minor='7',
bits=bits,
config_vars=config_vars)
python34_config = _python_config_generator(
name='py34',
major='3',
minor='4',
bits=bits,
config_vars=config_vars)
python35_config = _python_config_generator(
name='py35',
major='3',
minor='5',
bits=bits,
config_vars=config_vars)
python36_config = _python_config_generator(
name='py36',
major='3',
minor='6',
bits=bits,
config_vars=config_vars)
pypy27_config = _pypy_config_generator(
name='pypy', major='2', config_vars=config_vars)
pypy32_config = _pypy_config_generator(
name='pypy3', major='3', config_vars=config_vars)
if args.compiler == 'default':
if os.name == 'nt':
return (python35_config,)
else:
return (python27_config, python34_config,)
elif args.compiler == 'python2.7':
return (python27_config,)
elif args.compiler == 'python3.4':
return (python34_config,)
elif args.compiler == 'python3.5':
return (python35_config,)
elif args.compiler == 'python3.6':
return (python36_config,)
elif args.compiler == 'pypy':
return (pypy27_config,)
elif args.compiler == 'pypy3':
return (pypy32_config,)
elif args.compiler == 'python_alpine':
return (python27_config,)
elif args.compiler == 'all_the_cpythons':
return (python27_config, python34_config, python35_config,
python36_config,)
else:
raise Exception('Compiler %s not supported.' % args.compiler)
def __str__(self):
return 'python'
class RubyLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
tests = [
self.config.job_spec(
['tools/run_tests/helper_scripts/run_ruby.sh'],
timeout_seconds=10 * 60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)
]
tests.append(
self.config.job_spec(
['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
timeout_seconds=10 * 60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return tests
def pre_build_steps(self):
return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
self.args.arch)
def __str__(self):
return 'ruby'
class CSharpLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
_check_compiler(self.args.compiler, ['coreclr', 'default'])
_check_arch(self.args.arch, ['default'])
self._cmake_arch_option = 'x64'
self._make_options = []
else:
_check_compiler(self.args.compiler, ['default', 'coreclr'])
self._docker_distro = 'jessie'
if self.platform == 'mac':
# TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
self._make_options = ['EMBED_OPENSSL=true']
if self.args.compiler != 'coreclr':
# On Mac, official distribution of mono is 32bit.
self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
else:
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
with open('src/csharp/tests.json') as f:
tests_by_assembly = json.load(f)
msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
nunit_args = ['--labels=All', '--noresult', '--workers=1']
assembly_subdir = 'bin/%s' % msbuild_config
assembly_extension = '.exe'
if self.args.compiler == 'coreclr':
assembly_subdir += '/netcoreapp1.0'
runtime_cmd = ['dotnet', 'exec']
assembly_extension = '.dll'
else:
assembly_subdir += '/net45'
if self.platform == 'windows':
runtime_cmd = []
else:
runtime_cmd = ['mono']
specs = []
for assembly in six.iterkeys(tests_by_assembly):
assembly_file = 'src/csharp/%s/%s/%s%s' % (
assembly, assembly_subdir, assembly, assembly_extension)
if self.config.build_config != 'gcov' or self.platform != 'windows':
# normally, run each test as a separate process
for test in tests_by_assembly[assembly]:
cmdline = runtime_cmd + [assembly_file, '--test=%s' % test
] + nunit_args
specs.append(
self.config.job_spec(
cmdline,
shortname='csharp.%s' % test,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
else:
# For C# test coverage, run all tests from the same assembly at once
# using OpenCover.Console (only works on Windows).
cmdline = [
'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
'-target:%s' % assembly_file, '-targetdir:src\\csharp',
'-targetargs:%s' % ' '.join(nunit_args),
'-filter:+[Grpc.Core]*', '-register:user',
'-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
]
# set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
# to prevent problems with registering the profiler.
run_exclusive = 1000000
specs.append(
self.config.job_spec(
cmdline,
shortname='csharp.coverage.%s' % assembly,
cpu_cost=run_exclusive,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return specs
def pre_build_steps(self):
if self.platform == 'windows':
return [[
'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
self._cmake_arch_option
]]
else:
return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
def make_targets(self):
return ['grpc_csharp_ext']
def make_options(self):
return self._make_options
def build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/build_csharp.sh']]
def post_tests_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
def makefile_name(self):
if self.platform == 'windows':
return 'cmake/build/%s/Makefile' % self._cmake_arch_option
else:
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/csharp_%s_%s' % (
self._docker_distro, _docker_arch_suffix(self.args.arch))
def __str__(self):
return 'csharp'
class ObjCLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [
self.config.job_spec(
['src/objective-c/tests/run_tests.sh'],
timeout_seconds=60 * 60,
shortname='objc-tests',
cpu_cost=1e6,
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
self.config.job_spec(
['src/objective-c/tests/run_plugin_tests.sh'],
timeout_seconds=60 * 60,
shortname='objc-plugin-tests',
cpu_cost=1e6,
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=10 * 60,
shortname='objc-build-example-helloworld',
cpu_cost=1e6,
environ={
'SCHEME': 'HelloWorld',
'EXAMPLE_PATH': 'examples/objective-c/helloworld'
}),
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=10 * 60,
shortname='objc-build-example-routeguide',
cpu_cost=1e6,
environ={
'SCHEME': 'RouteGuideClient',
'EXAMPLE_PATH': 'examples/objective-c/route_guide'
}),
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=10 * 60,
shortname='objc-build-example-authsample',
cpu_cost=1e6,
environ={
'SCHEME': 'AuthSample',
'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
}),
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=10 * 60,
shortname='objc-build-example-sample',
cpu_cost=1e6,
environ={
'SCHEME': 'Sample',
'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
}),
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=10 * 60,
shortname='objc-build-example-sample-frameworks',
cpu_cost=1e6,
environ={
'SCHEME': 'Sample',
'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
'FRAMEWORKS': 'YES'
}),
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=10 * 60,
                shortname='objc-build-example-swiftsample',
cpu_cost=1e6,
environ={
'SCHEME': 'SwiftSample',
'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
}),
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['interop_server']
def make_options(self):
return []
def build_steps(self):
return [['src/objective-c/tests/build_tests.sh']]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return None
def __str__(self):
return 'objc'
class Sanity(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
import yaml
with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
environ = {'TEST': 'true'}
if _is_use_docker_child():
environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
return [
self.config.job_spec(
cmd['script'].split(),
timeout_seconds=30 * 60,
environ=environ,
cpu_cost=cmd.get('cpu_cost', 1)) for cmd in yaml.load(f)
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['run_dep_checks']
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/sanity'
def __str__(self):
return 'sanity'
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
_CONFIGS = dict((cfg['config'], Config(**cfg))
for cfg in ast.literal_eval(f.read()))
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
'c': CLanguage('c', 'c'),
'grpc-node': RemoteNodeLanguage(),
'php': PhpLanguage(),
'php7': Php7Language(),
'python': PythonLanguage(),
'ruby': RubyLanguage(),
'csharp': CSharpLanguage(),
'objc': ObjCLanguage(),
'sanity': Sanity()
}
_MSBUILD_CONFIG = {
'dbg': 'Debug',
'opt': 'Release',
'gcov': 'Debug',
}
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
"""Checks that architecture option is valid."""
if platform_string() == 'windows':
_windows_arch_option(arch)
elif platform_string() == 'linux':
# On linux, we need to be running under docker with the right architecture.
runtime_arch = platform.architecture()[0]
if arch == 'default':
return
elif runtime_arch == '64bit' and arch == 'x64':
return
elif runtime_arch == '32bit' and arch == 'x86':
return
else:
print('Architecture %s does not match current runtime architecture.'
% arch)
sys.exit(1)
else:
        if arch != 'default':
            print('Architecture %s not supported on current platform.' %
                  arch)
sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
"""Auxilary function to parse the "runs_per_test" flag.
Returns:
A positive integer or 0, the latter indicating an infinite number of
runs.
Raises:
argparse.ArgumentTypeError: Upon invalid input.
"""
if arg_str == 'inf':
return 0
try:
n = int(arg_str)
if n <= 0: raise ValueError
return n
    except ValueError:
msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
raise argparse.ArgumentTypeError(msg)
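# A quick illustration of the parser above (inputs assumed):
#   runs_per_test_type('inf') -> 0   (0 encodes "run forever")
#   runs_per_test_type('3')   -> 3
#   runs_per_test_type('-1')  -> raises argparse.ArgumentTypeError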
def percent_type(arg_str):
pct = float(arg_str)
if pct > 100 or pct < 0:
raise argparse.ArgumentTypeError(
"'%f' is not a valid percentage in the [0, 100] range" % pct)
return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
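# Examples (values chosen for illustration):
#   isclose(1.0, 1.0 + 1e-10)            -> True   (within rel_tol=1e-09)
#   isclose(0.0, 1e-10)                  -> False  (abs_tol defaults to 0.0)
#   isclose(0.0, 1e-10, abs_tol=1e-09)   -> True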
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument(
'-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
argp.add_argument(
'-n',
'--runs_per_test',
default=1,
type=runs_per_test_type,
help='A positive integer or "inf". If "inf", all tests will run in an '
'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument(
'-p',
'--sample_percent',
default=100.0,
type=percent_type,
help='Run a random sample with that percentage of tests')
argp.add_argument(
'-f', '--forever', default=False, action='store_const', const=True)
argp.add_argument(
'-t', '--travis', default=False, action='store_const', const=True)
argp.add_argument(
'--newline_on_success', default=False, action='store_const', const=True)
argp.add_argument(
'-l',
'--language',
choices=['all'] + sorted(_LANGUAGES.keys()),
nargs='+',
default=['all'])
argp.add_argument(
'-S', '--stop_on_failure', default=False, action='store_const', const=True)
argp.add_argument(
'--use_docker',
default=False,
action='store_const',
const=True,
help='Run all the tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
argp.add_argument(
'--allow_flakes',
default=False,
action='store_const',
const=True,
help='Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
argp.add_argument(
'--arch',
choices=['default', 'x86', 'x64'],
default='default',
help='Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
'--compiler',
choices=[
'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'python2.7',
'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine',
'all_the_cpythons', 'electron1.3', 'electron1.6', 'coreclr', 'cmake',
'cmake_vs2015', 'cmake_vs2017'
],
default='default',
help='Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument(
'--iomgr_platform',
choices=['native', 'uv'],
default='native',
help='Selects iomgr platform to build on')
argp.add_argument(
'--build_only',
default=False,
action='store_const',
const=True,
help='Perform all the build steps but don\'t run any tests.')
argp.add_argument(
'--measure_cpu_costs',
default=False,
action='store_const',
const=True,
help='Measure the cpu costs of tests')
argp.add_argument(
'--update_submodules',
default=[],
nargs='*',
help='Update some submodules before building. If any are updated, also run generate_projects. '
+
'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument(
'-x',
'--xml_report',
default=None,
type=str,
help='Generates a JUnit-compatible XML report')
argp.add_argument(
'--report_suite_name',
default='tests',
type=str,
help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
'--quiet_success',
default=False,
action='store_const',
const=True,
help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
+ 'Useful when running many iterations of each test (argument -n).')
argp.add_argument(
'--force_default_poller',
default=False,
action='store_const',
const=True,
help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
'--force_use_pollers',
default=None,
type=str,
help='Only use the specified comma-delimited list of polling engines. '
'Example: --force_use_pollers epollsig,poll '
' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument(
'--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
argp.add_argument(
'--bq_result_table',
default='',
type=str,
nargs='?',
help='Upload test results to a specified BQ table.')
argp.add_argument(
'--disable_auto_set_flakes',
default=False,
const=True,
action='store_const',
help='Disable rerunning historically flaky tests')
args = argp.parse_args()
flaky_tests = set()
shortname_to_cpu = {}
if not args.disable_auto_set_flakes:
try:
for test in get_bqtest_data():
if test.flaky: flaky_tests.add(test.name)
if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
    except Exception:
print("Unexpected error getting flaky tests: %s" %
traceback.format_exc())
if args.force_default_poller:
_POLLING_STRATEGIES = {}
elif args.force_use_pollers:
_POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
spec = spec.split(':', 1)
if len(spec) == 1:
submodule = spec[0]
branch = 'master'
elif len(spec) == 2:
submodule = spec[0]
branch = spec[1]
cwd = 'third_party/%s' % submodule
def git(cmd, cwd=cwd):
print('in %s: git %s' % (cwd, cmd))
run_shell_command('git %s' % cmd, cwd=cwd)
git('fetch')
git('checkout %s' % branch)
git('pull origin %s' % branch)
if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
need_to_regenerate_projects = True
if need_to_regenerate_projects:
if jobset.platform_string() == 'linux':
run_shell_command('tools/buildgen/generate_projects.sh')
else:
print(
'WARNING: may need to regenerate projects, but since we are not on')
print(
' Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
if args.travis:
_FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
if 'all' in args.language:
lang_list = _LANGUAGES.keys()
else:
lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
for bad in ['objc', 'sanity']:
if bad in lang_list:
lang_list.remove(bad)
languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
l.configure(run_config, args)
language_make_options = []
if any(language.make_options() for language in languages):
if not 'gcov' in args.config and len(languages) != 1:
print(
'languages with custom make options cannot be built simultaneously with other languages'
)
sys.exit(1)
else:
# Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
# together, and is only used under gcov. All other configs should build languages individually.
language_make_options = list(
set([
make_option
for lang in languages for make_option in lang.make_options()
]))
if args.use_docker:
if not args.travis:
print('Seen --use_docker flag, will run tests under docker.')
print('')
print(
'IMPORTANT: The changes you are testing need to be locally committed'
)
print(
'because only the committed changes in the current branch will be')
print('copied to the docker environment.')
time.sleep(5)
dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
if len(dockerfile_dirs) > 1:
if 'gcov' in args.config:
dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
print(
'Using multilang_jessie_x64 docker image for code coverage for '
'all languages.')
else:
print(
'Languages to be tested require running under different docker '
'images.')
sys.exit(1)
else:
dockerfile_dir = next(iter(dockerfile_dirs))
child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
child_argv[1:])
env = os.environ.copy()
env['RUN_TESTS_COMMAND'] = run_tests_cmd
env['DOCKERFILE_DIR'] = dockerfile_dir
env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
if args.xml_report:
env['XML_REPORT'] = args.xml_report
if not args.travis:
env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
subprocess.check_call(
'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
shell=True,
env=env)
sys.exit(0)
_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
if platform_string() == 'windows':
return [
jobset.JobSpec(
[
'cmake', '--build', '.', '--target', '%s' % target,
'--config', _MSBUILD_CONFIG[cfg]
],
cwd=os.path.dirname(makefile),
timeout_seconds=None) for target in targets
]
else:
if targets and makefile.startswith('cmake/build/'):
# With cmake, we've passed all the build configuration in the pre-build step already
return [
jobset.JobSpec(
[os.getenv('MAKE', 'make'), '-j', '%d' % args.jobs] +
targets,
cwd='cmake/build',
timeout_seconds=None)
]
if targets:
return [
jobset.JobSpec(
[
os.getenv('MAKE', 'make'), '-f', makefile, '-j', '%d' %
args.jobs,
'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
args.slowdown, 'CONFIG=%s' % cfg, 'Q='
] + language_make_options +
([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
timeout_seconds=None)
]
else:
return []
make_targets = {}
for l in languages:
makefile = l.makefile_name()
make_targets[makefile] = make_targets.get(
makefile, set()).union(set(l.make_targets()))
def build_step_environ(cfg):
environ = {'CONFIG': cfg}
msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
if msbuild_cfg:
environ['MSBUILD_CONFIG'] = msbuild_cfg
return environ
build_steps = list(
set(
jobset.JobSpec(
cmdline, environ=build_step_environ(build_config), flake_retries=2)
for l in languages for cmdline in l.pre_build_steps()))
if make_targets:
make_commands = itertools.chain.from_iterable(
make_jobspec(build_config, list(targets), makefile)
for (makefile, targets) in make_targets.items())
build_steps.extend(set(make_commands))
build_steps.extend(
set(
jobset.JobSpec(
cmdline,
environ=build_step_environ(build_config),
timeout_seconds=None)
for l in languages for cmdline in l.build_steps()))
post_tests_steps = list(
set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
for l in languages for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
try:
version = int(
urllib.request.urlopen(
'http://localhost:%d/version_number' % legacy_server_port,
timeout=10).read())
except:
pass
else:
urllib.request.urlopen('http://localhost:%d/quitquitquit' %
legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
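# Illustrative example (not part of the original script): for a single
# JobResult-like object with retries == 2 and num_failures == 1, the helper
# above reports (num_runs, num_failures) == (3, 1): one initial run plus two
# retries, one of which failed.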
# _build_and_run results
class BuildAndRunError(object):
BUILD = object()
TEST = object()
POST_TEST = object()
def _has_epollexclusive():
binary = 'bins/%s/check_epollexclusive' % args.config
if not os.path.exists(binary):
return False
try:
subprocess.check_call(binary)
return True
  except subprocess.CalledProcessError:
return False
  except OSError:
# For languages other than C and Windows the binary won't exist
return False
# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
newline_on_success,
xml_report=None,
build_only=False):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(
build_steps,
maxjobs=1,
stop_on_failure=True,
newline_on_success=newline_on_success,
travis=args.travis)
if num_failures:
return [BuildAndRunError.BUILD]
if build_only:
if xml_report:
report_utils.render_junit_xml_report(
resultset, xml_report, suite_name=args.report_suite_name)
return []
if not args.travis and not _has_epollexclusive() and platform_string(
) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
platform_string()]:
print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
_POLLING_STRATEGIES[platform_string()].remove('epollex')
# start antagonists
antagonists = [
subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)
]
start_port_server.start_port_server()
resultset = None
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
one_run = set(spec
for language in languages
for spec in language.test_specs()
if (re.search(args.regex, spec.shortname) and (
args.regex_exclude == '' or not re.search(
args.regex_exclude, spec.shortname))))
    # When running on travis, we want our test runs to be as similar as possible
# for reproducibility purposes.
if args.travis and args.max_time <= 0:
massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
massaged_one_run = list(
one_run) # random.sample needs an indexable seq.
num_jobs = len(massaged_one_run)
# for a random sample, get as many as indicated by the 'sample_percent'
# argument. By default this arg is 100, resulting in a shuffle of all
# jobs.
sample_size = int(num_jobs * args.sample_percent / 100.0)
massaged_one_run = random.sample(massaged_one_run, sample_size)
if not isclose(args.sample_percent, 100.0):
assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
print("Running %d tests out of %d (~%d%%)" %
(sample_size, num_jobs, args.sample_percent))
if infinite_runs:
assert len(massaged_one_run
) > 0, 'Must have at least one test for a -n inf run'
runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
jobset.message(
'START',
'Running tests quietly, only failing tests will be reported',
do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs,
check_cancelled,
newline_on_success=newline_on_success,
travis=args.travis,
maxjobs=args.jobs,
maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
stop_on_failure=args.stop_on_failure,
quiet_success=args.quiet_success,
max_time=args.max_time)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures > 0:
if num_failures == num_runs: # what about infinite_runs???
jobset.message('FAILED', k, do_newline=True)
else:
jobset.message(
'FLAKE',
'%s [%d/%d runs flaked]' %
(k, num_failures, num_runs),
do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
if args.bq_result_table and resultset:
upload_results_to_bq(resultset, args.bq_result_table, args,
platform_string())
if xml_report and resultset:
report_utils.render_junit_xml_report(
resultset, xml_report, suite_name=args.report_suite_name)
number_failures, _ = jobset.run(
post_tests_steps,
maxjobs=1,
stop_on_failure=False,
newline_on_success=newline_on_success,
travis=args.travis)
out = []
if number_failures:
out.append(BuildAndRunError.POST_TEST)
if num_test_failures:
out.append(BuildAndRunError.TEST)
return out
if forever:
success = True
while True:
dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
initial_time = dw.most_recent_change()
have_files_changed = lambda: dw.most_recent_change() != initial_time
previous_success = success
    errors = _build_and_run(
        check_cancelled=have_files_changed,
        newline_on_success=False,
        build_only=args.build_only)
    success = not errors
    if not previous_success and success:
jobset.message(
'SUCCESS',
'All tests are now passing properly',
do_newline=True)
jobset.message('IDLE', 'No change detected')
while not have_files_changed():
time.sleep(1)
else:
errors = _build_and_run(
check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
xml_report=args.xml_report,
build_only=args.build_only)
if not errors:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
else:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
exit_code = 0
if BuildAndRunError.BUILD in errors:
exit_code |= 1
if BuildAndRunError.TEST in errors:
exit_code |= 2
if BuildAndRunError.POST_TEST in errors:
exit_code |= 4
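  # Illustrative note (not from the original source): the error categories are
  # combined as bit flags, so e.g. an exit code of 6 (2 | 4) means both test
  # failures and post-test step failures occurred, while 0 means success.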
sys.exit(exit_code)
|
|
#!/usr/bin/env python
# Copyright 2016 DIANA-HEP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import sys
# Definitions for python 2/3 compatibility
if sys.version_info[0] > 2:
basestring = str
long = int
MAX_REPR = 50
class JsonObject(dict):
def __init__(self, *pairs, **kwarg):
        if len(pairs) == 1 and isinstance(pairs[0], dict):
            self._pairs = tuple(pairs[0].items())
else:
self._pairs = pairs
if len(kwarg) > 0:
self._pairs = self._pairs + tuple(kwarg.items())
if any(not isinstance(kv, tuple) or len(kv) != 2 for kv in self._pairs):
raise TypeError("JsonObject pairs must all be two-element tuples")
if any(not isinstance(k, basestring) or not (v is None or isinstance(
v, (basestring, bool, int, long, float, JsonObject, JsonArray))) for k, v in self._pairs):
raise TypeError(
"JsonObject keys must be strings and values must be (string, bool, int, float, JsonObject, JsonArray)")
def toJsonString(self, prefix="", indent=2):
out = ["{\n", prefix, " "]
first = True
for k, v in self._pairs:
if first:
first = False
else:
out.append(",\n")
out.append(prefix)
out.append(" ")
out.append(json.dumps(k))
out.append(": ")
if isinstance(v, float) and (math.isnan(v) or math.isinf(v)):
raise ValueError("cannot JSON-serialize NaN or Infinity")
elif isinstance(v, (basestring, bool, int, long, float)):
v = json.dumps(v)
else:
v = v.toJsonString(prefix + (" " * indent), indent)
out.append(v)
out.append("\n")
out.append(prefix)
out.append("}")
return "".join(out)
def _index(self, key):
for i, (k, v) in enumerate(self._pairs):
if k == key:
return i
return -1
def set(self, *path, **kwds):
if "to" not in kwds:
raise TypeError("missing keyword argument 'to' in set(path, to=value)")
elif len(kwds) != 1:
raise TypeError("unrecognized keyword arguments in set(path, to=value)")
value = kwds["to"]
if len(path) < 1:
raise TypeError("missing path in set(path, to=value)")
key = path[0]
index = self._index(key)
if len(path) == 1:
if index == -1:
return JsonObject(*(self._pairs + ((key, value),)))
else:
return JsonObject(*[(key, value) if k == key else (k, v) for k, v in self._pairs])
else:
if index == -1:
raise ValueError("JsonObject field {0} does not contain path ({1})".format(
repr(key), ", ".join(map(repr, path[1:]))))
elif not isinstance(self._pairs[index][1], (JsonObject, JsonArray)):
raise ValueError("JsonObject field {0} does not contain path ({1})".format(
repr(key), ", ".join(map(repr, path[1:]))))
else:
return JsonObject(*[(k, v.set(*path[1:], **kwds)) if k == key else (k, v) for k, v in self._pairs])
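    # Illustrative example (not part of the original source): set() never
    # mutates the receiver; it returns a new JsonObject with the change applied,
    # e.g. JsonObject(("a", 1), ("b", 2)).set("a", to=3) yields a new object
    # holding {"a": 3, "b": 2}, while the original still holds {"a": 1, "b": 2}.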
def without(self, *path):
if len(path) < 1:
raise TypeError("missing path in without(path)")
key = path[0]
index = self._index(key)
if len(path) == 1:
if index == -1:
return self
else:
return JsonObject(*[(k, v) for k, v in self._pairs if k != key])
else:
if index == -1:
raise ValueError("JsonObject field {0} does not contain path ({1})".format(
repr(key), ", ".join(map(repr, path[1:]))))
elif not isinstance(self._pairs[index][1], (JsonObject, JsonArray)):
raise ValueError("JsonObject field {0} does not contain path ({1})".format(
repr(key), ", ".join(map(repr, path[1:]))))
else:
return JsonObject(*[(k, v.without(*path[1:])) if k == key else (k, v) for k, v in self._pairs])
def overlay(self, other):
out = self
for k, v in other.items():
out = out.set(k, to=v)
return out
# override built-in dict methods
def __cmp__(self, other):
def cmp(a, b):
return (a > b) - (a < b)
return cmp(dict(self._pairs), dict(other._pairs))
def __contains__(self, key):
return any(k == key for k, v in self._pairs)
    def __delattr__(self, key):
raise TypeError("JsonObject cannot be changed in-place; no immutable equivalent")
def __delitem__(self, key):
raise TypeError("JsonObject cannot be changed in-place; use .without(key)")
def __eq__(self, other):
return isinstance(other, JsonObject) and self._pairs == other._pairs
def __format__(self, format_spec):
return str(self)
def __getitem__(self, key):
index = self._index(key)
if index == -1:
raise KeyError(key)
else:
return self._pairs[index][1]
def __hash__(self):
return hash(("JsonObject", self._pairs))
def __iter__(self):
return self.keys()
def __len__(self):
return len(self._pairs)
def __reduce__(self):
return self.__reduce_ex__(0)
def __reduce_ex__(self, protocol):
return (self.__class__, self._pairs)
def __repr__(self):
out = "{"
first = True
for k, v in self._pairs:
if first:
first = False
else:
out += ", "
if len(out) > MAX_REPR - 1:
break
if isinstance(v, (basestring, bool, int, long, float)):
v = json.dumps(v)
else:
v = repr(v)
out += json.dumps(k) + ": " + v
if len(out) > MAX_REPR - 1:
out = out[:(MAX_REPR - 4)] + "..."
return out + "}"
def __setitem__(self, key, value):
raise TypeError("JsonObject cannot be changed in-place; use .set(path, to=value)")
def __sizeof__(self):
return super(dict, self).__sizeof__()
def __str__(self):
out = ["{"]
first = True
for k, v in self._pairs:
if first:
first = False
else:
out.append(",")
out.append(json.dumps(k))
out.append(":")
if isinstance(v, (basestring, bool, int, long, float)):
v = json.dumps(v)
else:
v = str(v)
out.append(v)
out.append("}")
return "".join(out)
def clear(self):
raise TypeError("JsonObject cannot be changed in-place; use JsonObject() constructor to make a new one")
def copy(self):
return self # because we're immutable
def __copy__(self):
return self # because we're immutable
def __deepcopy__(self, memo):
return self # because we're immutable
def get(self, key, default=None):
index = self._index(key)
if index == -1:
return default
else:
return self._pairs[index][1]
def has_key(self, key):
return key in self
def items(self):
for k, v in self._pairs:
yield k, v
def iteritems(self):
return self.items()
def iterkeys(self):
return self.keys()
def itervalues(self):
return self.values()
def keys(self):
for k, v in self._pairs:
yield k
def pop(self, key, default=None):
raise TypeError("JsonObject cannot be changed in-place; no immutable equivalent")
def popitem(self, key, default=None):
raise TypeError("JsonObject cannot be changed in-place; no immutable equivalent")
def setdefault(self, key, default=None):
raise TypeError("JsonObject cannot be changed in-place; no immutable equivalent")
def update(self, other):
raise TypeError("JsonObject cannot be changed in-place; use .overlay(other)")
def values(self):
for k, v in self._pairs:
yield v
def viewitems(self):
return self.items()
def viewkeys(self):
return self.keys()
def viewvalues(self):
return self.values()
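# Illustrative usage sketch (not part of the original module): JsonObject (and
# JsonArray below) are immutable, so every edit produces a new object, e.g.
#
#   doc = JsonObject(("name", "hist"), ("count", 3))
#   doc2 = doc.set("count", to=4)    # doc itself is unchanged
#   text = doc2.toJsonString()       # pretty-printed JSON text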
class JsonArray(tuple):
    def __new__(cls, *values):
        # tuple is immutable, so the values have to be passed through __new__
        # for the tuple base class to actually store them.
        return super(JsonArray, cls).__new__(cls, values)
    def __init__(self, *values):
        self._values = values
if any(not (v is None or isinstance(v, (basestring, bool, int, long, float, JsonObject, JsonArray)))
for v in self._values):
raise TypeError("JsonArray values must be (string, bool, int, float, JsonObject, JsonArray)")
def toJsonString(self, prefix="", indent=2):
out = [prefix, "[\n", prefix, " "]
first = True
for v in self._values:
if first:
first = False
else:
out.append(",\n")
out.append(prefix)
out.append(" ")
if isinstance(v, float) and (math.isnan(v) or math.isinf(v)):
raise ValueError("cannot JSON-serialize NaN or Infinity")
elif isinstance(v, (basestring, bool, int, long, float)):
v = json.dumps(v)
else:
v = v.toJsonString(prefix + (" " * indent), indent)
out.append(v)
out.append("\n")
out.append(prefix)
out.append("]")
return "".join(out)
# override built-in tuple methods
# __add__
# __cmp__
# __contains__
# __delattr__
# __delitem__
# __eq__
# __format__
# __getitem__
# __getnewargs__
# __getslice__
# __hash__
# __iter__
# __len__
# __mul__
# __reduce__
# __reduce_ex__
def __repr__(self):
out = "["
first = True
for v in self._values:
if first:
first = False
else:
out += ", "
if len(out) > MAX_REPR - 1:
break
out += repr(v)
if len(out) > MAX_REPR - 1:
out = out[:(MAX_REPR - 4)] + "..."
return out + "]"
# __rmul__
# __sizeof__
def __str__(self):
out = ["["]
        first = True
for v in self._values:
if first:
first = False
else:
out.append(",")
if isinstance(v, (basestring, bool, int, long, float)):
v = json.dumps(v)
else:
v = str(v)
out.append(v)
out.append("]")
return "".join(out)
# count
# index
|
|
import logging
import os
import textwrap
from optparse import Values
from typing import Any, List
import pip._internal.utils.filesystem as filesystem
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.exceptions import CommandError, PipError
logger = logging.getLogger(__name__)
class CacheCommand(Command):
"""
Inspect and manage pip's wheel cache.
Subcommands:
- dir: Show the cache directory.
- info: Show information about the cache.
- list: List filenames of packages stored in the cache.
    - remove: Remove one or more packages from the cache.
- purge: Remove all items from the cache.
``<pattern>`` can be a glob expression or a package name.
"""
ignore_require_venv = True
usage = """
%prog dir
%prog info
%prog list [<pattern>] [--format=[human, abspath]]
%prog remove <pattern>
%prog purge
"""
def add_options(self):
# type: () -> None
self.cmd_opts.add_option(
'--format',
action='store',
dest='list_format',
default="human",
choices=('human', 'abspath'),
help="Select the output format among: human (default) or abspath"
)
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
# type: (Values, List[Any]) -> int
handlers = {
"dir": self.get_cache_dir,
"info": self.get_cache_info,
"list": self.list_cache_items,
"remove": self.remove_cache_items,
"purge": self.purge_cache,
}
if not options.cache_dir:
logger.error("pip cache commands can not "
"function since cache is disabled.")
return ERROR
# Determine action
if not args or args[0] not in handlers:
logger.error(
"Need an action (%s) to perform.",
", ".join(sorted(handlers)),
)
return ERROR
action = args[0]
# Error handling happens here, not in the action-handlers.
try:
handlers[action](options, args[1:])
except PipError as e:
logger.error(e.args[0])
return ERROR
return SUCCESS
def get_cache_dir(self, options, args):
# type: (Values, List[Any]) -> None
if args:
raise CommandError('Too many arguments')
logger.info(options.cache_dir)
def get_cache_info(self, options, args):
# type: (Values, List[Any]) -> None
if args:
raise CommandError('Too many arguments')
num_http_files = len(self._find_http_files(options))
num_packages = len(self._find_wheels(options, '*'))
http_cache_location = self._cache_dir(options, 'http')
wheels_cache_location = self._cache_dir(options, 'wheels')
http_cache_size = filesystem.format_directory_size(http_cache_location)
wheels_cache_size = filesystem.format_directory_size(
wheels_cache_location
)
message = textwrap.dedent("""
Package index page cache location: {http_cache_location}
Package index page cache size: {http_cache_size}
Number of HTTP files: {num_http_files}
Wheels location: {wheels_cache_location}
Wheels size: {wheels_cache_size}
Number of wheels: {package_count}
""").format(
http_cache_location=http_cache_location,
http_cache_size=http_cache_size,
num_http_files=num_http_files,
wheels_cache_location=wheels_cache_location,
package_count=num_packages,
wheels_cache_size=wheels_cache_size,
).strip()
logger.info(message)
def list_cache_items(self, options, args):
# type: (Values, List[Any]) -> None
if len(args) > 1:
raise CommandError('Too many arguments')
if args:
pattern = args[0]
else:
pattern = '*'
files = self._find_wheels(options, pattern)
if options.list_format == 'human':
self.format_for_human(files)
else:
self.format_for_abspath(files)
def format_for_human(self, files):
# type: (List[str]) -> None
if not files:
logger.info('Nothing cached.')
return
results = []
for filename in files:
wheel = os.path.basename(filename)
size = filesystem.format_file_size(filename)
results.append(f' - {wheel} ({size})')
logger.info('Cache contents:\n')
logger.info('\n'.join(sorted(results)))
def format_for_abspath(self, files):
# type: (List[str]) -> None
if not files:
return
results = []
for filename in files:
results.append(filename)
logger.info('\n'.join(sorted(results)))
def remove_cache_items(self, options, args):
# type: (Values, List[Any]) -> None
if len(args) > 1:
raise CommandError('Too many arguments')
if not args:
raise CommandError('Please provide a pattern')
files = self._find_wheels(options, args[0])
# Only fetch http files if no specific pattern given
if args[0] == '*':
files += self._find_http_files(options)
if not files:
raise CommandError('No matching packages')
for filename in files:
os.unlink(filename)
logger.debug('Removed %s', filename)
logger.info('Files removed: %s', len(files))
def purge_cache(self, options, args):
# type: (Values, List[Any]) -> None
if args:
raise CommandError('Too many arguments')
return self.remove_cache_items(options, ['*'])
def _cache_dir(self, options, subdir):
# type: (Values, str) -> str
return os.path.join(options.cache_dir, subdir)
def _find_http_files(self, options):
# type: (Values) -> List[str]
http_dir = self._cache_dir(options, 'http')
return filesystem.find_files(http_dir, '*')
def _find_wheels(self, options, pattern):
# type: (Values, str) -> List[str]
wheel_dir = self._cache_dir(options, 'wheels')
# The wheel filename format, as specified in PEP 427, is:
# {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
#
# Additionally, non-alphanumeric values in the distribution are
# normalized to underscores (_), meaning hyphens can never occur
# before `-{version}`.
#
# Given that information:
# - If the pattern we're given contains a hyphen (-), the user is
# providing at least the version. Thus, we can just append `*.whl`
# to match the rest of it.
# - If the pattern we're given doesn't contain a hyphen (-), the
# user is only providing the name. Thus, we append `-*.whl` to
# match the hyphen before the version, followed by anything else.
#
# PEP 427: https://www.python.org/dev/peps/pep-0427/
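        # Illustrative examples (not from the original source): a pattern of
        # "requests" has no hyphen and becomes "requests-*.whl", while
        # "requests-2.25.0" already names a version and becomes
        # "requests-2.25.0*.whl".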
pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl")
return filesystem.find_files(wheel_dir, pattern)
|
|
import os
import json
import unittest
from decimal import Decimal
from urllib import urlencode
from urlparse import urlparse
from datetime import date, datetime, timedelta
from mock import Mock
from django.apps import apps
from django.db import models, connection, IntegrityError
from django.db.models import F
from django.db.models.base import ModelBase
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.management import call_command
from django.core.management.color import no_style
from django.contrib.auth.models import User, Group, Permission
from django.contrib.auth.decorators import login_required
from django.contrib.auth.management import create_permissions
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.management import update_contenttypes
from django.test import TestCase, TransactionTestCase
from django.test.client import MULTIPART_CONTENT, Client
from django.http.request import HttpRequest
from trusts.models import Trust, TrustManager, Content, Junction, \
Role, RolePermission, TrustUserPermission
from trusts.backends import TrustModelBackend
from trusts.decorators import permission_required
def create_test_users(test):
# Create a user.
test.username = 'daniel'
test.password = 'pass'
test.user = User.objects.create_user(test.username, 'daniel@example.com', test.password)
test.user.is_active = True
test.user.save()
# Create another
test.name1 = 'anotheruser'
test.pass1 = 'pass'
test.user1 = User.objects.create_user(test.name1, 'another@example.com', test.pass1)
test.user1.is_active = True
test.user1.save()
def get_or_create_root_user(test):
# Create a user.
pk = getattr(settings, 'TRUSTS_ROOT_SETTLOR', 1)
test.user_root, created = User.objects.get_or_create(pk=pk)
def reload_test_users(self):
# reloading user to purge the _trust_perm_cache
self.user_root = User._default_manager.get(pk=self.user_root.pk)
self.user = User._default_manager.get(pk=self.user.pk)
self.user1 = User._default_manager.get(pk=self.user1.pk)
class TrustTest(TestCase):
ROOT_PK = getattr(settings, 'TRUSTS_ROOT_PK', 1)
SETTLOR_PK = getattr(settings, 'TRUSTS_ROOT_SETTLOR', None)
def setUp(self):
super(TrustTest, self).setUp()
call_command('create_trust_root')
get_or_create_root_user(self)
create_test_users(self)
def get_perm_code(self, perm):
return '%s.%s' % (
perm.content_type.app_label, perm.codename
)
def test_root(self):
root = Trust.objects.get_root()
self.assertEqual(root.pk, self.ROOT_PK)
self.assertEqual(root.pk, root.trust.pk)
self.assertEqual(Trust.objects.filter(trust=F('id')).count(), 1)
def test_trust_unique_together_title_settlor(self):
# Create `Title A` for user
self.trust = Trust(settlor=self.user, title='Title A', trust=Trust.objects.get_root())
self.trust.save()
# Create `Title A` for user1
self.trust1 = Trust(settlor=self.user1, title='Title A', trust=Trust.objects.get_root())
self.trust1.save()
# Empty string title should be allowed (reserved for settlor_default)
self.trust = Trust(settlor=self.user, title='', trust=Trust.objects.get_root())
self.trust.save()
# Create `Title A` for user, again (should fail)
try:
self.trust2 = Trust(settlor=self.user, title='Title A', trust=Trust.objects.get_root())
self.trust2.save()
self.fail('Expected IntegrityError not raised.')
except IntegrityError as ie:
pass
def test_read_permissions_added(self):
ct = ContentType.objects.get_for_model(Trust)
self.assertIsNotNone(Permission.objects.get(
content_type=ct,
codename='%s_%s' % ('read', ct.model)
))
def test_filter_by_user_perm(self):
self.trust1, created = Trust.objects.get_or_create_settlor_default(self.user)
self.trust2 = Trust(settlor=self.user, title='Title 0A', trust=Trust.objects.get_root())
self.trust2.save()
tup = TrustUserPermission(trust=self.trust2, entity=self.user, permission=Permission.objects.first())
tup.save()
self.trust3 = Trust(settlor=self.user, title='Title 0B', trust=Trust.objects.get_root())
self.trust3.save()
self.trust4 = Trust(settlor=self.user1, title='Title 1A', trust=Trust.objects.get_root())
self.trust4.save()
tup = TrustUserPermission(trust=self.trust4, entity=self.user, permission=Permission.objects.first())
tup.save()
self.trust5 = Trust(settlor=self.user1, title='Title 1B', trust=Trust.objects.get_root())
self.trust5.save()
self.group = Group(name='Group A')
self.group.save()
self.user.groups.add(self.group)
self.trust5.groups.add(self.group)
self.trust6 = Trust(settlor=self.user1, title='Title 1C', trust=Trust.objects.get_root())
self.trust6.save()
trusts = Trust.objects.filter_by_user_perm(self.user)
trust_pks = [t.pk for t in trusts]
self.assertEqual(trusts.count(), 3)
self.assertTrue(self.trust2.id in trust_pks)
self.assertTrue(self.trust4.id in trust_pks)
self.assertTrue(self.trust5.id in trust_pks)
def test_change_trust(self):
self.trust1 = Trust(settlor=self.user, title='Title 0A', trust=Trust.objects.get_root())
self.trust1.save()
self.trust2 = Trust(settlor=self.user1, title='Title 1A', trust=Trust.objects.get_root())
self.trust2.save()
try:
self.trust2.trust = self.trust1
self.trust2.full_clean()
self.fail('Expected ValidationError not raised.')
except ValidationError as ve:
pass
class DecoratorsTest(TestCase):
def setUp(self):
super(DecoratorsTest, self).setUp()
call_command('create_trust_root')
get_or_create_root_user(self)
create_test_users(self)
def test_permission_required(self):
self.group = Group(name='Group A')
self.group.save()
request = HttpRequest()
setattr(request, 'user', self.user)
request.META['SERVER_NAME'] = 'beedesk.com'
request.META['SERVER_PORT'] = 80
# test a) has_perms() == False
mock = Mock(return_value='Response')
has_perms = Mock(return_value=False)
self.user.has_perms = has_perms
decorated_func = permission_required(
'auth.read_group',
fieldlookups_kwargs={'pk': 'pk'},
raise_exception=False
)(mock)
response = decorated_func(request, pk=self.group.pk)
self.assertFalse(mock.called)
        self.assertEqual(response.status_code, 403)
self.assertTrue(has_perms.called)
self.assertEqual(has_perms.call_args[0][0], ('auth.read_group',))
filter = has_perms.call_args[0][1]
self.assertIsNotNone(filter)
self.assertEqual(filter.count(), 1)
self.assertEqual(filter.first().pk, self.group.pk)
        # test b) has_perms() == True
mock = Mock(return_value='Response')
has_perms = Mock(return_value=True)
self.user.has_perms = has_perms
decorated_func = permission_required(
'auth.read_group',
fieldlookups_kwargs={'pk': 'pk'}
)(mock)
response = decorated_func(request, pk=self.group.pk)
self.assertTrue(mock.called)
mock.assert_called_with(request, pk=self.group.pk)
self.assertEqual(response, 'Response')
self.assertEqual(has_perms.call_args[0][0], ('auth.read_group',))
filter = has_perms.call_args[0][1]
self.assertIsNotNone(filter)
self.assertEqual(filter.count(), 1)
self.assertEqual(filter.first().pk, self.group.pk)
class RuntimeModel(object):
"""
Base class for tests of runtime model mixins.
"""
def setUp(self):
# Create the schema for our test model
self._style = no_style()
sql, _ = connection.creation.sql_create_model(self.model, self._style)
with connection.cursor() as c:
for statement in sql:
c.execute(statement)
content_model = self.content_model if hasattr(self, 'content_model') else self.model
app_config = apps.get_app_config(content_model._meta.app_label)
update_contenttypes(app_config, verbosity=1, interactive=False)
create_permissions(app_config, verbosity=1, interactive=False)
super(RuntimeModel, self).setUp()
def workaround_contenttype_cache_bug(self):
# workaround bug: https://code.djangoproject.com/ticket/10827
from django.contrib.contenttypes.models import ContentType
ContentType.objects.clear_cache()
def tearDown(self):
# Delete the schema for the test model
content_model = self.content_model if hasattr(self, 'content_model') else self.model
sql = connection.creation.sql_destroy_model(self.model, (), self._style)
with connection.cursor() as c:
for statement in sql:
c.execute(statement)
self.workaround_contenttype_cache_bug()
super(RuntimeModel, self).tearDown()
apps.get_app_config('trusts').models.pop(self.model._meta.model_name.lower())
class ContentModel(object):
def create_test_fixtures(self):
self.group = Group(name="Test Group")
self.group.save()
def get_perm_code(self, perm):
return '%s.%s' % (
perm.content_type.app_label, perm.codename
)
def set_perms(self):
for codename in ['change', 'add', 'delete', 'read']:
setattr(self, 'perm_%s' % codename,
Permission.objects.get_by_natural_key('%s_%s' % (codename, self.model_name), self.app_label, self.model_name)
)
def setUp(self):
super(ContentModel, self).setUp()
get_or_create_root_user(self)
call_command('create_trust_root')
create_test_users(self)
self.create_test_fixtures()
content_model = self.content_model if hasattr(self, 'content_model') else self.model
self.app_label = content_model._meta.app_label
self.model_name = content_model._meta.model_name
self.set_perms()
class ContentModelMixin(RuntimeModel, ContentModel):
class CategoryMixin(Content):
name = models.CharField(max_length=40, null=False, blank=False)
class Meta:
abstract = True
default_permissions = ('add', 'read', 'change', 'delete')
permissions = (
('add_topic_to_category', 'Add topic to a category'),
)
roles = (
('public', ('read_category', 'add_topic_to_category')),
('admin', ('read_category', 'add_category', 'change_category', 'add_topic_to_category')),
('write', ('read_category', 'change_category', 'add_topic_to_category')),
)
def setUp(self):
mixin = self.CategoryMixin
# Create a dummy model which extends the mixin
self.model = ModelBase('Category', (mixin, models.Model),
{'__module__': mixin.__module__})
super(ContentModelMixin, self).setUp()
def create_content(self, trust):
content = self.model(trust=trust)
content.save()
return content
def append_model_roles(self, rolename, perms):
self.model._meta.roles += ((rolename, perms, ), )
def remove_model_roles(self, rolename):
self.model._meta.roles = [row for row in self.model._meta.roles if row[0] != rolename]
def get_model_roles(self):
return self.model._meta.roles
class JunctionModelMixin(RuntimeModel, ContentModel):
class GroupJunctionMixin(Junction):
content = models.ForeignKey(Group, unique=True, null=False, blank=False)
name = models.CharField(max_length=40, null=False, blank=False)
class Meta:
abstract = True
content_roles = (
('public', ('read_group', 'add_topic_to_group')),
('admin', ('read_group', 'add_group', 'change_group', 'add_topic_to_group')),
('write', ('read_group', 'change_group', 'add_topic_to_group')),
)
def setUp(self):
mixin = self.GroupJunctionMixin
self.model = ModelBase('TestGroupJunction', (mixin, models.Model),
{'__module__': mixin.__module__})
self.content_model = Group
ctype = ContentType.objects.get_for_model(Group)
Permission.objects.get_or_create(codename='read_group', content_type=ctype)
Permission.objects.get_or_create(codename='add_topic_to_group', content_type=ctype)
super(JunctionModelMixin, self).setUp()
def append_model_roles(self, rolename, perms):
self.model._meta.content_roles += ((rolename, perms, ), )
def remove_model_roles(self, rolename):
self.model._meta.content_roles = [row for row in self.model._meta.content_roles if row[0] != rolename]
def get_model_roles(self):
return self.model._meta.content_roles
def create_content(self, trust):
import uuid
content = self.content_model(name=str(uuid.uuid4()))
content.save()
junction = self.model(content=content, trust=trust)
junction.save()
return content
class TrustAsContentMixin(ContentModel):
serialized_rollback = True
count = 0
def setUp(self):
self.model = Trust
self.content_model = Trust
super(TrustAsContentMixin, self).setUp()
def create_content(self, trust):
self.count += 1
content = Trust(title='Test Trust as Content %s' % self.count, trust=trust)
content.save()
return content
class TrustContentTestMixin(ContentModel):
def assertIsIterable(self, obj, msg='Not an iterable'):
        return self.assertTrue(hasattr(obj, '__iter__'), msg)
def test_unknown_content(self):
self.trust = Trust(settlor=self.user, trust=Trust.objects.get_root())
self.trust.save()
perm = TrustModelBackend().get_group_permissions(self.user, {})
self.assertIsNotNone(perm)
self.assertIsIterable(perm)
self.assertEqual(len(perm), 0)
trusts = Trust.objects.filter_by_content(self.user)
self.assertEqual(trusts.count(), 0)
def test_user_not_in_group_has_no_perm(self):
self.trust = Trust(settlor=self.user, trust=Trust.objects.get_root(), title='trust 1')
self.trust.save()
self.content = self.create_content(self.trust)
had = self.user.has_perm(self.get_perm_code(self.perm_change), self.content)
reload_test_users(self)
self.perm_change.group_set.add(self.group)
self.perm_change.save()
had = self.user.has_perm(self.get_perm_code(self.perm_change), self.content)
self.assertFalse(had)
def test_user_in_group_has_no_perm(self):
self.trust = Trust(settlor=self.user, trust=Trust.objects.get_root())
self.trust.save()
self.content = self.create_content(self.trust)
self.test_user_not_in_group_has_no_perm()
reload_test_users(self)
self.user.groups.add(self.group)
had = self.user.has_perm(self.get_perm_code(self.perm_change), self.content)
self.assertFalse(had)
def test_user_in_group_has_perm(self):
self.trust = Trust(settlor=self.user, trust=Trust.objects.get_root(), title='a title')
self.trust.save()
self.content = self.create_content(self.trust)
self.trust1 = Trust(settlor=self.user1, trust=Trust.objects.get_root())
self.trust1.save()
self.test_user_in_group_has_no_perm()
reload_test_users(self)
self.trust.groups.add(self.group)
had = self.user.has_perm(self.get_perm_code(self.perm_change), self.content)
self.assertTrue(had)
had = self.user.has_perm(self.get_perm_code(self.perm_add), self.content)
self.assertFalse(had)
def test_has_perm(self):
self.trust = Trust(settlor=self.user, trust=Trust.objects.get_root())
self.trust.save()
self.content = self.create_content(self.trust)
self.trust1 = Trust(settlor=self.user1, trust=Trust.objects.get_root())
self.trust1.save()
had = self.user.has_perm(self.get_perm_code(self.perm_change), self.content)
self.assertFalse(had)
had = self.user.has_perm(self.get_perm_code(self.perm_add), self.content)
self.assertFalse(had)
trust = Trust(settlor=self.user, title='Test trusts')
trust.save()
reload_test_users(self)
had = self.user.has_perm(self.get_perm_code(self.perm_change), self.content)
self.assertFalse(had)
tup = TrustUserPermission(trust=self.trust, entity=self.user, permission=self.perm_change)
tup.save()
reload_test_users(self)
had = self.user.has_perm(self.get_perm_code(self.perm_change), self.content)
self.assertTrue(had)
def test_has_perm_disallow_no_perm_content(self):
self.test_has_perm()
self.content1 = self.create_content(self.trust1)
had = self.user.has_perm(self.get_perm_code(self.perm_change), self.content1)
self.assertFalse(had)
def test_has_perm_disallow_no_perm_perm(self):
self.test_has_perm()
had = self.user.has_perm(self.get_perm_code(self.perm_add), self.content)
self.assertFalse(had)
def test_get_or_create_default_trust(self):
trust, created = Trust.objects.get_or_create_settlor_default(self.user)
content = self.create_content(trust)
had = self.user.has_perm(self.get_perm_code(self.perm_change), content)
self.assertFalse(had)
tup = TrustUserPermission(trust=trust, entity=self.user, permission=self.perm_change)
tup.save()
reload_test_users(self)
had = self.user.has_perm(self.get_perm_code(self.perm_change), content)
self.assertTrue(had)
def test_has_perm_queryset(self):
self.test_has_perm()
self.content1 = self.create_content(self.trust)
reload_test_users(self)
content_model = self.content_model if hasattr(self, 'content_model') else self.model
qs = content_model.objects.filter(pk__in=[self.content.pk, self.content1.pk])
had = self.user.has_perm(self.get_perm_code(self.perm_change), qs)
self.assertTrue(had)
def test_mixed_trust_queryset(self):
self.test_has_perm()
self.content1 = self.create_content(self.trust1)
self.content2 = self.create_content(self.trust)
reload_test_users(self)
qs = self.model.objects.all()
had = self.user.has_perm(self.get_perm_code(self.perm_change), qs)
self.assertFalse(had)
def test_read_permissions_added(self):
ct = ContentType.objects.get_for_model(self.model)
self.assertIsNotNone(Permission.objects.get(
content_type=ct,
codename='%s_%s' % ('read', ct.model)
))
class RoleTestMixin(object):
def get_perm_codename(self, action):
return '%s_%s' % (action, self.model_name.lower())
def test_roles_in_meta(self):
self.assertIsNotNone(self.get_model_roles())
def test_roles_unique(self):
self.role = Role(name='abc')
self.role.save()
rp = RolePermission(role=self.role, permission=self.perm_change)
rp.save()
rp = RolePermission(role=self.role, permission=self.perm_delete)
rp.save()
try:
            rp = RolePermission(role=self.role, permission=self.perm_change)
            rp.save()
            self.fail('Duplicate is not detected')
except:
pass
def test_has_perm(self):
get_or_create_root_user(self)
reload_test_users(self)
self.trust, created = Trust.objects.get_or_create_settlor_default(settlor=self.user)
call_command('update_roles_permissions')
self.content1 = self.create_content(self.trust)
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_change)))
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_read)))
self.group.user_set.add(self.user)
self.trust.groups.add(self.group)
Role.objects.get(name='public').groups.add(self.group)
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_read), self.content1))
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_change), self.content1))
def test_has_perm_diff_roles_on_contents(self):
self.test_has_perm()
content2 = self.create_content(self.trust)
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_read), content2))
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_change), content2))
# diff trust, same group, same role
trust3 = Trust(settlor=self.user, title='trust 3')
trust3.save()
content3 = self.create_content(trust3)
reload_test_users(self)
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_read), content3))
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_change), content3))
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_read), self.content1))
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_change), self.content1))
trust3.groups.add(self.group)
reload_test_users(self)
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_read), content3))
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_change), content3))
# make sure trust does not affect one another
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_read), self.content1))
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_change), self.content1))
# diff trust, diff group, stronger role, not in group
trust4 = Trust(settlor=self.user, title='trust 4')
trust4.save()
content4 = self.create_content(trust4)
group4 = Group(name='admin group')
group4.save()
Role.objects.get(name='admin').groups.add(group4)
reload_test_users(self)
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_read), content3))
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_change), content3))
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_read), content4))
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_change), content4))
# make sure trust does not affect one another
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_read), self.content1))
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_change), self.content1))
def test_has_perm_diff_group_on_contents(self):
self.test_has_perm()
# same trust, diff role, in different group
group3 = Group(name='write group')
group3.save()
Role.objects.get(name='write').groups.add(group3)
self.trust.groups.add(group3)
reload_test_users(self)
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_read), self.content1))
self.assertFalse(self.user.has_perm(self.get_perm_code(self.perm_change), self.content1))
group3.user_set.add(self.user)
reload_test_users(self)
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_read), self.content1))
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_change), self.content1))
content3 = self.create_content(self.trust)
reload_test_users(self)
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_read), content3))
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_change), content3))
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_read), self.content1))
self.assertTrue(self.user.has_perm(self.get_perm_code(self.perm_change), self.content1))
def test_management_command_create_roles(self):
self.assertEqual(Role.objects.count(), 0)
self.assertEqual(RolePermission.objects.count(), 0)
call_command('update_roles_permissions')
rs = Role.objects.all()
self.assertEqual(rs.count(), 3)
rp = RolePermission.objects.filter(permission__content_type__app_label=self.app_label)
self.assertEqual(rp.count(), 9)
rp = Role.objects.get(name='public')
ra = Role.objects.get(name='admin')
rw = Role.objects.get(name='write')
self.assertEqual(rp.permissions.filter(content_type__app_label=self.app_label).count(), 2)
self.assertEqual(ra.permissions.filter(content_type__app_label=self.app_label).count(), 4)
ra.permissions.filter(content_type__app_label=self.app_label).get(codename=self.get_perm_codename('add_topic_to'))
ra.permissions.filter(content_type__app_label=self.app_label).get(codename=self.get_perm_codename('read'))
ra.permissions.filter(content_type__app_label=self.app_label).get(codename=self.get_perm_codename('add'))
ra.permissions.filter(content_type__app_label=self.app_label).get(codename=self.get_perm_codename('change'))
self.assertEqual(rp.permissions.filter(content_type__app_label=self.app_label).filter(codename=self.get_perm_codename('add_topic_to')).count(), 1)
self.assertEqual(rp.permissions.filter(content_type__app_label=self.app_label).filter(codename=self.get_perm_codename('add')).count(), 0)
self.assertEqual(rp.permissions.filter(content_type__app_label=self.app_label).filter(codename=self.get_perm_codename('change')).count(), 0)
# Make change and ensure we add items
self.append_model_roles('read', (self.get_perm_codename('read'),))
call_command('update_roles_permissions')
rs = Role.objects.all()
self.assertEqual(rs.count(), 4)
rp = RolePermission.objects.filter(permission__content_type__app_label=self.app_label)
self.assertEqual(rp.count(), 10)
rr = Role.objects.get(name='read')
self.assertEqual(rr.permissions.filter(content_type__app_label=self.app_label).count(), 1)
self.assertEqual(rr.permissions.filter(content_type__app_label=self.app_label).filter(codename=self.get_perm_codename('read')).count(), 1)
# Add
self.remove_model_roles('write')
self.append_model_roles('write', (self.get_perm_codename('change'), self.get_perm_codename('add'), self.get_perm_codename('add_topic_to'), self.get_perm_codename('read'),))
call_command('update_roles_permissions')
rs = Role.objects.all()
self.assertEqual(rs.count(), 4)
rp = RolePermission.objects.filter(permission__content_type__app_label=self.app_label)
self.assertEqual(rp.count(), 11)
# Remove
self.remove_model_roles('write')
self.append_model_roles('write', (self.get_perm_codename('change'), self.get_perm_codename('read'), ))
call_command('update_roles_permissions')
rs = Role.objects.all()
self.assertEqual(rs.count(), 4)
rp = RolePermission.objects.filter(permission__content_type__app_label=self.app_label)
self.assertEqual(rp.count(), 9)
# Remove 2
self.remove_model_roles('write')
self.remove_model_roles('read')
self.append_model_roles('write', (self.get_perm_codename('change'), ))
call_command('update_roles_permissions')
rs = Role.objects.all()
self.assertEqual(rs.count(), 3)
rp = RolePermission.objects.filter(permission__content_type__app_label=self.app_label)
self.assertEqual(rp.count(), 7)
# Run again
call_command('update_roles_permissions')
rs = Role.objects.all()
self.assertEqual(rs.count(), 3)
rp = RolePermission.objects.filter(permission__content_type__app_label=self.app_label)
self.assertEqual(rp.count(), 7)
# Add empty
self.append_model_roles('read', ())
call_command('update_roles_permissions')
rs = Role.objects.all()
self.assertEqual(rs.count(), 4)
rp = RolePermission.objects.filter(permission__content_type__app_label=self.app_label)
self.assertEqual(rp.count(), 7)
class TrustJunctionTestCase(TrustContentTestMixin, JunctionModelMixin, TransactionTestCase):
@unittest.expectedFailure
def test_read_permissions_added(self):
        super(TrustJunctionTestCase, self).test_read_permissions_added()
class TrustContentTestCase(TrustContentTestMixin, ContentModelMixin, TransactionTestCase):
pass
class TrustAsContentTestCase(TrustContentTestMixin, TrustAsContentMixin, TestCase):
pass
class RoleContentTestCase(RoleTestMixin, ContentModelMixin, TransactionTestCase):
pass
class RoleJunctionTestCase(RoleTestMixin, JunctionModelMixin, TransactionTestCase):
pass
|
|
#!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper for adding an include to a source file in the "right" place.
clang-format already provides header sorting functionality; however, the
functionality is limited to sorting headers within a block of headers surrounded
by blank lines (these are a heuristic to avoid clang breaking ordering for
headers sensitive to inclusion order, e.g. <windows.h>).
As a result, inserting a new header is a bit more complex than simply inserting
the new header at the top and running clang-format.
This script implements additional logic to:
- classify different blocks of headers by type (C system, C++ system, user)
- find the appropriate insertion point for the new header
- creating a new header block if necessary
As a bonus, it does *also* sort the includes, though any sorting disagreements
with clang-format should be resolved in favor of clang-format.
Usage:
tools/add_header.py --header '<utility>' foo/bar.cc foo/baz.cc foo/baz.h
"""
import argparse
import difflib
import os.path
import re
import sys
# The specific values of these constants are also used as a sort key for
# ordering different header types in the correct relative order.
_HEADER_TYPE_C_SYSTEM = 0
_HEADER_TYPE_CXX_SYSTEM = 1
_HEADER_TYPE_USER = 2
_HEADER_TYPE_INVALID = -1
def ClassifyHeader(decorated_name):
if IsCSystemHeader(decorated_name):
return _HEADER_TYPE_C_SYSTEM
elif IsCXXSystemHeader(decorated_name):
return _HEADER_TYPE_CXX_SYSTEM
elif IsUserHeader(decorated_name):
return _HEADER_TYPE_USER
else:
return _HEADER_TYPE_INVALID
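# Illustrative examples (not part of the original tool): '<stdlib.h>' classifies
# as a C system header, '<utility>' as a C++ system header, '"base/foo.h"' as a
# user header, and an undecorated name such as 'foo.h' as invalid.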
def UndecoratedName(decorated_name):
"""Returns the undecorated version of decorated_name by removing "" or <>."""
assert IsSystemHeader(decorated_name) or IsUserHeader(decorated_name)
return decorated_name[1:-1]
def IsSystemHeader(decorated_name):
"""Returns true if decorated_name looks like a system header."""
return decorated_name[0] == '<' and decorated_name[-1] == '>'
def IsCSystemHeader(decorated_name):
"""Returns true if decoraed_name looks like a C system header."""
return IsSystemHeader(decorated_name) and UndecoratedName(
decorated_name).endswith('.h')
def IsCXXSystemHeader(decorated_name):
"""Returns true if decoraed_name looks like a C++ system header."""
return IsSystemHeader(
decorated_name) and not UndecoratedName(decorated_name).endswith('.h')
def IsUserHeader(decorated_name):
"""Returns true if decoraed_name looks like a user header."""
return decorated_name[0] == '"' and decorated_name[-1] == '"'
_EMPTY_LINE_RE = re.compile(r'\s*$')
_COMMENT_RE = re.compile(r'\s*//(.*)$')
_INCLUDE_RE = re.compile(
r'\s*#(import|include)\s+([<"].+?[">])\s*?(?://(.*))?$')
def FindIncludes(lines):
"""Finds the block of #includes, assuming Google+Chrome C++ style source.
Note that this doesn't simply return a slice of the input lines, because
  having the actual indices simplifies things when generating the updated
source text.
Args:
lines: The source text split into lines.
Returns:
A tuple of begin, end indices that can be used to slice the input lines to
contain the includes to process. Returns -1, -1 if no such block of
input lines could be found.
"""
begin = end = -1
for idx, line in enumerate(lines):
# Skip over any initial comments (e.g. the copyright boilerplate) or empty
# lines.
# TODO(dcheng): This means that any preamble comment associated with the
# first header will be dropped. So far, this hasn't broken anything, but
# maybe this needs to be more clever.
# TODO(dcheng): #define and #undef should probably also be allowed.
if _EMPTY_LINE_RE.match(line) or _COMMENT_RE.match(line):
continue
m = _INCLUDE_RE.match(line)
if not m:
if begin < 0:
# No match, but no #includes have been seen yet. Keep scanning for the
# first #include.
continue
# Give up, it's something weird that probably requires manual
# intervention.
break
if begin < 0:
begin = idx
end = idx + 1
return begin, end
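# Illustrative example (not part of the original tool): for source lines
# ['// Copyright', '', '#include <utility>', '#include "foo.h"', '', 'int x;'],
# FindIncludes returns (2, 4), the half-open range covering the two #include
# lines; the surrounding comment, blank lines, and code are left alone.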
class Include(object):
"""Represents an #include/#import and any interesting metadata for it.
Attributes:
decorated_name: The name of the header file, decorated with <> for system
headers or "" for user headers.
directive: 'include' or 'import'
TODO(dcheng): In the future, this may need to support C++ modules.
preamble: Any comment lines that precede this include line, e.g.:
// This is a preamble comment
// for a header file.
#include <windows.h>
would have a preamble of
['// This is a preamble comment', '// for a header file.'].
inline_comment: Any comment that comes after the #include on the same line,
e.g.
#include <windows.h> // For CreateWindowExW()
      would be parsed with an inline comment of ' For CreateWindowExW()'.
header_type: The header type corresponding to decorated_name as determined
by ClassifyHeader().
is_primary_header: True if this is the primary related header of a C++
implementation file. Any primary header will be sorted to the top in its
own separate block.
"""
def __init__(self, decorated_name, directive, preamble, inline_comment):
self.decorated_name = decorated_name
assert directive == 'include' or directive == 'import'
self.directive = directive
self.preamble = preamble
self.inline_comment = inline_comment
self.header_type = ClassifyHeader(decorated_name)
assert self.header_type != _HEADER_TYPE_INVALID
self.is_primary_header = False
def __repr__(self):
return str((self.decorated_name, self.directive, self.preamble,
self.inline_comment, self.header_type, self.is_primary_header))
def ShouldInsertNewline(self, previous_include):
# Per the Google C++ style guide, different blocks of headers should be
# separated by an empty line.
return (self.is_primary_header != previous_include.is_primary_header
or self.header_type != previous_include.header_type)
def ToSource(self):
"""Generates a C++ source representation of this include."""
source = []
source.extend(self.preamble)
include_line = '#%s %s' % (self.directive, self.decorated_name)
if self.inline_comment:
include_line = include_line + ' //' + self.inline_comment
source.append(include_line)
return [line.rstrip() for line in source]
def ParseIncludes(lines):
"""Parses lines into a list of Include objects. Returns None on failure.
Args:
lines: A list of strings representing C++ source text.
Returns:
A list of Include objects representing the parsed input lines, or None if
the input lines could not be parsed.
"""
includes = []
preamble = []
for line in lines:
if _EMPTY_LINE_RE.match(line):
if preamble:
# preamble contents are flushed when an #include directive is matched.
# If preamble is non-empty, that means there is a preamble separated
# from its #include directive by at least one newline. Just give up,
# since the sorter has no idea how to preserve structure in this case.
return None
continue
m = _INCLUDE_RE.match(line)
if not m:
preamble.append(line)
continue
includes.append(Include(m.group(2), m.group(1), preamble, m.group(3)))
preamble = []
# In theory, the caller should never pass a list of lines with a dangling
# preamble. But there's a test case that exercises this, and just in case it
# actually happens, fail a bit more gracefully.
if preamble:
return None
return includes
def _DecomposePath(filename):
"""Decomposes a filename into a list of directories and the basename.
Args:
filename: A filename!
Returns:
A tuple of a list of directories and a string basename.
"""
dirs = []
dirname, basename = os.path.split(filename)
while dirname:
dirname, last = os.path.split(dirname)
dirs.append(last)
dirs.reverse()
# Remove the extension from the basename.
basename = os.path.splitext(basename)[0]
return dirs, basename
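# Illustrative sketch (hypothetical path):
#
#   _DecomposePath('base/files/file_util_posix.cc')
#   # -> (['base', 'files'], 'file_util_posix')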
_PLATFORM_SUFFIX = (
r'(?:_(?:android|aura|chromeos|ios|linux|mac|ozone|posix|win|x11))?')
_TEST_SUFFIX = r'(?:_(?:browser|interactive_ui|ui|unit)?test)?'
def MarkPrimaryInclude(includes, filename):
"""Finds the primary header in includes and marks it as such.
Per the style guide, if moo.cc's main purpose is to implement or test the
functionality in moo.h, moo.h should be ordered first in the includes.
Args:
includes: A list of Include objects.
filename: The filename to use as the basis for finding the primary header.
"""
# Header files never have a primary include.
if filename.endswith('.h'):
return
basis = _DecomposePath(filename)
# The list of includes is searched in reverse order of length. Even though
# matching is fuzzy, moo_posix.h should take precedence over moo.h when
# considering moo_posix.cc.
includes.sort(key=lambda i: -len(i.decorated_name))
for include in includes:
if include.header_type != _HEADER_TYPE_USER:
continue
to_test = _DecomposePath(UndecoratedName(include.decorated_name))
# If the basename to test is longer than the basis, just skip it and
# continue. moo.c should never match against moo_posix.h.
if len(to_test[1]) > len(basis[1]):
continue
    # The basenames in the two paths being compared need to fuzzily match.
# This allows for situations where moo_posix.cc implements the interfaces
# defined in moo.h.
escaped_basename = re.escape(to_test[1])
if not (re.match(escaped_basename + _PLATFORM_SUFFIX + _TEST_SUFFIX + '$',
basis[1]) or
re.match(escaped_basename + _TEST_SUFFIX + _PLATFORM_SUFFIX + '$',
basis[1])):
continue
# The topmost directory name must match, and the rest of the directory path
# should be 'substantially similar'.
s = difflib.SequenceMatcher(None, to_test[0], basis[0])
first_matched = False
total_matched = 0
for match in s.get_matching_blocks():
if total_matched == 0 and match.a == 0 and match.b == 0:
first_matched = True
total_matched += match.size
if not first_matched:
continue
# 'Substantially similar' is defined to be:
# - no more than two differences
# - at least one match besides the topmost directory
total_differences = abs(total_matched -
len(to_test[0])) + abs(total_matched -
len(basis[0]))
# Note: total_differences != 0 is mainly intended to allow more succinct
# tests (otherwise tests with just a basename would always trip the
# total_matched < 2 check).
if total_differences != 0 and (total_differences > 2 or total_matched < 2):
continue
include.is_primary_header = True
return
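# Illustrative sketch (hypothetical paths, relying on the helpers defined
# earlier in this file):
#
#   includes = ParseIncludes(['#include "base/files/file_util.h"',
#                             '#include <vector>'])
#   MarkPrimaryInclude(includes, 'base/files/file_util_unittest.cc')
#   # The "base/files/file_util.h" entry now has is_primary_header == True:
#   # its basename fuzzily matches the file's basename (platform/test suffixes
#   # are ignored) and the directory paths are identical.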
def SerializeIncludes(includes):
"""Turns includes back into the corresponding C++ source text.
This function assumes that the list of input Include objects is already sorted
according to Google style.
Args:
includes: a list of Include objects.
Returns:
A list of strings representing C++ source text.
"""
source = []
# Assume there's always at least one include.
previous_include = None
for include in includes:
if previous_include and include.ShouldInsertNewline(previous_include):
source.append('')
source.extend(include.ToSource())
previous_include = include
return source
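# Illustrative sketch (not from the original tool): after sorting, the
# serialized output separates the primary header and each header type (as
# classified by ClassifyHeader()) with a blank line, roughly:
#
#   #include "base/files/file_util.h"
#
#   #include <vector>
#
#   #include "base/logging.h"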
def InsertHeaderIntoSource(filename, source, decorated_name):
"""Inserts the specified header into some source text, if needed.
Args:
filename: The name of the source file.
source: A string containing the contents of the source file.
decorated_name: The decorated name of the header to insert.
Returns:
None on failure or the modified source text on success.
"""
lines = source.splitlines()
begin, end = FindIncludes(lines)
# No #includes in this file. Just give up.
# TODO(dcheng): Be more clever and insert it after the file-level comment or
# include guard as appropriate.
if begin < 0:
print(f'Skipping {filename}: unable to find includes!')
return None
includes = ParseIncludes(lines[begin:end])
if not includes:
print(f'Skipping {filename}: unable to parse includes!')
return None
if decorated_name in [i.decorated_name for i in includes]:
# Nothing to do.
print(f'Skipping {filename}: no changes required!')
return source
MarkPrimaryInclude(includes, filename)
includes.append(Include(decorated_name, 'include', [], None))
# TODO(dcheng): It may be worth considering adding special sorting heuristics
# for windows.h, et cetera.
def SortKey(include):
return (not include.is_primary_header, include.header_type,
include.decorated_name)
includes.sort(key=SortKey)
lines[begin:end] = SerializeIncludes(includes)
lines.append('') # To avoid eating the newline at the end of the file.
return '\n'.join(lines)
def main():
parser = argparse.ArgumentParser(
description='Mass insert a new header into a bunch of files.')
parser.add_argument(
'--header',
help='The decorated filename of the header to insert (e.g. "a" or <a>)',
required=True)
parser.add_argument('files', nargs='+')
args = parser.parse_args()
if ClassifyHeader(args.header) == _HEADER_TYPE_INVALID:
print('--header argument must be a decorated filename, e.g.')
print(' --header "<utility>"')
print('or')
print(' --header \'"moo.h"\'')
return 1
print(f'Inserting #include {args.header}...')
for filename in args.files:
with open(filename, 'r') as f:
new_source = InsertHeaderIntoSource(os.path.normpath(filename), f.read(),
args.header)
if not new_source:
continue
with open(filename, 'w') as f:
f.write(new_source)
if __name__ == '__main__':
sys.exit(main())
|
|
from sqlalchemy.testing import assert_raises, \
assert_raises_message, eq_
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.schema import Column
from sqlalchemy.orm import mapper, relationship, Session, \
exc as orm_exc, sessionmaker, backref
from sqlalchemy.testing import fixtures
class M2MTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('place', metadata,
Column('place_id', Integer, test_needs_autoincrement=True,
primary_key=True),
Column('name', String(30), nullable=False),
test_needs_acid=True,
)
Table('transition', metadata,
Column('transition_id', Integer,
test_needs_autoincrement=True, primary_key=True),
Column('name', String(30), nullable=False),
test_needs_acid=True,
)
Table('place_thingy', metadata,
Column('thingy_id', Integer, test_needs_autoincrement=True,
primary_key=True),
Column('place_id', Integer, ForeignKey('place.place_id'),
nullable=False),
Column('name', String(30), nullable=False),
test_needs_acid=True,
)
# association table #1
Table('place_input', metadata,
Column('place_id', Integer, ForeignKey('place.place_id')),
Column('transition_id', Integer,
ForeignKey('transition.transition_id')),
test_needs_acid=True,
)
# association table #2
Table('place_output', metadata,
Column('place_id', Integer, ForeignKey('place.place_id')),
Column('transition_id', Integer,
ForeignKey('transition.transition_id')),
test_needs_acid=True,
)
Table('place_place', metadata,
Column('pl1_id', Integer, ForeignKey('place.place_id')),
Column('pl2_id', Integer, ForeignKey('place.place_id')),
test_needs_acid=True,
)
@classmethod
def setup_classes(cls):
class Place(cls.Basic):
def __init__(self, name):
self.name = name
class PlaceThingy(cls.Basic):
def __init__(self, name):
self.name = name
class Transition(cls.Basic):
def __init__(self, name):
self.name = name
def test_overlapping_attribute_error(self):
place, Transition, place_input, Place, transition = (self.tables.place,
self.classes.Transition,
self.tables.place_input,
self.classes.Place,
self.tables.transition)
mapper(Place, place, properties={
'transitions': relationship(Transition,
secondary=place_input, backref='places')
})
mapper(Transition, transition, properties={
'places': relationship(Place,
secondary=place_input, backref='transitions')
})
assert_raises_message(sa.exc.ArgumentError,
"property of that name exists",
sa.orm.configure_mappers)
def test_self_referential_roundtrip(self):
place, Place, place_place = (self.tables.place,
self.classes.Place,
self.tables.place_place)
mapper(Place, place, properties={
'places': relationship(
Place,
secondary=place_place,
primaryjoin=place.c.place_id == place_place.c.pl1_id,
secondaryjoin=place.c.place_id == place_place.c.pl2_id,
order_by=place_place.c.pl2_id
)
})
sess = Session()
p1 = Place('place1')
p2 = Place('place2')
p3 = Place('place3')
p4 = Place('place4')
p5 = Place('place5')
p6 = Place('place6')
p7 = Place('place7')
sess.add_all((p1, p2, p3, p4, p5, p6, p7))
p1.places.append(p2)
p1.places.append(p3)
p5.places.append(p6)
p6.places.append(p1)
p7.places.append(p1)
p1.places.append(p5)
p4.places.append(p3)
p3.places.append(p4)
sess.commit()
eq_(p1.places, [p2, p3, p5])
eq_(p5.places, [p6])
eq_(p7.places, [p1])
eq_(p6.places, [p1])
eq_(p4.places, [p3])
eq_(p3.places, [p4])
eq_(p2.places, [])
def test_self_referential_bidirectional_mutation(self):
place, Place, place_place = (self.tables.place,
self.classes.Place,
self.tables.place_place)
mapper(Place, place, properties={
'child_places': relationship(
Place,
secondary=place_place,
primaryjoin=place.c.place_id == place_place.c.pl1_id,
secondaryjoin=place.c.place_id == place_place.c.pl2_id,
order_by=place_place.c.pl2_id,
backref='parent_places'
)
})
sess = Session()
p1 = Place('place1')
p2 = Place('place2')
p2.parent_places = [p1]
sess.add_all([p1, p2])
p1.parent_places.append(p2)
sess.commit()
assert p1 in p2.parent_places
assert p2 in p1.parent_places
def test_joinedload_on_double(self):
"""test that a mapper can have two eager relationships to the same table, via
two different association tables. aliases are required."""
place_input, transition, Transition, PlaceThingy, \
place, place_thingy, Place, \
place_output = (self.tables.place_input,
self.tables.transition,
self.classes.Transition,
self.classes.PlaceThingy,
self.tables.place,
self.tables.place_thingy,
self.classes.Place,
self.tables.place_output)
mapper(PlaceThingy, place_thingy)
mapper(Place, place, properties={
'thingies': relationship(PlaceThingy, lazy='joined')
})
mapper(Transition, transition, properties=dict(
inputs=relationship(Place, place_output, lazy='joined'),
outputs=relationship(Place, place_input, lazy='joined'),
)
)
tran = Transition('transition1')
tran.inputs.append(Place('place1'))
tran.outputs.append(Place('place2'))
tran.outputs.append(Place('place3'))
sess = Session()
sess.add(tran)
sess.commit()
r = sess.query(Transition).all()
self.assert_unordered_result(r, Transition,
{'name': 'transition1',
'inputs': (Place, [{'name': 'place1'}]),
'outputs': (Place, [{'name': 'place2'}, {'name': 'place3'}])
})
def test_bidirectional(self):
place_input, transition, Transition, Place, place, place_output = (
self.tables.place_input,
self.tables.transition,
self.classes.Transition,
self.classes.Place,
self.tables.place,
self.tables.place_output)
mapper(Place, place)
mapper(Transition, transition, properties=dict(
inputs=relationship(Place, place_output,
backref=backref('inputs',
order_by=transition.c.transition_id),
order_by=Place.place_id),
outputs=relationship(Place, place_input,
backref=backref('outputs',
order_by=transition.c.transition_id),
order_by=Place.place_id),
)
)
t1 = Transition('transition1')
t2 = Transition('transition2')
t3 = Transition('transition3')
p1 = Place('place1')
p2 = Place('place2')
p3 = Place('place3')
t1.inputs.append(p1)
t1.inputs.append(p2)
t1.outputs.append(p3)
t2.inputs.append(p1)
p2.inputs.append(t2)
p3.inputs.append(t2)
p1.outputs.append(t1)
sess = Session()
sess.add_all((t1, t2, t3, p1, p2, p3))
sess.commit()
self.assert_result([t1],
Transition, {'outputs':
(Place, [{'name': 'place3'}, {'name': 'place1'}])})
self.assert_result([p2],
Place, {'inputs':
(Transition, [{'name': 'transition1'},
{'name': 'transition2'}])})
@testing.requires.sane_multi_rowcount
def test_stale_conditions(self):
Place, Transition, place_input, place, transition = (
self.classes.Place,
self.classes.Transition,
self.tables.place_input,
self.tables.place,
self.tables.transition)
mapper(Place, place, properties={
'transitions': relationship(Transition, secondary=place_input,
passive_updates=False)
})
mapper(Transition, transition)
p1 = Place('place1')
t1 = Transition('t1')
p1.transitions.append(t1)
sess = sessionmaker()()
sess.add_all([p1, t1])
sess.commit()
p1.place_id
p1.transitions
sess.execute("delete from place_input", mapper=Place)
p1.place_id = 7
assert_raises_message(
orm_exc.StaleDataError,
r"UPDATE statement on table 'place_input' expected to "
r"update 1 row\(s\); Only 0 were matched.",
sess.commit
)
sess.rollback()
p1.place_id
p1.transitions
sess.execute("delete from place_input", mapper=Place)
p1.transitions.remove(t1)
assert_raises_message(
orm_exc.StaleDataError,
r"DELETE statement on table 'place_input' expected to "
r"delete 1 row\(s\); Only 0 were matched.",
sess.commit
)
class AssortedPersistenceTests(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table("left", metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(30))
)
Table("right", metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(30)),
)
Table('secondary', metadata,
Column('left_id', Integer, ForeignKey('left.id'),
primary_key=True),
Column('right_id', Integer, ForeignKey('right.id'),
primary_key=True),
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(cls.Comparable):
pass
def _standard_bidirectional_fixture(self):
left, secondary, right = self.tables.left, \
self.tables.secondary, self.tables.right
A, B = self.classes.A, self.classes.B
mapper(A, left, properties={
'bs': relationship(B, secondary=secondary,
backref='as', order_by=right.c.id)
})
mapper(B, right)
def _bidirectional_onescalar_fixture(self):
left, secondary, right = self.tables.left, \
self.tables.secondary, self.tables.right
A, B = self.classes.A, self.classes.B
mapper(A, left, properties={
'bs': relationship(B, secondary=secondary,
backref=backref('a', uselist=False),
order_by=right.c.id)
})
mapper(B, right)
def test_session_delete(self):
self._standard_bidirectional_fixture()
A, B = self.classes.A, self.classes.B
secondary = self.tables.secondary
sess = Session()
sess.add_all([
A(data='a1', bs=[B(data='b1')]),
A(data='a2', bs=[B(data='b2')])
])
sess.commit()
a1 = sess.query(A).filter_by(data='a1').one()
sess.delete(a1)
sess.flush()
eq_(sess.query(secondary).count(), 1)
a2 = sess.query(A).filter_by(data='a2').one()
sess.delete(a2)
sess.flush()
eq_(sess.query(secondary).count(), 0)
def test_remove_scalar(self):
# test setting a uselist=False to None
self._bidirectional_onescalar_fixture()
A, B = self.classes.A, self.classes.B
secondary = self.tables.secondary
sess = Session()
sess.add_all([
A(data='a1', bs=[B(data='b1'), B(data='b2')]),
])
sess.commit()
a1 = sess.query(A).filter_by(data='a1').one()
b2 = sess.query(B).filter_by(data='b2').one()
assert b2.a is a1
b2.a = None
sess.commit()
eq_(a1.bs, [B(data='b1')])
eq_(b2.a, None)
eq_(sess.query(secondary).count(), 1)
|
|
"""
This module has all the classes and functions related to waves in optics.
**Contains**
* TWave
"""
from __future__ import print_function, division
__all__ = ['TWave']
from sympy import (sympify, pi, sin, cos, sqrt, Symbol, S,
symbols, Derivative, atan2)
from sympy.core.expr import Expr
from sympy.physics.units import speed_of_light, meter, second
c = speed_of_light.convert_to(meter/second)
class TWave(Expr):
r"""
This is a simple transverse sine wave travelling in a one-dimensional space.
Basic properties are required at the time of creation of the object,
but they can be changed later with respective methods provided.
    It is represented as :math:`A \cos(k x - \omega t + \phi)`,
where :math:`A` is the amplitude, :math:`\omega` is the angular velocity,
:math:`k` is the wavenumber (spatial frequency), :math:`x` is a spatial variable
to represent the position on the dimension on which the wave propagates,
and :math:`\phi` is the phase angle of the wave.
Arguments
=========
amplitude : Sympifyable
Amplitude of the wave.
frequency : Sympifyable
Frequency of the wave.
phase : Sympifyable
Phase angle of the wave.
time_period : Sympifyable
Time period of the wave.
n : Sympifyable
Refractive index of the medium.
Raises
=======
ValueError : When neither frequency nor time period is provided
or they are not consistent.
TypeError : When anything other than TWave objects is added.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A1, phi1, A2, phi2, f = symbols('A1, phi1, A2, phi2, f')
>>> w1 = TWave(A1, f, phi1)
>>> w2 = TWave(A2, f, phi2)
>>> w3 = w1 + w2 # Superposition of two waves
>>> w3
    TWave(sqrt(A1**2 + 2*A1*A2*cos(phi1 - phi2) + A2**2), f,
    atan2(A1*sin(phi1) + A2*sin(phi2), A1*cos(phi1) + A2*cos(phi2)))
>>> w3.amplitude
sqrt(A1**2 + 2*A1*A2*cos(phi1 - phi2) + A2**2)
>>> w3.phase
    atan2(A1*sin(phi1) + A2*sin(phi2), A1*cos(phi1) + A2*cos(phi2))
>>> w3.speed
299792458*meter/(second*n)
>>> w3.angular_velocity
2*pi*f
"""
def __init__(
self,
amplitude,
frequency=None,
phase=S.Zero,
time_period=None,
n=Symbol('n')):
frequency = sympify(frequency)
amplitude = sympify(amplitude)
phase = sympify(phase)
time_period = sympify(time_period)
n = sympify(n)
self._frequency = frequency
self._amplitude = amplitude
self._phase = phase
self._time_period = time_period
self._n = n
if time_period is not None:
self._frequency = 1/self._time_period
        if frequency is not None:
            self._time_period = 1/self._frequency
            if time_period is not None:
                if frequency != 1/time_period:
                    raise ValueError("frequency and time_period should be consistent.")
if frequency is None and time_period is None:
raise ValueError("Either frequency or time period is needed.")
@property
def frequency(self):
"""
Returns the frequency of the wave,
in cycles per second.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.frequency
f
"""
return self._frequency
@property
def time_period(self):
"""
Returns the temporal period of the wave,
in seconds per cycle.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.time_period
1/f
"""
return self._time_period
@property
def wavelength(self):
"""
Returns the wavelength (spatial period) of the wave,
in meters per cycle.
It depends on the medium of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.wavelength
299792458*meter/(second*f*n)
"""
return c/(self._frequency*self._n)
@property
def amplitude(self):
"""
Returns the amplitude of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.amplitude
A
"""
return self._amplitude
@property
def phase(self):
"""
Returns the phase angle of the wave,
in radians.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.phase
phi
"""
return self._phase
@property
def speed(self):
"""
Returns the propagation speed of the wave,
in meters per second.
It is dependent on the propagation medium.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.speed
299792458*meter/(second*n)
"""
return self.wavelength*self._frequency
@property
def angular_velocity(self):
"""
Returns the angular velocity of the wave,
in radians per second.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.angular_velocity
2*pi*f
"""
return 2*pi*self._frequency
@property
def wavenumber(self):
"""
Returns the wavenumber of the wave,
in radians per meter.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.wavenumber
pi*second*f*n/(149896229*meter)
"""
return 2*pi/self.wavelength
def __str__(self):
"""String representation of a TWave."""
from sympy.printing import sstr
return type(self).__name__ + sstr(self.args)
__repr__ = __str__
def __add__(self, other):
"""
Addition of two waves will result in their superposition.
The type of interference will depend on their phase angles.
"""
if isinstance(other, TWave):
if self._frequency == other._frequency and self.wavelength == other.wavelength:
return TWave(sqrt(self._amplitude**2 + other._amplitude**2 + 2 *
self.amplitude*other.amplitude*cos(
self._phase - other.phase)),
self.frequency,
                         atan2(self._amplitude*sin(self._phase)
                               + other._amplitude*sin(other._phase),
                               self._amplitude*cos(self._phase)
                               + other._amplitude*cos(other._phase))
)
else:
raise NotImplementedError("Interference of waves with different frequencies"
" has not been implemented.")
else:
raise TypeError(type(other).__name__ + " and TWave objects can't be added.")
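    # Sketch of the phasor addition used in __add__ above, assuming both waves
    # share the same frequency:
    #   A1*cos(theta + phi1) + A2*cos(theta + phi2) == A*cos(theta + phi)
    #   with A   = sqrt(A1**2 + A2**2 + 2*A1*A2*cos(phi1 - phi2))
    #   and  phi = atan2(A1*sin(phi1) + A2*sin(phi2),
    #                    A1*cos(phi1) + A2*cos(phi2))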
def _eval_rewrite_as_sin(self, *args, **kwargs):
return self._amplitude*sin(self.wavenumber*Symbol('x')
- self.angular_velocity*Symbol('t') + self._phase + pi/2, evaluate=False)
def _eval_rewrite_as_cos(self, *args, **kwargs):
return self._amplitude*cos(self.wavenumber*Symbol('x')
- self.angular_velocity*Symbol('t') + self._phase)
def _eval_rewrite_as_pde(self, *args, **kwargs):
from sympy import Function
mu, epsilon, x, t = symbols('mu, epsilon, x, t')
E = Function('E')
return Derivative(E(x, t), x, 2) + mu*epsilon*Derivative(E(x, t), t, 2)
def _eval_rewrite_as_exp(self, *args, **kwargs):
from sympy import exp, I
return self._amplitude*exp(I*(self.wavenumber*Symbol('x')
- self.angular_velocity*Symbol('t') + self._phase))
|
|
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Commander gathers Variables to set them with an UpdateList.
The command process can be AllSetsForEach (i.e. a map of the update applied successively for each)
or an EachSetForAll (i.e. each set is mapped over all of them).
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Itemizers.Pather"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
from ShareYourSystem.Standards.Itemizers import Getter,Setter
#</ImportSpecificModules>
#<DefineLocals>
CommandPrefixStr="--"
CommandWalkStr="..."
CommandSelfStr="/"
CommandAddStr="+"
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class CommanderClass(BaseClass):
def default_init(
self,
_CommandingGetVariable=None,
_CommandingSetVariable=None,
_CommandingOrderStr="AllSetsForEachGet",
_CommandingBeforeWalkRigidBool=False,
_CommandingAfterWalkRigidBool=False,
_CommandingBeforeSelfRigidBool=False,
_CommandingAfterSelfRigidBool=False,
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
def do_command(self):
""" """
#/####################/#
# Adapt the type for getting things to command
#
#debug
'''
self.debug(
[
'Adapt the type for getting things to command',
("self.",self,[
'CommandingGetVariable',
'CommandingSetVariable'
])
]
)
'''
#Check
if type(self.CommandingGetVariable)!=list:
#debug
'''
self.debug(
[
'We get nicely',
('self.',self,['CommandingGetVariable'])
]
)
'''
#get
CommandedValueVariablesList=self[
self.CommandingGetVariable
]
#Check
if type(CommandedValueVariablesList)!=list:
CommandedValueVariablesList=[CommandedValueVariablesList]
else:
#map a get
CommandedValueVariablesList=map(
lambda __CommandingGetVariable:
self[__CommandingGetVariable],
self.CommandingGetVariable
)
#flat maybe
CommandedValueVariablesList=SYS.flat(CommandedValueVariablesList)
#filter
CommandedValueVariablesList=SYS.filterNone(CommandedValueVariablesList)
#debug
'''
self.debug(
[
'in the end, CommandedValueVariablesList is ',
SYS._str(CommandedValueVariablesList)
]
)
'''
#/###################/#
# Check if we have to walk before
#
#Check
if self.CommandingBeforeWalkRigidBool:
#debug
'''
self.debug(
[
'we are going to walk before the command',
'CommandedValueVariablesList is '+SYS._str(CommandedValueVariablesList),
'self.getDoing(SYS.CommanderClass).values() is '+SYS._str
(self.getDoing(
SYS.CommanderClass).values())
]
)
'''
#Debug
'''
for __CommandedValueVariable in CommandedValueVariablesList:
#debug
self.debug(
'__CommandedValueVariable is '+SYS._str( __CommandedValueVariable)
)
#set
__CommandedValueVariable.set(
'GettingNewBool',False
).command(
*self.getDoing().values()
).set(
'GettingNewBool',True
)
'''
#set
CommandedOrderedDict=self.getDoing(
SYS.CommanderClass
)
CommandedOrderedDict['CommandingBeforeSelfRigidBool']=False
CommandedLiargVariablesList=CommandedOrderedDict.values()
		#map the recursion, but take care not to set new things to walk into, otherwise it becomes an infinite walk !
map(
lambda __CommandedValueVariable:
__CommandedValueVariable.set(
'GettingNewBool',False
).command(
*CommandedLiargVariablesList
).set(
'GettingNewBool',True
),
CommandedValueVariablesList
)
#/####################/#
# Adapt the type for setting things in the commanded variables
#
#debug
'''
self.debug(
[
'Adapt the type for setting things in the commanded variables',
("self.",self,['CommandingSetVariable'])
]
)
'''
#Check
if type(self.CommandingSetVariable)!=list:
#Check
if hasattr(self.CommandingSetVariable,'items'):
#items
CommandedSetVariablesList=self.CommandingSetVariable.items()
elif type(self.CommandingSetVariable
)==str and self.CommandingSetVariable.startswith(
Getter.GetCallPrefixStr
):
#list
CommandedSetVariablesList=[
('get',self.CommandingSetVariable)
]
else:
#list
CommandedSetVariablesList=[
self.CommandingSetVariable
]
else:
#alias
CommandedSetVariablesList=self.CommandingSetVariable
#debug
'''
self.debug(
[
'in the end, CommandedSetVariablesList is ',
SYS._str(CommandedSetVariablesList)
]
)
'''
#/###################/#
# Ok now we command locally
#
#Check
if self.CommandingBeforeSelfRigidBool:
#debug
'''
self.debug(
'We command before self here'
)
'''
#add
self[Setter.SetMapStr](CommandedSetVariablesList)
#Check for the order
if self.CommandingOrderStr=="AllSetsForEachGet":
#map
map(
lambda __CommandedValueVariable:
map(
lambda __CommandedSetVariable:
__CommandedValueVariable.set(
*__CommandedSetVariable
),
CommandedSetVariablesList
),
CommandedValueVariablesList
)
elif self.CommandingOrderStr=="EachSetForAllGets":
#map
map(
lambda __CommandedSetVariable:
map(
lambda __CommandedValueVariables:
__CommandedValueVariables.set(
*__CommandedSetVariable
),
CommandedValueVariablesList
),
CommandedSetVariablesList
)
#Check
if self.CommandingAfterSelfRigidBool:
#debug
'''
self.debug(
'We command after self here'
)
'''
#add
self[Setter.SetMapStr](CommandedSetVariablesList)
#/###################/#
# And we check for a walk after
#
#Check
if self.CommandingAfterWalkRigidBool:
#debug
'''
self.debug(
[
'we are going to walk the command',
'CommandedValueVariablesList is '+SYS._str(CommandedValueVariablesList)
]
)
'''
#Debug
'''
for __CommandedValueVariable in CommandedValueVariablesList:
#debug
self.debug(
'__CommandedValueVariable is '+SYS._str( __CommandedValueVariable)
)
#set
__CommandedValueVariable.set(
'GettingNewBool',False
).command(
*self.getDoing().values()
).set(
'GettingNewBool',True
)
'''
#set
CommandedOrderedDict=self.getDoing(
SYS.CommanderClass
)
CommandedOrderedDict['CommandingBeforeSelfRigidBool']=False
CommandedLiargVariablesList=CommandedOrderedDict.values()
		#map the recursion, but take care not to set new things to walk into, otherwise it becomes an infinite walk !
map(
lambda __CommandedValueVariable:
__CommandedValueVariable.set(
'GettingNewBool',False
).command(
*CommandedLiargVariablesList
).set(
'GettingNewBool',True
),
CommandedValueVariablesList
)
def mimic_get(self):
#debug
'''
self.debug(
('self.',self,[
'GettingKeyVariable',
])
)
'''
#Check
if type(self.GettingKeyVariable)==str:
#Check
if self.GettingKeyVariable.startswith(CommandAddStr):
#split
AddGetKeyStrsList=self.GettingKeyVariable.split(CommandAddStr)[1:]
#debug
'''
self.debug(
[
'We map get',
'AddGetKeyStrsList is '+str(AddGetKeyStrsList)
]
)
'''
#map get
AddVariablesList=self[
Getter.GetMapStr
](*AddGetKeyStrsList).ItemizedMapValueVariablesList
#debug
'''
self.debug(
[
'We sum now',
'AddVariablesList is '+SYS._str(AddVariablesList)
]
)
'''
#map get
self.GettedValueVariable=SYS.sum(AddVariablesList)
#return
return {'HookingIsBool':False}
#return
return BaseClass.get(self)
def mimic_set(self):
#debug
'''
self.debug(
('self.',self,[
'SettingKeyVariable',
'SettingValueVariable'
])
)
'''
#Check
if type(self.SettingKeyVariable)==str:
#Check
if self.SettingKeyVariable.startswith(
CommandPrefixStr
):
#debug
'''
self.debug(
'We command here'
)
'''
#deprefix
CommandGetKeyStr=SYS.deprefix(
self.SettingKeyVariable,
CommandPrefixStr
)
#Check
if CommandGetKeyStr.startswith(CommandWalkStr):
#debug
'''
self.debug(
'We command-walk here'
)
'''
#command
self.command(
SYS.deprefix(
CommandGetKeyStr,
CommandWalkStr
),
self.SettingValueVariable,
_AfterWalkRigidBool=True
)
#stop the setting
return {'HookingIsBool':False}
elif CommandGetKeyStr.startswith(CommandSelfStr+CommandWalkStr):
#debug
'''
self.debug(
'We command-self-walk here'
)
'''
#command
self.command(
SYS.deprefix(
CommandGetKeyStr,
CommandSelfStr+CommandWalkStr
),
self.SettingValueVariable,
_AfterWalkRigidBool=True,
_SelfBool=True
)
#stop the setting
return {'HookingIsBool':False}
else:
#command
self.command(
CommandGetKeyStr,
self.SettingValueVariable
)
#stop the setting
return {'HookingIsBool':False}
#Check
elif self.SettingKeyVariable.startswith(
CommandWalkStr
):
#debug
'''
self.debug(
'We walk-command here'
)
'''
CommandGetKeyStr=SYS.deprefix(
self.SettingKeyVariable,
CommandWalkStr
)
#Check
if CommandGetKeyStr.startswith(CommandPrefixStr):
#command
self.command(
SYS.deprefix(
CommandGetKeyStr,
CommandPrefixStr
),
self.SettingValueVariable,
_BeforeWalkRigidBool=True
)
#stop the setting
return {'HookingIsBool':False}
elif CommandGetKeyStr.startswith(CommandSelfStr):
#command
self.command(
SYS.deprefix(
CommandGetKeyStr,
CommandSelfStr+CommandPrefixStr
),
self.SettingValueVariable,
_BeforeWalkRigidBool=True,
_AfterSelfRigidBool=True
)
#stop the setting
return {'HookingIsBool':False}
#Check
elif self.SettingKeyVariable.startswith(
CommandSelfStr+CommandWalkStr+CommandPrefixStr
):
#command
self.command(
SYS.deprefix(
self.SettingKeyVariable,
CommandSelfStr+CommandWalkStr+CommandPrefixStr
),
self.SettingValueVariable,
_BeforeWalkRigidBool=True,
_BeforeSelfRigidBool=True
)
#stop the setting
return {'HookingIsBool':False}
#debug
'''
self.debug(
[
'Call the base set method',
'BaseClass is '+str(BaseClass),
('self.',self,['SettingKeyVariable'])
]
)
'''
#Call the base method
BaseClass.set(self)
#</DefineClass>
#<DefinePrint>
CommanderClass.PrintingClassSkipKeyStrsList.extend(
[
'CommandingGetVariable',
'CommandingSetVariable',
'CommandingOrderStr',
'CommandingBeforeWalkRigidBool',
'CommandingAfterWalkRigidBool',
'CommandingBeforeSelfRigidBool',
'CommandingAfterSelfRigidBool'
]
)
#</DefinePrint>
|
|
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
import os
import numpy as np
import montage_wrapper as montage
import shutil
import sys
import glob
import time
from matplotlib.path import Path
from scipy.ndimage import zoom
from pdb import set_trace
_TOP_DIR = '/data/tycho/0/leroy.42/allsky/'
_INDEX_DIR = os.path.join(_TOP_DIR, 'code/')
_HOME_DIR = '/n/home00/lewis.1590/research/galbase_allsky/'
_MOSAIC_DIR = os.path.join(_HOME_DIR, 'cutouts')
def calc_tile_overlap(ra_ctr, dec_ctr, pad=0.0, min_ra=0., max_ra=180., min_dec=-90., max_dec=90.):
overlap = ((min_dec - pad) < dec_ctr) & ((max_dec + pad) > dec_ctr)
#TRAP HIGH LATITUDE CASE AND (I GUESS) TOSS BACK ALL TILES. DO BETTER LATER
mean_dec = (min_dec + max_dec) * 0.5
if np.abs(dec_ctr) + pad > 88.0:
return overlap
ra_pad = pad / np.cos(np.radians(mean_dec))
# MERIDIAN CASES
merid = np.where(max_ra < min_ra)
overlap[merid] = overlap[merid] & ( ((min_ra-ra_pad) < ra_ctr) | ((max_ra+ra_pad) > ra_ctr) )[merid]
# BORING CASE
normal = np.where(max_ra > min_ra)
overlap[normal] = overlap[normal] & ((((min_ra-ra_pad) < ra_ctr) & ((max_ra+ra_pad) > ra_ctr)))[normal]
return overlap
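# Illustrative sketch (made-up coordinates): with the index columns passed as
# numpy arrays, e.g.
#   calc_tile_overlap(150.1, 2.2, pad=0.5,
#                     min_ra=index['MIN_RA'], max_ra=index['MAX_RA'],
#                     min_dec=index['MIN_DEC'], max_dec=index['MAX_DEC'])
# the return value is a boolean array flagging every tile whose padded
# RA/Dec footprint contains the target centre.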
def make_axes(hdr, quiet=False, novec=False, vonly=False, simple=False):
# PULL THE IMAGE/CUBE SIZES FROM THE HEADER
naxis = hdr['NAXIS']
naxis1 = hdr['NAXIS1']
naxis2 = hdr['NAXIS2']
if naxis > 2:
naxis3 = hdr['NAXIS3']
## EXTRACT FITS ASTROMETRY STRUCTURE
ww = pywcs.WCS(hdr)
#IF DATASET IS A CUBE THEN WE MAKE THE THIRD AXIS IN THE SIMPLEST WAY POSSIBLE (NO COMPLICATED ASTROMETRY WORRIES FOR FREQUENCY INFORMATION)
if naxis > 3:
#GRAB THE RELEVANT INFORMATION FROM THE ASTROMETRY HEADER
cd = ww.wcs.cd
crpix = ww.wcs.crpix
        cdelt = ww.wcs.cdelt
crval = ww.wcs.crval
if naxis > 2:
# MAKE THE VELOCITY AXIS (WILL BE M/S)
v = np.arange(naxis3) * 1.0
vdif = v - (hdr['CRPIX3']-1)
vaxis = (vdif * hdr['CDELT3'] + hdr['CRVAL3'])
# CUT OUT HERE IF WE ONLY WANT VELOCITY INFO
if vonly:
return vaxis
#IF 'SIMPLE' IS CALLED THEN DO THE REALLY TRIVIAL THING:
if simple:
        print('Using simple approach to make axes.')
print('BE SURE THIS IS WHAT YOU WANT! It probably is not.')
raxis = np.arange(naxis1) * 1.0
rdif = raxis - (hdr['CRPIX1'] - 1)
raxis = (rdif * hdr['CDELT1'] + hdr['CRVAL1'])
daxis = np.arange(naxis2) * 1.0
        ddif = daxis - (hdr['CRPIX2'] - 1)
        daxis = (ddif * hdr['CDELT2'] + hdr['CRVAL2'])
        rimg = np.outer(raxis, np.ones(naxis2))  # was: raxis # (fltarr(naxis2) + 1.)
        dimg = np.outer(np.ones(naxis1), daxis)  # was: (fltarr(naxis1) + 1.) # daxis
return rimg, dimg
# OBNOXIOUS SFL/GLS THING
glspos = ww.wcs.ctype[0].find('GLS')
if glspos != -1:
ctstr = ww.wcs.ctype[0]
newtype = 'SFL'
        ctstr = ctstr.replace('GLS', 'SFL')
ww.wcs.ctype[0] = ctstr
print('Replaced GLS with SFL; CTYPE1 now =' + ww.wcs.ctype[0])
glspos = ww.wcs.ctype[1].find('GLS')
if glspos != -1:
ctstr = ww.wcs.ctype[1]
newtype = 'SFL'
        ctstr = ctstr.replace('GLS', 'SFL')
ww.wcs.ctype[1] = ctstr
print('Replaced GLS with SFL; CTYPE2 now = ' + ww.wcs.ctype[1])
# CALL 'xy2ad' TO FIND THE RA AND DEC FOR EVERY POINT IN THE IMAGE
if novec:
rimg = np.zeros((naxis1, naxis2))
dimg = np.zeros((naxis1, naxis2))
for i in range(naxis1):
            j = np.arange(naxis2) * 1.0
            pixcrd = np.column_stack((np.zeros(naxis2) + float(i), j))
            ra, dec = ww.all_pix2world(pixcrd[:, 0], pixcrd[:, 1], 1)
rimg[i, :] = ra
dimg[i, :] = dec
else:
ximg = np.arange(naxis1) * 1.0
yimg = np.arange(naxis1) * 1.0
X, Y = np.meshgrid(ximg, yimg, indexing='xy')
ss = X.shape
xx, yy = X.flatten(), Y.flatten()
        pixcrd = np.array(list(zip(xx, yy)), np.float_)
img_new = ww.all_pix2world(pixcrd, 0)
rimg_new, dimg_new = img_new[:,0], img_new[:,1]
rimg = rimg_new.reshape(ss)
dimg = dimg_new.reshape(ss)
# GET AXES FROM THE IMAGES. USE THE CENTRAL COLUMN AND CENTRAL ROW
raxis = np.squeeze(rimg[:, naxis2/2])
daxis = np.squeeze(dimg[naxis1/2, :])
return rimg, dimg
def write_headerfile(header_file, header):
f = open(header_file, 'w')
for iii in range(len(header)):
outline = str(header[iii:iii+1]).strip().rstrip('END').strip()+'\n'
f.write(outline)
f.close()
def create_hdr(ra_ctr, dec_ctr, pix_len, pix_scale):
hdr = pyfits.Header()
hdr['NAXIS'] = 2
hdr['NAXIS1'] = pix_len
hdr['NAXIS2'] = pix_len
hdr['CTYPE1'] = 'RA---TAN'
hdr['CRVAL1'] = float(ra_ctr)
hdr['CRPIX1'] = (pix_len / 2.) * 1.
hdr['CDELT1'] = -1.0 * pix_scale
hdr['CTYPE2'] = 'DEC--TAN'
hdr['CRVAL2'] = float(dec_ctr)
hdr['CRPIX2'] = (pix_len / 2.) * 1.
hdr['CDELT2'] = pix_scale
hdr['EQUINOX'] = 2000
return hdr
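# Illustrative sketch (arbitrary values): a 1-degree cutout at 2" pixels,
#   hdr = create_hdr(150.0, 2.0, pix_len=1800, pix_scale=2.0 / 3600.)
# gives a gnomonic (TAN) header centred on (RA, Dec) = (150.0, 2.0) with
# CDELT1 negative so that RA increases to the left.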
def unwise(band=None, ra_ctr=None, dec_ctr=None, size_deg=None, index=None, name=None):
tel = 'unwise'
data_dir = os.path.join(_TOP_DIR, tel, 'sorted_tiles')
# READ THE INDEX FILE (IF NOT PASSED IN)
if index is None:
indexfile = os.path.join(_INDEX_DIR, tel + '_index_file.fits')
ext = 1
index, hdr = pyfits.getdata(indexfile, ext, header=True)
    # CALIBRATION TO GO FROM VEGA TO ABMAG
w1_vtoab = 2.683
w2_vtoab = 3.319
w3_vtoab = 5.242
w4_vtoab = 6.604
    # NORMALIZATION OF UNITY IN VEGA MAG
norm_mag = 22.5
pix_as = 2.75 #arcseconds - native detector pixel size wise docs
# COUNTS TO JY CONVERSION
w1_to_mjysr = counts2jy(norm_mag, w1_vtoab, pix_as)
w2_to_mjysr = counts2jy(norm_mag, w2_vtoab, pix_as)
w3_to_mjysr = counts2jy(norm_mag, w3_vtoab, pix_as)
w4_to_mjysr = counts2jy(norm_mag, w4_vtoab, pix_as)
# MAKE A HEADER
pix_scale = 2.0 / 3600. # 2.0 arbitrary
pix_len = size_deg / pix_scale
# this should automatically populate SIMPLE and NAXIS keywords
target_hdr = create_hdr(ra_ctr, dec_ctr, pix_len, pix_scale)
# CALCULATE TILE OVERLAP
tile_overlaps = calc_tile_overlap(ra_ctr, dec_ctr, pad=size_deg,
min_ra=index['MIN_RA'],
max_ra=index['MAX_RA'],
min_dec=index['MIN_DEC'],
max_dec=index['MAX_DEC'])
# FIND OVERLAPPING TILES WITH RIGHT BAND
# index file set up such that index['BAND'] = 1, 2, 3, 4 depending on wise band
ind = np.where((index['BAND'] == band) & tile_overlaps)
ct_overlap = len(ind[0])
# SET UP THE OUTPUT
ri_targ, di_targ = make_axes(target_hdr)
sz_out = ri_targ.shape
outim = ri_targ * np.nan
# LOOP OVER OVERLAPPING TILES AND STITCH ONTO TARGET HEADER
for ii in range(0, ct_overlap):
infile = os.path.join(data_dir, index[ind[ii]]['FNAME'])
im, hdr = pyfits.getdata(infile, header=True)
ri, di = make_axes(hdr)
hh = pywcs.WCS(target_hdr)
        x, y = hh.all_world2pix(ri, di, 1)
        in_image = (x > 0) & (x < (sz_out[0] - 1)) & (y > 0) & (y < (sz_out[1] - 1))
if np.sum(in_image) == 0:
print("No overlap. Proceeding.")
continue
if band == 1:
im *= w1_to_mjysr
if band == 2:
im *= w2_to_mjysr
if band == 3:
im *= w3_to_mjysr
if band == 4:
im *= w4_to_mjysr
target_hdr['BUNIT'] = 'MJY/SR'
newimfile = reprojection(infile, im, hdr, target_hdr, data_dir)
im, new_hdr = pyfits.getdata(newimfile, header=True)
useful = np.where(np.isfinite(im))
outim[useful] = im[useful]
return outim, target_hdr
def counts2jy(norm_mag, calibration_value, pix_as):
# convert counts to Jy
val = 10.**((norm_mag + calibration_value) / -2.5)
val *= 3631.0
# then to MJy
val /= 1e6
# then to MJy/sr
val /= np.radians(pix_as / 3600.)**2
return val
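# Illustrative check (rounded, not from the original pipeline): for the W1
# band above, counts2jy(22.5, 2.683, 2.75) evaluates
#   10.**((22.5 + 2.683) / -2.5) * 3631. / 1e6 / np.radians(2.75 / 3600.)**2
# which comes out to roughly 1.7e-3 MJy/sr per count, i.e. the w1_to_mjysr
# factor applied to the unWISE tiles.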
def galex(band='fuv', ra_ctr=None, dec_ctr=None, size_deg=None, index=None, name=None, write_info=True):
tel = 'galex'
data_dir = os.path.join(_TOP_DIR, tel, 'sorted_tiles')
problem_file = os.path.join(_HOME_DIR, 'problem_galaxies.txt')
#numbers_file = os.path.join(_HOME_DIR, 'number_of_tiles_per_galaxy.dat')
bg_reg_file = os.path.join(_HOME_DIR, 'galex_reprojected_bg.reg')
numbers_file = os.path.join(_HOME_DIR, 'gal_reproj_info.dat')
galaxy_mosaic_file = os.path.join(_MOSAIC_DIR, '_'.join([name, band]).upper() + '.FITS')
start_time = time.time()
if not os.path.exists(galaxy_mosaic_file):
#if name == 'NGC2976':
        print(name)
# READ THE INDEX FILE (IF NOT PASSED IN)
if index is None:
indexfile = os.path.join(_INDEX_DIR, tel + '_index_file.fits')
ext = 1
index, hdr = pyfits.getdata(indexfile, ext, header=True)
# CALIBRATION FROM COUNTS TO ABMAG
fuv_toab = 18.82
nuv_toab = 20.08
# PIXEL SCALE IN ARCSECONDS
pix_as = 1.5 # galex pixel scale -- from galex docs
# MAKE A HEADER
pix_scale = 1.5 / 3600. # 1.5 arbitrary: how should I set it?
pix_len = size_deg / pix_scale
target_hdr = create_hdr(ra_ctr, dec_ctr, pix_len, pix_scale)
# CALCULATE TILE OVERLAP
tile_overlaps = calc_tile_overlap(ra_ctr, dec_ctr, pad=size_deg,
min_ra=index['MIN_RA'],
max_ra=index['MAX_RA'],
min_dec=index['MIN_DEC'],
max_dec=index['MAX_DEC'])
# FIND OVERLAPPING TILES WITH RIGHT BAND
# index file set up such that index['fuv'] = 1 where fuv and
# index['nuv'] = 1 where nuv
ind = np.where((index[band]) & tile_overlaps)
ct_overlap = len(ind[0])
# MAKE SURE THERE ARE OVERLAPPING TILES
if ct_overlap == 0:
if write_info:
with open(problem_file, 'a') as myfile:
myfile.write(name + ': ' + 'No overlapping tiles\n')
return
# SET UP THE OUTPUT
ri_targ, di_targ = make_axes(target_hdr)
sz_out = ri_targ.shape
outim = ri_targ * np.nan
prihdu = pyfits.PrimaryHDU(data=outim, header=target_hdr)
target_hdr = prihdu.header
#try:
# CREATE NEW TEMP DIRECTORY TO STORE TEMPORARY FILES
gal_dir = os.path.join(_HOME_DIR, name)
os.makedirs(gal_dir)
# CREATE SUBDIRECTORIES INSIDE TEMP DIRECTORY FOR ALL TEMP FILES
input_dir = os.path.join(gal_dir, 'input')
converted_dir = os.path.join(gal_dir, 'converted')
masked_dir = os.path.join(gal_dir, 'masked')
reprojected_dir = os.path.join(gal_dir, 'reprojected')
weights_dir = os.path.join(gal_dir, 'weights')
weighted_dir = os.path.join(gal_dir, 'weighted')
final_dir = os.path.join(gal_dir, 'mosaic')
for indir in [input_dir, reprojected_dir, weights_dir, weighted_dir, final_dir, converted_dir, masked_dir]:
os.makedirs(indir)
# GATHER THE INPUT FILES
infiles = index[ind[0]]['fname']
wtfiles = index[ind[0]]['rrhrfile']
flgfiles = index[ind[0]]['flagfile']
infiles = [os.path.join(data_dir, f) for f in infiles]
wtfiles = [os.path.join(data_dir, f) for f in wtfiles]
flgfiles = [os.path.join(data_dir, f) for f in flgfiles]
# SYMLINK ORIGINAL FILES TO TEMPORARY INPUT DIRECTORY
for infile in infiles:
basename = os.path.basename(infile)
new_in_file = os.path.join(input_dir, basename)
os.symlink(infile, new_in_file)
for wtfile in wtfiles:
basename = os.path.basename(wtfile)
new_wt_file = os.path.join(input_dir, basename)
os.symlink(wtfile, new_wt_file)
for flgfile in flgfiles:
basename = os.path.basename(flgfile)
new_flg_file = os.path.join(input_dir, basename)
os.symlink(flgfile, new_flg_file)
# CONVERT INT FILES TO MJY/SR AND WRITE NEW FILES INTO TEMP DIR
# CONVERT WT FILES TO WT/SR AND WRITE NEW FILES INTO TEMP DIR
intfiles = sorted(glob.glob(os.path.join(input_dir, '*-int.fits')))
wtfiles = sorted(glob.glob(os.path.join(input_dir, '*-rrhr.fits')))
int_outfiles = [os.path.join(converted_dir, os.path.basename(f).replace('.fits', '_mjysr.fits')) for f in intfiles]
wt_outfiles = [os.path.join(converted_dir, os.path.basename(f).replace('.fits', '.fits')) for f in wtfiles]
for i in range(len(infiles)):
im, hdr = pyfits.getdata(infiles[i], header=True)
wt, whdr = pyfits.getdata(wtfiles[i], header=True)
#wt = wtpersr(wt, pix_as)
if band.lower() == 'fuv':
im = counts2jy_galex(im, fuv_toab, pix_as)
if band.lower() == 'nuv':
im = counts2jy_galex(im, nuv_toab, pix_as)
if not os.path.exists(int_outfiles[i]):
pyfits.writeto(int_outfiles[i], im, hdr)
if not os.path.exists(wt_outfiles[i]):
pyfits.writeto(wt_outfiles[i], wt, whdr)
# APPEND UNIT INFORMATION TO THE NEW HEADER
target_hdr['BUNIT'] = 'MJY/SR'
# WRITE OUT A HEADER FILE
hdr_file = os.path.join(gal_dir, name + '_template.hdr')
write_headerfile(hdr_file, target_hdr)
# PERFORM THE REPROJECTION, WEIGHTING, AND EXTRACTION
try:
# MASK IMAGES
int_suff, rrhr_suff, flag_suff = '*_mjysr.fits', '*-rrhr.fits', '*-flags.fits'
int_images = sorted(glob.glob(os.path.join(converted_dir, int_suff)))
rrhr_images = sorted(glob.glob(os.path.join(converted_dir, rrhr_suff)))
flag_images = sorted(glob.glob(os.path.join(input_dir, flag_suff)))
mask_images(int_images, rrhr_images, flag_images, input_dir,masked_dir)
# REPROJECT IMAGES
reproject_images(hdr_file, masked_dir, reprojected_dir)
# WEIGHT IMAGES
im_suff, wt_suff = '*_mjysr_masked.fits', '*-rrhr_masked.fits'
imfiles = sorted(glob.glob(os.path.join(reprojected_dir, im_suff)))
wtfiles = sorted(glob.glob(os.path.join(reprojected_dir, wt_suff)))
weight_images(imfiles, wtfiles, weighted_dir, weights_dir)
# CREATE THE METADATA TABLES NEEDED FOR COADDITION
#tables = create_tables(weights_dir, weighted_dir)
weight_table = create_table(weights_dir, dir_type='weights')
weighted_table = create_table(weighted_dir, dir_type='int')
count_table = create_table(weighted_dir, dir_type='count')
# COADD THE REPROJECTED, WEIGHTED IMAGES AND THE WEIGHT IMAGES
coadd(hdr_file, final_dir, weights_dir, output='weights')
coadd(hdr_file, final_dir, weighted_dir, output='int')
coadd(hdr_file, final_dir, weighted_dir, output='count', add_type='count')
# DIVIDE OUT THE WEIGHTS
imagefile = finish_weight(final_dir)
# SUBTRACT OUT THE BACKGROUND
remove_background(final_dir, imagefile, bg_reg_file)
# COPY MOSAIC FILE TO CUTOUTS DIRECTORY
mosaic_file = os.path.join(final_dir, 'final_mosaic.fits')
newfile = '_'.join([name, band]).upper() + '.FITS'
new_mosaic_file = os.path.join(_MOSAIC_DIR, newfile)
shutil.copy(mosaic_file, new_mosaic_file)
# REMOVE GALAXY DIRECTORY AND EXTRA FILES
#shutil.rmtree(gal_dir, ignore_errors=True)
stop_time = time.time()
total_time = (stop_time - start_time) / 60.
# WRITE OUT THE NUMBER OF TILES THAT OVERLAP THE GIVEN GALAXY
if write_info:
out_arr = [name, len(infiles), np.around(total_time,2)]
with open(numbers_file, 'a') as nfile:
nfile.write('{0: >10}'.format(out_arr[0]))
nfile.write('{0: >6}'.format(out_arr[1]))
nfile.write('{0: >6}'.format(out_arr[2]) + '\n')
#nfile.write(name + ': ' + str(len(infiles)) + '\n')
# SOMETHING WENT WRONG
except Exception as inst:
me = sys.exc_info()[0]
if write_info:
with open(problem_file, 'a') as myfile:
myfile.write(name + ': ' + str(me) + ': '+str(inst)+'\n')
shutil.rmtree(gal_dir, ignore_errors=True)
return
def counts2jy_galex(counts, cal, pix_as):
# first convert to abmag
abmag = -2.5 * np.log10(counts) + cal
# then convert to Jy
f_nu = 10**(abmag/-2.5) * 3631.
# then to MJy
f_nu *= 1e-6
# then to MJy/sr
val = f_nu / (np.radians(pix_as/3600))**2
return val
#val = flux / MJYSR2JYARCSEC / pixel_area / 1e-23 / C * FUV_LAMBDA**2
def wtpersr(wt, pix_as):
return wt / (np.radians(pix_as/3600))**2
def mask_galex(intfile, wtfile, flagfile, outfile=None, chip_rad = 1400, chip_x0=1920, chip_y0=1920, out_intfile=None, out_wtfile=None):
if out_intfile is None:
out_intfile = intfile.replace('.fits', '_masked.fits')
if out_wtfile is None:
out_wtfile = wtfile.replace('.fits', '_masked.fits')
if not os.path.exists(out_intfile):
data, hdr = pyfits.getdata(intfile, header=True)
wt, whdr = pyfits.getdata(wtfile, header=True)
flag, fhdr = pyfits.getdata(flagfile, header=True)
factor = float(len(data)) / len(flag)
upflag = zoom(flag, factor, order=0)
x = np.arange(data.shape[1]).reshape(1, -1) + 1
y = np.arange(data.shape[0]).reshape(-1, 1) + 1
r = np.sqrt((x - chip_x0)**2 + (y - chip_y0)**2)
#i = (r > chip_rad) | (data == 0) # doesnt work for ngc0024
i = (r > chip_rad) #| (data == 0)
j = (data == 0)
k = (wt == -1.1e30)
data = np.where(i | k, 0, data) #0
wt = np.where(i | k, 1e-20, wt) #1e-20
pyfits.writeto(out_intfile, data, hdr)
pyfits.writeto(out_wtfile, wt, whdr)
def mask_images(int_images, rrhr_images, flag_images, input_dir, masked_dir):
for i in range(len(int_images)):
image_infile = int_images[i]
wt_infile = rrhr_images[i]
flg_infile = flag_images[i]
image_outfile = os.path.join(masked_dir, os.path.basename(image_infile).replace('.fits', '_masked.fits'))
wt_outfile = os.path.join(masked_dir, os.path.basename(wt_infile).replace('.fits', '_masked.fits'))
mask_galex(image_infile, wt_infile, flg_infile, out_intfile=image_outfile, out_wtfile=wt_outfile)
def reproject_images(template_header, input_dir, reprojected_dir, whole=False, exact=True):
input_table = os.path.join(input_dir, 'input.tbl')
montage.mImgtbl(input_dir, input_table, corners=True)
# Create reprojection directory, reproject, and get image metadata
stats_table = os.path.join(reprojected_dir, 'mProjExec_stats.log')
montage.mProjExec(input_table, template_header, reprojected_dir, stats_table, raw_dir=input_dir, whole=whole, exact=exact)
reprojected_table = os.path.join(reprojected_dir, 'reprojected.tbl')
montage.mImgtbl(reprojected_dir, reprojected_table, corners=True)
def weight_images(imfiles, wtfiles, weighted_dir, weights_dir):
for i in range(len(imfiles)):
imfile = imfiles[i]
wtfile = wtfiles[i]
im, hdr = pyfits.getdata(imfile, header=True)
rrhr, rrhrhdr = pyfits.getdata(wtfile, header=True)
wt = rrhr
newim = im * wt
nf = imfiles[i].split('/')[-1].replace('.fits', '_weighted.fits')
newfile = os.path.join(weighted_dir, nf)
pyfits.writeto(newfile, newim, hdr)
old_area_file = imfiles[i].replace('.fits', '_area.fits')
new_area_file = newfile.replace('.fits', '_area.fits')
shutil.copy(old_area_file, new_area_file)
nf = wtfiles[i].split('/')[-1].replace('.fits', '_weights.fits')
weightfile = os.path.join(weights_dir, nf)
pyfits.writeto(weightfile, wt, rrhrhdr)
old_area_file = wtfiles[i].replace('.fits', '_area.fits')
new_area_file = weightfile.replace('.fits', '_area.fits')
shutil.copy(old_area_file, new_area_file)
def create_table(in_dir, dir_type=None):
if dir_type is None:
reprojected_table = os.path.join(in_dir, 'reprojected.tbl')
else:
reprojected_table = os.path.join(in_dir, dir_type + '_reprojected.tbl')
montage.mImgtbl(in_dir, reprojected_table, corners=True)
return reprojected_table
def coadd(template_header, output_dir, input_dir, output=None,add_type='mean'):
img_dir = input_dir
# output is either 'weights' or 'int'
if output is None:
reprojected_table = os.path.join(img_dir, 'reprojected.tbl')
out_image = os.path.join(output_dir, 'mosaic.fits')
else:
reprojected_table = os.path.join(img_dir, output + '_reprojected.tbl')
out_image = os.path.join(output_dir, output + '_mosaic.fits')
montage.mAdd(reprojected_table, template_header, out_image, img_dir=img_dir, exact=True, type=add_type)
def finish_weight(output_dir):
image_file = os.path.join(output_dir, 'int_mosaic.fits')
wt_file = os.path.join(output_dir, 'weights_mosaic.fits')
count_file = os.path.join(output_dir, 'count_mosaic.fits')
im, hdr = pyfits.getdata(image_file, header=True)
wt = pyfits.getdata(wt_file)
ct = pyfits.getdata(count_file)
newim = im / wt
newfile = os.path.join(output_dir, 'image_mosaic.fits')
pyfits.writeto(newfile, newim, hdr)
return newfile
def remove_background(final_dir, imfile, bgfile):
data, hdr = pyfits.getdata(imfile, header=True)
box_inds = read_bg_regfile(bgfile)
allvals = []
sample_means = []
for box in box_inds:
        rectangle = list(zip(box[0::2], box[1::2]))
sample = get_bg_sample(data, hdr, rectangle)
for s in sample:
allvals.append(s)
sample_mean = np.nanmean(sample)
sample_means.append(sample_mean)
this_mean = np.around(np.nanmean(sample_means), 8)
final_data = data - this_mean
hdr['BG'] = this_mean
hdr['comment'] = 'Background has been subtracted.'
outfile = os.path.join(final_dir, 'final_mosaic.fits')
pyfits.writeto(outfile, final_data, hdr)
def read_bg_regfile(regfile):
f = open(regfile, 'r')
boxes = f.readlines()
f.close()
box_list = []
for b in boxes:
this_box = []
box = b.strip('polygon()\n').split(',')
[this_box.append(int(np.around(float(bb), 0))) for bb in box]
box_list.append(this_box)
return box_list
def get_bg_sample(data, hdr, box):
wcs = pywcs.WCS(hdr, naxis=2)
x, y = np.arange(data.shape[0]), np.arange(data.shape[1])
X, Y = np.meshgrid(x, y, indexing='ij')
xx, yy = X.flatten(), Y.flatten()
    pixels = np.array(list(zip(yy, xx)))
box_coords = box
sel = Path(box_coords).contains_points(pixels)
sample = data.flatten()[sel]
return sample
|
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Migrates histogram_suffixes to patterned histograms"""
import argparse
import logging
import os
from xml.dom import minidom
import extract_histograms
import histogram_configuration_model
import histogram_paths
import path_util
HISTOGRAM_SUFFIXES_LIST_PATH = path_util.GetInputFile(
'tools/metrics/histograms/metadata/histogram_suffixes_list.xml')
def _ExtractObsoleteNode(node, recursive=True):
"""Extracts obsolete child from |node|. Returns None if not exists."""
if not recursive:
obsolete = [
element for element in node.getElementsByTagName('obsolete')
if element.parentNode == node
]
else:
obsolete = node.getElementsByTagName('obsolete')
if not obsolete:
return None
assert len(obsolete) == 1, (
      'Node %s should contain at most one obsolete node.' %
node.getAttribute('name'))
return obsolete[0]
def _ExtractOwnerNodes(node):
"""Extracts all owners from |node|. Returns None if not exists."""
return node.getElementsByTagName('owner')
def _RemoveSuffixesComment(node, histogram_suffixes_name):
"""Remove suffixes related comments from |node|."""
for child in node.childNodes:
if child.nodeType == minidom.Node.COMMENT_NODE:
if ('Name completed by' in child.data
and histogram_suffixes_name in child.data):
node.removeChild(child)
def _UpdateSummary(histogram, histogram_suffixes_name):
"""Appends a placeholder string to the |histogram|'s summary node."""
summary = histogram.getElementsByTagName('summary')
assert len(summary) == 1, 'A histogram should have a single summary node.'
summary = summary[0]
if summary.firstChild.nodeType != summary.TEXT_NODE:
raise ValueError('summary_node doesn\'t contain text.')
summary.firstChild.replaceWholeText(
'%s {%s}' % (summary.firstChild.data.strip(), histogram_suffixes_name))
def _AreAllAffectedHistogramsFound(affected_histograms, histograms):
"""Checks that are all affected histograms found in |histograms|."""
histogram_names = [histogram.getAttribute('name') for histogram in histograms]
return all(
affected_histogram.getAttribute('name') in histogram_names
for affected_histogram in affected_histograms)
def _GetSuffixesDict(nodes, all_histograms):
"""Gets a dict of simple histogram-suffixes to be used in the migration.
Returns two dicts of histogram-suffixes to be migrated to the new patterned
histograms syntax.
The first dict: the keys are the histogram-suffixes' affected histogram name
and the values are the histogram_suffixes nodes that have only one
affected-histogram. These histograms-suffixes can be converted to inline
patterned histograms.
The second dict: the keys are the histogram_suffixes name and the values
are the histogram_suffixes nodes whose affected-histograms are all present in
the |all_histograms|. These histogram suffixes can be converted to out-of-line
variants.
Args:
nodes: A Nodelist of histograms_suffixes nodes.
all_histograms: A Nodelist of all chosen histograms.
Returns:
    Two dicts of histogram-suffixes nodes, keyed as described above.
"""
single_affected = {}
all_affected_found = {}
for histogram_suffixes in nodes:
affected_histograms = histogram_suffixes.getElementsByTagName(
'affected-histogram')
if len(affected_histograms) == 1:
affected_histogram = affected_histograms[0].getAttribute('name')
single_affected[affected_histogram] = histogram_suffixes
elif _AreAllAffectedHistogramsFound(affected_histograms, all_histograms):
for affected_histogram in affected_histograms:
affected_histogram = affected_histogram.getAttribute('name')
if affected_histogram in all_affected_found:
logging.warning(
'Histogram %s is already associated with other suffixes. '
'Please manually migrate it.', affected_histogram)
continue
all_affected_found[affected_histogram] = histogram_suffixes
return single_affected, all_affected_found
def _GetBaseVariant(doc, histogram):
"""Returns a <variant> node whose name is an empty string as the base variant.
If histogram has attribute `base = True`, it means that the base histogram
should be marked as obsolete.
Args:
doc: A Document object which is used to create a new <variant> node.
histogram: The <histogram> node to check whether its base is true or not.
Returns:
A <variant> node.
"""
is_base = False
if histogram.hasAttribute('base'):
is_base = histogram.getAttribute('base').lower() == 'true'
histogram.removeAttribute('base')
base_variant = doc.createElement('variant')
base_variant.setAttribute('name', '')
if is_base:
base_obsolete_node = doc.createElement('obsolete')
base_obsolete_node.appendChild(
doc.createTextNode(
extract_histograms.DEFAULT_BASE_HISTOGRAM_OBSOLETE_REASON))
base_variant.appendChild(base_obsolete_node)
return base_variant
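# Illustrative sketch only (hypothetical histogram): when the affected
# histogram is declared with base="true", _GetBaseVariant returns an
# empty-named <variant> carrying an <obsolete> child and strips the 'base'
# attribute; otherwise the returned <variant> has no children.
def _ExampleGetBaseVariant():
  doc = minidom.Document()
  histogram = minidom.parseString(
      '<histogram name="Foo.Bar" base="true"/>').documentElement
  variant = _GetBaseVariant(doc, histogram)
  # variant.getAttribute('name') == '' and variant has one <obsolete> child;
  # histogram no longer has the 'base' attribute.
  return variant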
def _PopulateVariantsWithSuffixes(doc, node, histogram_suffixes):
"""Populates <variant> nodes to |node| from <suffix>.
This function returns True if none of the suffixes contains 'base' attribute.
If this function returns false, the caller's histogram node will not be
updated. This is mainly because base suffix is a much more complicated case
and thus it can not be automatically updated at least for now.
Args:
doc: A Document object which is used to create a new <variant> node.
    node: The node to be populated. It should be either <token> for inline
variants or <variants> for out-of-line variants.
histogram_suffixes: A <histogram_suffixes> node.
Returns:
True if the node can be updated automatically.
"""
separator = histogram_suffixes.getAttribute('separator')
suffixes_owners = _ExtractOwnerNodes(histogram_suffixes)
suffixes_name = histogram_suffixes.getAttribute('name')
# Check if <histogram_suffixes> node has its own <obsolete> node.
obsolete_histogram_suffix_node = _ExtractObsoleteNode(histogram_suffixes,
False)
for suffix in histogram_suffixes.getElementsByTagName('suffix'):
    # The base suffix is a much more complicated case. It might require manual
    # effort to migrate, so skip this case for now.
suffix_name = suffix.getAttribute('name')
if suffix.hasAttribute('base'):
logging.warning(
'suffix: %s in histogram_suffixes %s has base attribute. Please '
'manually migrate it.', suffix_name, suffixes_name)
return False
    # The suffix name might be empty. In that case the new variant would
    # collide with the base variant, so these affected histograms have to be
    # updated manually.
if not suffix_name:
logging.warning(
'histogram suffixes: %s contains empty string suffix and thus we '
'have to manually update the empty string variant in these base '
'histograms: %s.', suffixes_name, ','.join(
h.getAttribute('name') for h in
histogram_suffixes.getElementsByTagName('affected-histogram')))
return False
variant = doc.createElement('variant')
if histogram_suffixes.hasAttribute('ordering'):
variant.setAttribute('name', suffix_name + separator)
else:
variant.setAttribute('name', separator + suffix_name)
if suffix.hasAttribute('label'):
variant.setAttribute('summary', suffix.getAttribute('label'))
    # Copy the obsolete node from the suffix to the new variant. The obsolete
    # node of a suffix, if present, overrides the obsolete node of the
    # histogram_suffixes node.
obsolete = _ExtractObsoleteNode(suffix) or obsolete_histogram_suffix_node
if obsolete:
variant.appendChild(obsolete.cloneNode(deep=True))
# Populate owner's node from histogram suffixes to each new variant.
for owner in suffixes_owners:
variant.appendChild(owner.cloneNode(deep=True))
node.appendChild(variant)
return True
def _UpdateHistogramName(histogram, histogram_suffixes):
"""Adds histogram_suffixes's placeholder to the histogram name."""
histogram_name = histogram.getAttribute('name')
histogram_suffixes_name = histogram_suffixes.getAttribute('name')
ordering = histogram_suffixes.getAttribute('ordering')
if not ordering:
histogram.setAttribute('name',
'%s{%s}' % (histogram_name, histogram_suffixes_name))
else:
parts = ordering.split(',')
placement = 1
if len(parts) > 1:
placement = int(parts[1])
sections = histogram_name.split('.')
cluster = '.'.join(sections[0:placement]) + '.'
    remainder = '.'.join(sections[placement:])
    histogram.setAttribute(
        'name', '%s{%s}%s' % (cluster, histogram_suffixes_name, remainder))
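# Illustrative sketch only (hypothetical names): how the 'ordering' attribute
# of <histogram_suffixes> changes where _UpdateHistogramName inserts the
# placeholder. Without 'ordering' the token is appended to the name; with
# ordering="prefix,2" it is inserted after the second dot-separated section.
def _ExampleUpdateHistogramName():
  histogram = minidom.parseString(
      '<histogram name="Foo.Bar.Baz"/>').documentElement
  suffixes = minidom.parseString(
      '<histogram_suffixes name="MySuffix" separator="." ordering="prefix,2"/>'
  ).documentElement
  _UpdateHistogramName(histogram, suffixes)
  # The histogram is now named 'Foo.Bar.{MySuffix}Baz'; without the 'ordering'
  # attribute it would have become 'Foo.Bar.Baz{MySuffix}'.
  return histogram.getAttribute('name')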
def MigrateToInlinePatternedHistogram(doc, histogram, histogram_suffixes):
  """Migrates a single histogram-suffixes node to an inline patterned
  histogram."""
# Keep a deep copy in case when the |histogram| fails to be migrated.
old_histogram = histogram.cloneNode(deep=True)
# Update histogram's name with the histogram_suffixes' name.
histogram_suffixes_name = histogram_suffixes.getAttribute('name')
_UpdateHistogramName(histogram, histogram_suffixes)
# Append |histogram_suffixes_name| placeholder string to the summary text.
_UpdateSummary(histogram, histogram_suffixes_name)
# Create an inline <token> node.
token = doc.createElement('token')
token.setAttribute('key', histogram_suffixes_name)
token.appendChild(_GetBaseVariant(doc, histogram))
# Populate <variant>s to the inline <token> node.
if not _PopulateVariantsWithSuffixes(doc, token, histogram_suffixes):
    logging.warning('histogram_suffixes: %s needs manual effort',
histogram_suffixes_name)
histograms = histogram.parentNode
histograms.removeChild(histogram)
    # Restore the old histogram when the script fails to migrate it.
histograms.appendChild(old_histogram)
else:
histogram.appendChild(token)
histogram_suffixes.parentNode.removeChild(histogram_suffixes)
# Remove obsolete comments from the histogram node.
_RemoveSuffixesComment(histogram, histogram_suffixes_name)
def MigrateToOutOfLinePatternedHistogram(doc, histogram, histogram_suffixes):
  """Migrates a histogram-suffixes node to an out-of-line patterned
  histogram."""
# Update histogram's name with the histogram_suffixes' name.
histogram_suffixes_name = histogram_suffixes.getAttribute('name')
_UpdateHistogramName(histogram, histogram_suffixes)
# Append |histogram_suffixes_name| placeholder string to the summary text.
_UpdateSummary(histogram, histogram_suffixes_name)
# Create a <token> node that links to an out-of-line <variants>.
token = doc.createElement('token')
token.setAttribute('key', histogram_suffixes_name)
token.setAttribute('variants', histogram_suffixes_name)
token.appendChild(_GetBaseVariant(doc, histogram))
histogram.appendChild(token)
# Remove obsolete comments from the histogram node.
_RemoveSuffixesComment(histogram, histogram_suffixes_name)
def _MigrateOutOfLineVariants(doc, histograms, suffixes_to_convert):
"""Converts a histogram-suffixes node to an out-of-line variants."""
histograms_node = histograms.getElementsByTagName('histograms')
assert len(histograms_node) == 1, (
'Every histograms.xml should have only one <histograms> node.')
for suffixes in suffixes_to_convert:
histogram_suffixes_name = suffixes.getAttribute('name')
variants = doc.createElement('variants')
variants.setAttribute('name', histogram_suffixes_name)
if not _PopulateVariantsWithSuffixes(doc, variants, suffixes):
      logging.warning('histogram_suffixes: %s needs manual effort',
histogram_suffixes_name)
else:
histograms_node[0].appendChild(variants)
suffixes.parentNode.removeChild(suffixes)
def ChooseFiles(args):
"""Chooses a set of files to process so that we can migrate incrementally."""
paths = []
for path in sorted(histogram_paths.HISTOGRAMS_XMLS):
if 'metadata' in path and path.endswith('histograms.xml'):
name = os.path.basename(os.path.dirname(path))
if args.start <= name[0] <= args.end:
paths.append(path)
if args.obsolete:
paths.append(histogram_paths.OBSOLETE_XML)
return paths
def SuffixesToVariantsMigration(args):
"""Migates all histogram suffixes to patterned histograms."""
histogram_suffixes_list = minidom.parse(open(HISTOGRAM_SUFFIXES_LIST_PATH))
histogram_suffixes_nodes = histogram_suffixes_list.getElementsByTagName(
'histogram_suffixes')
doc = minidom.Document()
for histograms_file in ChooseFiles(args):
histograms = minidom.parse(open(histograms_file))
single_affected, all_affected_found = _GetSuffixesDict(
histogram_suffixes_nodes, histograms.getElementsByTagName('histogram'))
suffixes_to_convert = set()
for histogram in histograms.getElementsByTagName('histogram'):
name = histogram.getAttribute('name')
# Migrate inline patterned histograms.
if name in single_affected.keys():
        MigrateToInlinePatternedHistogram(doc, histogram,
                                          single_affected[name])
elif name in all_affected_found.keys():
suffixes_to_convert.add(all_affected_found[name])
        MigrateToOutOfLinePatternedHistogram(doc, histogram,
                                             all_affected_found[name])
_MigrateOutOfLineVariants(doc, histograms, suffixes_to_convert)
# Update histograms.xml with patterned histograms.
with open(histograms_file, 'w') as f:
pretty_xml_string = histogram_configuration_model.PrettifyTree(histograms)
f.write(pretty_xml_string)
# Remove histogram_suffixes that have already been migrated.
with open(HISTOGRAM_SUFFIXES_LIST_PATH, 'w') as f:
pretty_xml_string = histogram_configuration_model.PrettifyTree(
histogram_suffixes_list)
f.write(pretty_xml_string)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--start',
help='Start migration from a certain character (inclusive).',
default='a')
parser.add_argument('--end',
help='End migration at a certain character (inclusive).',
default='z')
parser.add_argument('--obsolete',
help='Whether to migrate obsolete_histograms.xml',
default=False)
args = parser.parse_args()
assert len(args.start) == 1 and len(args.end) == 1, (
'start and end flag should only contain a single letter.')
SuffixesToVariantsMigration(args)
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from datetime import timedelta # noqa
from django.conf import settings
from django.utils import datetime_safe
from keystoneclient import access
from keystoneclient.v2_0 import ec2
from keystoneclient.v2_0 import roles
from keystoneclient.v2_0 import tenants
from keystoneclient.v2_0 import users
from keystoneclient.v3 import domains
from keystoneclient.v3 import groups
from keystoneclient.v3 import role_assignments
from openstack_auth import user as auth_user
from openstack_dashboard.test.test_data import utils
# Dummy service catalog with all services.
# All endpoint URLs should point to example.com.
# Try to keep them as accurate to real data as possible (ports, URIs, etc.)
SERVICE_CATALOG = [
{"type": "compute",
"name": "nova",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.nova.example.com:8774/v2",
"internalURL": "http://int.nova.example.com:8774/v2",
"publicURL": "http://public.nova.example.com:8774/v2"},
{"region": "RegionTwo",
"adminURL": "http://admin.nova2.example.com:8774/v2",
"internalURL": "http://int.nova2.example.com:8774/v2",
"publicURL": "http://public.nova2.example.com:8774/v2"}]},
{"type": "volume",
"name": "cinder",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.nova.example.com:8776/v1",
"internalURL": "http://int.nova.example.com:8776/v1",
"publicURL": "http://public.nova.example.com:8776/v1"},
{"region": "RegionTwo",
"adminURL": "http://admin.nova.example.com:8776/v1",
"internalURL": "http://int.nova.example.com:8776/v1",
"publicURL": "http://public.nova.example.com:8776/v1"}]},
{"type": "volumev2",
"name": "cinderv2",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.nova.example.com:8776/v2",
"internalURL": "http://int.nova.example.com:8776/v2",
"publicURL": "http://public.nova.example.com:8776/v2"},
{"region": "RegionTwo",
"adminURL": "http://admin.nova.example.com:8776/v2",
"internalURL": "http://int.nova.example.com:8776/v2",
"publicURL": "http://public.nova.example.com:8776/v2"}]},
{"type": "image",
"name": "glance",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.glance.example.com:9292/v1",
"internalURL": "http://int.glance.example.com:9292/v1",
"publicURL": "http://public.glance.example.com:9292/v1"}]},
{"type": "identity",
"name": "keystone",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.keystone.example.com:35357/v2.0",
"internalURL": "http://int.keystone.example.com:5000/v2.0",
"publicURL": "http://public.keystone.example.com:5000/v2.0"}]},
{"type": "object-store",
"name": "swift",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.swift.example.com:8080/",
"internalURL": "http://int.swift.example.com:8080/",
"publicURL": "http://public.swift.example.com:8080/"}]},
{"type": "network",
"name": "neutron",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.neutron.example.com:9696/",
"internalURL": "http://int.neutron.example.com:9696/",
"publicURL": "http://public.neutron.example.com:9696/"}]},
{"type": "ec2",
"name": "EC2 Service",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.nova.example.com:8773/services/Admin",
"publicURL": "http://public.nova.example.com:8773/services/Cloud",
"internalURL": "http://int.nova.example.com:8773/services/Cloud"}]},
{"type": "metering",
"name": "ceilometer",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.ceilometer.example.com:8777",
"publicURL": "http://public.ceilometer.example.com:8777",
"internalURL": "http://int.ceilometer.example.com:8777"}]},
{"type": "orchestration",
"name": "Heat",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.heat.example.com:8004/v1",
"publicURL": "http://public.heat.example.com:8004/v1",
"internalURL": "http://int.heat.example.com:8004/v1"}]},
{"type": "database",
"name": "Trove",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.trove.example.com:8779/v1.0",
"publicURL": "http://public.trove.example.com:8779/v1.0",
"internalURL": "http://int.trove.example.com:8779/v1.0"}]},
{"type": "data_processing",
"name": "Sahara",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.sahara.example.com:8386/v1.1",
"publicURL": "http://public.sahara.example.com:8386/v1.1",
"internalURL": "http://int.sahara.example.com:8386/v1.1"}]}
]
def data(TEST):
# Make a deep copy of the catalog to avoid persisting side-effects
# when tests modify the catalog.
TEST.service_catalog = copy.deepcopy(SERVICE_CATALOG)
TEST.tokens = utils.TestDataContainer()
TEST.domains = utils.TestDataContainer()
TEST.users = utils.TestDataContainer()
TEST.groups = utils.TestDataContainer()
TEST.tenants = utils.TestDataContainer()
TEST.role_assignments = utils.TestDataContainer()
TEST.roles = utils.TestDataContainer()
TEST.ec2 = utils.TestDataContainer()
admin_role_dict = {'id': '1',
'name': 'admin'}
admin_role = roles.Role(roles.RoleManager, admin_role_dict)
member_role_dict = {'id': "2",
'name': settings.OPENSTACK_KEYSTONE_DEFAULT_ROLE}
member_role = roles.Role(roles.RoleManager, member_role_dict)
TEST.roles.add(admin_role, member_role)
TEST.roles.admin = admin_role
TEST.roles.member = member_role
domain_dict = {'id': "1",
'name': 'test_domain',
'description': "a test domain.",
'enabled': True}
domain_dict_2 = {'id': "2",
'name': 'disabled_domain',
'description': "a disabled test domain.",
'enabled': False}
domain = domains.Domain(domains.DomainManager, domain_dict)
disabled_domain = domains.Domain(domains.DomainManager, domain_dict_2)
TEST.domains.add(domain, disabled_domain)
TEST.domain = domain # Your "current" domain
user_dict = {'id': "1",
'name': 'test_user',
'email': 'test@example.com',
'password': 'password',
'token': 'test_token',
'project_id': '1',
'enabled': True,
'domain_id': "1"}
user = users.User(None, user_dict)
user_dict = {'id': "2",
'name': 'user_two',
'email': 'two@example.com',
'password': 'password',
'token': 'test_token',
'project_id': '1',
'enabled': True,
'domain_id': "1"}
user2 = users.User(None, user_dict)
user_dict = {'id': "3",
'name': 'user_three',
'email': 'three@example.com',
'password': 'password',
'token': 'test_token',
'project_id': '1',
'enabled': True,
'domain_id': "1"}
user3 = users.User(None, user_dict)
user_dict = {'id': "4",
'name': 'user_four',
'email': 'four@example.com',
'password': 'password',
'token': 'test_token',
'project_id': '2',
'enabled': True,
'domain_id': "2"}
user4 = users.User(None, user_dict)
user_dict = {'id': "5",
'name': 'user_five',
'email': None,
'password': 'password',
'token': 'test_token',
'project_id': '2',
'enabled': True,
'domain_id': "1"}
user5 = users.User(None, user_dict)
TEST.users.add(user, user2, user3, user4, user5)
TEST.user = user # Your "current" user
TEST.user.service_catalog = copy.deepcopy(SERVICE_CATALOG)
group_dict = {'id': "1",
'name': 'group_one',
'description': 'group one description',
'project_id': '1',
'domain_id': '1'}
group = groups.Group(groups.GroupManager(None), group_dict)
group_dict = {'id': "2",
'name': 'group_two',
'description': 'group two description',
'project_id': '1',
'domain_id': '1'}
group2 = groups.Group(groups.GroupManager(None), group_dict)
group_dict = {'id': "3",
'name': 'group_three',
'description': 'group three description',
'project_id': '1',
'domain_id': '1'}
group3 = groups.Group(groups.GroupManager(None), group_dict)
group_dict = {'id': "4",
'name': 'group_four',
'description': 'group four description',
'project_id': '2',
'domain_id': '2'}
group4 = groups.Group(groups.GroupManager(None), group_dict)
TEST.groups.add(group, group2, group3, group4)
role_assignments_dict = {'user': {'id': '1'},
'role': {'id': '1'},
'scope': {'project': {'id': '1'}}}
proj_role_assignment1 = role_assignments.RoleAssignment(
role_assignments.RoleAssignmentManager, role_assignments_dict)
role_assignments_dict = {'user': {'id': '2'},
'role': {'id': '2'},
'scope': {'project': {'id': '1'}}}
proj_role_assignment2 = role_assignments.RoleAssignment(
role_assignments.RoleAssignmentManager, role_assignments_dict)
role_assignments_dict = {'group': {'id': '1'},
'role': {'id': '2'},
'scope': {'project': {'id': '1'}}}
proj_role_assignment3 = role_assignments.RoleAssignment(
role_assignments.RoleAssignmentManager, role_assignments_dict)
role_assignments_dict = {'user': {'id': '3'},
'role': {'id': '2'},
'scope': {'project': {'id': '1'}}}
proj_role_assignment4 = role_assignments.RoleAssignment(
role_assignments.RoleAssignmentManager, role_assignments_dict)
role_assignments_dict = {'user': {'id': '1'},
'role': {'id': '1'},
'scope': {'domain': {'id': '1'}}}
domain_role_assignment1 = role_assignments.RoleAssignment(
role_assignments.RoleAssignmentManager, role_assignments_dict)
role_assignments_dict = {'user': {'id': '2'},
'role': {'id': '2'},
'scope': {'domain': {'id': '1'}}}
domain_role_assignment2 = role_assignments.RoleAssignment(
role_assignments.RoleAssignmentManager, role_assignments_dict)
role_assignments_dict = {'group': {'id': '1'},
'role': {'id': '2'},
'scope': {'domain': {'id': '1'}}}
domain_role_assignment3 = role_assignments.RoleAssignment(
role_assignments.RoleAssignmentManager, role_assignments_dict)
role_assignments_dict = {'user': {'id': '3'},
'role': {'id': '2'},
'scope': {'domain': {'id': '1'}}}
domain_role_assignment4 = role_assignments.RoleAssignment(
role_assignments.RoleAssignmentManager, role_assignments_dict)
TEST.role_assignments.add(proj_role_assignment1,
proj_role_assignment2,
proj_role_assignment3,
proj_role_assignment4,
domain_role_assignment1,
domain_role_assignment2,
domain_role_assignment3,
domain_role_assignment4)
tenant_dict = {'id': "1",
'name': 'test_tenant',
'description': "a test tenant.",
'enabled': True,
'domain_id': '1',
'domain_name': 'test_domain'}
tenant_dict_2 = {'id': "2",
'name': 'disabled_tenant',
'description': "a disabled test tenant.",
'enabled': False,
'domain_id': '2',
'domain_name': 'disabled_domain'}
tenant_dict_3 = {'id': "3",
'name': u'\u4e91\u89c4\u5219',
'description': "an unicode-named tenant.",
'enabled': True,
'domain_id': '2',
'domain_name': 'disabled_domain'}
tenant = tenants.Tenant(tenants.TenantManager, tenant_dict)
disabled_tenant = tenants.Tenant(tenants.TenantManager, tenant_dict_2)
tenant_unicode = tenants.Tenant(tenants.TenantManager, tenant_dict_3)
TEST.tenants.add(tenant, disabled_tenant, tenant_unicode)
TEST.tenant = tenant # Your "current" tenant
tomorrow = datetime_safe.datetime.now() + timedelta(days=1)
expiration = tomorrow.isoformat()
scoped_token_dict = {
'access': {
'token': {
'id': "test_token_id",
'expires': expiration,
'tenant': tenant_dict,
'tenants': [tenant_dict]},
'user': {
'id': "test_user_id",
'name': "test_user",
'roles': [member_role_dict]},
'serviceCatalog': TEST.service_catalog
}
}
scoped_access_info = access.AccessInfo.factory(resp=None,
body=scoped_token_dict)
unscoped_token_dict = {
'access': {
'token': {
'id': "test_token_id",
'expires': expiration},
'user': {
'id': "test_user_id",
'name': "test_user",
'roles': [member_role_dict]},
'serviceCatalog': TEST.service_catalog
}
}
unscoped_access_info = access.AccessInfo.factory(resp=None,
body=unscoped_token_dict)
scoped_token = auth_user.Token(scoped_access_info)
unscoped_token = auth_user.Token(unscoped_access_info)
TEST.tokens.add(scoped_token, unscoped_token)
TEST.token = scoped_token # your "current" token.
TEST.tokens.scoped_token = scoped_token
TEST.tokens.unscoped_token = unscoped_token
access_secret = ec2.EC2(ec2.CredentialsManager, {"access": "access",
"secret": "secret",
"tenant_id": tenant.id})
TEST.ec2.add(access_secret)
|
|
"""JSON parser for Stats Perform MA3 feeds."""
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
import pandas as pd
from ...base import MissingDataError
from .base import OptaJSONParser, _get_end_x, _get_end_y, assertget
class MA3JSONParser(OptaJSONParser):
"""Extract data from a Stats Perform MA3 data stream.
Parameters
----------
path : str
Path of the data file.
"""
_position_map = {
1: "Goalkeeper",
2: "Defender",
3: "Midfielder",
4: "Forward",
5: "Substitute",
}
def _get_match_info(self) -> Dict[str, Any]:
if "matchInfo" in self.root:
return self.root["matchInfo"]
raise MissingDataError
def _get_live_data(self) -> Dict[str, Any]:
if "liveData" in self.root:
return self.root["liveData"]
raise MissingDataError
def extract_competitions(self) -> Dict[Tuple[str, str], Dict[str, Any]]:
"""Return a dictionary with all available competitions.
Returns
-------
dict
            A mapping between competition IDs and the information available about
each competition in the data stream.
"""
match_info = self._get_match_info()
season = assertget(match_info, "tournamentCalendar")
competition = assertget(match_info, "competition")
competition_id = assertget(competition, "id")
season_id = assertget(season, "id")
season = dict(
# Fields required by the base schema
season_id=season_id,
season_name=assertget(season, "name"),
competition_id=competition_id,
competition_name=assertget(competition, "name"),
)
return {(competition_id, season_id): season}
def extract_games(self) -> Dict[str, Dict[str, Any]]:
"""Return a dictionary with all available games.
Returns
-------
dict
A mapping between game IDs and the information available about
each game in the data stream.
"""
match_info = self._get_match_info()
live_data = self._get_live_data()
season = assertget(match_info, "tournamentCalendar")
competition = assertget(match_info, "competition")
contestant = assertget(match_info, "contestant")
venue = assertget(match_info, "venue")
game_id = assertget(match_info, "id")
match_details = assertget(live_data, "matchDetails")
scores = assertget(match_details, "scores")
score_total = assertget(scores, "total")
home_score = None
away_score = None
if isinstance(score_total, dict):
home_score = assertget(score_total, "home")
away_score = assertget(score_total, "away")
game_date = assertget(match_info, "date")[0:10]
game_time = assertget(match_info, "time")[0:8]
game_datetime = f"{game_date}T{game_time}"
return {
game_id: dict(
# Fields required by the base schema
game_id=game_id,
season_id=assertget(season, "id"),
competition_id=assertget(competition, "id"),
game_day=int(assertget(match_info, "week")),
game_date=datetime.strptime(game_datetime, "%Y-%m-%dT%H:%M:%S"),
home_team_id=self._extract_team_id(contestant, "home"),
away_team_id=self._extract_team_id(contestant, "away"),
# Optional fields
home_score=home_score,
away_score=away_score,
duration=assertget(match_details, "matchLengthMin"),
# referee=?
venue=assertget(venue, "shortName"),
# attendance=?
# home_manager=?
# away_manager=?
)
}
def extract_teams(self) -> Dict[str, Dict[str, Any]]:
"""Return a dictionary with all available teams.
Returns
-------
dict
A mapping between team IDs and the information available about
each team in the data stream.
"""
match_info = self._get_match_info()
contestants = assertget(match_info, "contestant")
teams = {}
for contestant in contestants:
team_id = assertget(contestant, "id")
team = dict(
# Fields required by the base schema
team_id=team_id,
team_name=assertget(contestant, "name"),
)
teams[team_id] = team
return teams
def extract_players(self) -> Dict[Tuple[str, str], Dict[str, Any]]: # noqa: C901
"""Return a dictionary with all available players.
Returns
-------
dict
A mapping between (game ID, player ID) tuples and the information
available about each player in the data stream.
"""
match_info = self._get_match_info()
game_id = assertget(match_info, "id")
live_data = self._get_live_data()
events = assertget(live_data, "event")
game_duration = self._extract_duration()
playerid_to_name = {}
players_data: Dict[str, List[Any]] = {
"starting_position_id": [],
"player_id": [],
"team_id": [],
"position_in_formation": [],
"jersey_number": [],
}
for event in events:
event_type = assertget(event, "typeId")
if event_type == 34:
team_id = assertget(event, "contestantId")
qualifiers = assertget(event, "qualifier")
for q in qualifiers:
qualifier_id = assertget(q, "qualifierId")
value = assertget(q, "value")
value = value.split(", ")
if qualifier_id == 30:
players_data["player_id"] += value
team = [team_id for _ in range(len(value))]
players_data["team_id"] += team
elif qualifier_id == 44:
value = [int(v) for v in value]
players_data["starting_position_id"] += value
elif qualifier_id == 131:
value = [int(v) for v in value]
players_data["position_in_formation"] += value
elif qualifier_id == 59:
value = [int(v) for v in value]
players_data["jersey_number"] += value
player_id = event.get("playerId")
if player_id is None:
continue
player_name = assertget(event, "playerName")
if player_id not in playerid_to_name:
playerid_to_name[player_id] = player_name
df_players_data = pd.DataFrame.from_dict(players_data) # type: ignore
substitutions = list(self.extract_substitutions().values())
substitutions_columns = ["player_id", "team_id", "minute_start", "minute_end"]
df_substitutions = pd.DataFrame(substitutions, columns=substitutions_columns)
df_substitutions = df_substitutions.groupby(["player_id", "team_id"]).max().reset_index()
df_substitutions["minute_start"] = df_substitutions["minute_start"].fillna(0)
df_substitutions["minute_end"] = df_substitutions["minute_end"].fillna(game_duration)
if df_substitutions.empty:
df_players_data["minute_start"] = 0
df_players_data["minute_end"] = game_duration
else:
df_players_data = df_players_data.merge(
df_substitutions, on=["team_id", "player_id"], how="left"
)
df_players_data["is_starter"] = df_players_data["position_in_formation"] > 0
df_players_data.loc[
df_players_data["is_starter"] & df_players_data["minute_start"].isnull(),
"minute_start",
] = 0
df_players_data.loc[
df_players_data["is_starter"] & df_players_data["minute_end"].isnull(), "minute_end"
] = game_duration
df_players_data["minutes_played"] = (
(df_players_data["minute_end"] - df_players_data["minute_start"]).fillna(0).astype(int)
)
players = {}
for _, player in df_players_data.iterrows():
if player.minutes_played > 0:
players[(game_id, player.player_id)] = {
# Fields required by the base schema
"game_id": game_id,
"team_id": player.team_id,
"player_id": player.player_id,
"player_name": playerid_to_name[player.player_id],
"is_starter": player.is_starter,
"minutes_played": player.minutes_played,
"jersey_number": player.jersey_number,
# Fields required by the opta schema
"starting_position": self._position_map.get(
player.starting_position_id, "Unknown"
),
}
return players
def extract_events(self) -> Dict[Tuple[str, int], Dict[str, Any]]:
"""Return a dictionary with all available events.
Returns
-------
dict
A mapping between (game ID, event ID) tuples and the information
available about each event in the data stream.
"""
match_info = self._get_match_info()
live_data = self._get_live_data()
game_id = assertget(match_info, "id")
events = {}
for element in assertget(live_data, "event"):
timestamp_string = assertget(element, "timeStamp")
timestamp = self._convert_timestamp(timestamp_string)
qualifiers = {
int(q["qualifierId"]): q.get("value") for q in element.get("qualifier", [])
}
start_x = float(assertget(element, "x"))
start_y = float(assertget(element, "y"))
end_x = _get_end_x(qualifiers) or start_x
end_y = _get_end_y(qualifiers) or start_y
event_id = int(assertget(element, "id"))
event = dict(
# Fields required by the base schema
game_id=game_id,
event_id=event_id,
period_id=int(assertget(element, "periodId")),
team_id=assertget(element, "contestantId"),
player_id=element.get("playerId"),
type_id=int(assertget(element, "typeId")),
# Fields required by the opta schema
timestamp=timestamp,
minute=int(assertget(element, "timeMin")),
second=int(assertget(element, "timeSec")),
outcome=bool(int(element.get("outcome", 1))),
start_x=start_x,
start_y=start_y,
end_x=end_x,
end_y=end_y,
qualifiers=qualifiers,
# Optional fields
assist=bool(int(element.get("assist", 0))),
keypass=bool(int(element.get("keyPass", 0))),
)
events[(game_id, event_id)] = event
return events
def extract_substitutions(self) -> Dict[int, Dict[str, Any]]:
"""Return a dictionary with all substitution events.
Returns
-------
dict
A mapping between player IDs and the information available about
each substitution in the data stream.
"""
live_data = self._get_live_data()
events = assertget(live_data, "event")
subs = {}
for e in events:
event_type = assertget(e, "typeId")
if event_type in (18, 19):
sub_id = assertget(e, "playerId")
substitution_data = {
"player_id": assertget(e, "playerId"),
"team_id": assertget(e, "contestantId"),
}
if event_type == 18:
substitution_data["minute_end"] = assertget(e, "timeMin")
else:
substitution_data["minute_start"] = assertget(e, "timeMin")
subs[sub_id] = substitution_data
return subs
def _extract_duration(self) -> int:
live_data = self._get_live_data()
events = assertget(live_data, "event")
game_duration = 90
for event in events:
event_type = assertget(event, "typeId")
if event_type == 30:
# todo: add 1st half time
qualifiers = assertget(event, "qualifier")
for q in qualifiers:
qualifier = assertget(q, "qualifierId")
if qualifier == 209:
new_duration = assertget(event, "timeMin")
if new_duration > game_duration:
game_duration = new_duration
return game_duration
@staticmethod
def _extract_team_id(teams: List[Dict[str, str]], side: str) -> Optional[str]:
for team in teams:
team_side = assertget(team, "position")
if team_side == side:
team_id = assertget(team, "id")
return team_id
raise MissingDataError
@staticmethod
def _convert_timestamp(timestamp_string: str) -> datetime:
try:
return datetime.strptime(timestamp_string, "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError:
return datetime.strptime(timestamp_string, "%Y-%m-%dT%H:%M:%SZ")
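# Minimal usage sketch (the file name below is hypothetical): following the
# class docstring, the parser is constructed with the path of an MA3 JSON
# feed, after which the extract_* methods return plain dictionaries keyed as
# described in their docstrings.
def _example_usage(path: str = "ma3_feed.json") -> None:
    parser = MA3JSONParser(path)
    competitions = parser.extract_competitions()
    games = parser.extract_games()
    players = parser.extract_players()
    events = parser.extract_events()
    print(len(competitions), len(games), len(players), len(events))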
|
|
#--- ### Header
bl_info = {
"name": "MORSE scene as Python API (.py)",
"author": "Gilberto Echeverria",
"version": (1, 0, 0),
"blender": (2, 5, 9),
"api": 36147,
"location": "File>Import-Export",
"category": "Import-Export",
"description": "Save a MORSE scene as a Python description",
"warning": "",
"wiki_url": "",
"tracker_url": "https://softs.laas.fr/bugzilla/"
}
import os
import bpy
import json
import re
from morse.builder.data import *
from bpy.utils import register_module, unregister_module
"""
Morse API to save scene files
To test this module you can open this file inside a Text panel in Blender,
then run the script.
This will generate a python file in the same directory where Blender was first executed.
"""
morse_types = {
"robots": "Robot",
"sensors": "Sensor",
"actuators": "Actuator",
"middleware": "Middleware",
"modifiers": "Modifier",
}
def save_translation(obj, obj_name):
# Set its position
position_string = ''
text_buffer = ''
component_position = obj.location
if component_position[0] != 0:
position_string += 'x=%.4f' % component_position[0]
if component_position[1] != 0:
if position_string != '':
position_string += ', '
position_string += 'y=%.4f' % component_position[1]
if component_position[2] != 0:
if position_string != '':
position_string += ', '
position_string += 'z=%.4f' % component_position[2]
# Register a translation only if necessary
if position_string != '':
text_buffer += "%s.translate(%s)\n" % (obj_name, position_string)
return (text_buffer)
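# Illustrative sketch (the object below is a stand-in, not a real Blender
# datablock): save_translation only emits the axes that differ from zero, so a
# component offset on x and z produces a single translate() line.
class _FakeLocatedObject(object):
    location = (1.0, 0.0, 0.25)
# save_translation(_FakeLocatedObject(), 'my_sensor') returns
# "my_sensor.translate(x=1.0000, z=0.2500)\n"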
def save_rotation(obj, obj_name):
# Set its rotation
rotation_string = ''
text_buffer = ''
component_rotation = obj.rotation_euler
if component_rotation[0] != 0:
rotation_string += 'x=%.4f' % component_rotation[0]
if component_rotation[1] != 0:
if rotation_string != '':
rotation_string += ', '
rotation_string += 'y=%.4f' % component_rotation[1]
if component_rotation[2] != 0:
if rotation_string != '':
rotation_string += ', '
rotation_string += 'z=%.4f' % component_rotation[2]
    # Register a rotation only if necessary
if rotation_string != '':
text_buffer += "%s.rotate(%s)\n" % (obj_name, rotation_string)
return (text_buffer)
def save_properties(obj, obj_name):
text_buffer = ''
# Store the properties of the component
for key,prop in obj.game.properties.items():
if key not in ['Robot_Tag', 'Component_Tag', 'Middleware_Tag', 'Modifier_Tag', 'Class', 'Path']:
if prop.value != '':
if prop.type == 'STRING':
text_buffer += "%s.properties(%s = '%s')\n" % (obj_name, key, prop.value)
elif prop.type == 'FLOAT' or prop.type == 'TIMER':
text_buffer += "%s.properties(%s = %.4f)\n" % (obj_name, key, prop.value)
else:
text_buffer += "%s.properties(%s = %s)\n" % (obj_name, key, prop.value)
return (text_buffer)
def scan_scene (file_out):
""" Read all the MORSE components from a Blender file
Create lists of robots and components to save them as a text file
"""
file_out.write("from morse.builder import *\n\n")
robot_text = ''
component_text = ''
for obj in bpy.data.objects:
try:
component_path = obj.game.properties['Path'].value
        # Skip this object if it is not a MORSE component
except KeyError as detail:
continue
# Ignore middleware and modifier empties.
        # These will be added dynamically by the builder
if 'middleware' in component_path or 'modifiers' in component_path:
continue
# Read what type of component this is,
# from the source of its python file
path_elements = component_path.split('/')
component_type = path_elements[-2]
component_name = path_elements[-1]
builder_type = morse_types[component_type]
# Swap dots for underscores in object names
obj_name = re.sub('\.', '_', obj.name)
# Create the object instance
if component_type == 'robots':
robot_text += "%s = %s('%s')\n" % (obj_name, builder_type, component_name)
robot_text += save_translation(obj, obj_name)
robot_text += save_rotation(obj, obj_name)
robot_text += save_properties(obj, obj_name)
robot_text += "\n"
# Assign component to the parent
if component_type == 'sensors' or component_type == 'actuators':
component_text += "%s = %s('%s')\n" % (obj_name, builder_type, component_name)
component_text += save_translation(obj, obj_name)
component_text += save_rotation(obj, obj_name)
parent_name = re.sub('\.', '_', obj.parent.name)
component_text += "%s.append(%s)\n" % (parent_name, obj_name)
component_text += save_properties(obj, obj_name)
component_text += "\n"
# Write the buffers to the text file
file_out.write("# Robots\n")
file_out.write(robot_text)
file_out.write("# Components\n")
file_out.write(component_text)
def scan_config(file_out):
""" Parse the contents of 'component_config.py'
Produce a configuration file that 'morsebuilder' can use to
configure the robot/middleware bindings in a scene.
"""
import component_config
file_out.write("# Scene configuration\n")
for key,value in component_config.component_mw.items():
component = re.sub('\.', '_', key)
# If the 'value' variable contains only strings, use that string
# as the name of the middleware.
# This is done for backwards compatibility with the previous
# syntax that allowed only one middleware per component
if isinstance (value[0], str):
mw = value[0]
mw = mw.lower()
file_out.write("%s.configure_mw('%s', %s)\n" % (component, mw, value))
# If using the new syntax that allows more than one middleware
# per component
else:
for item in value:
mw = item[0]
mw = mw.lower()
file_out.write("%s.configure_mw('%s', %s)\n" % (component, mw, item))
try:
component_config.component_service
file_out.write("\n")
for key,value in component_config.component_service.items():
component = re.sub('\.', '_', key)
mw = re.search('(\w+)_request_manager', value[0])
file_out.write("%s.configure_service('%s')\n" % (component, mw.group(1)))
except AttributeError as detail:
print ("\tNo services configured")
try:
component_config.component_modifier
file_out.write("\n")
for key,value in component_config.component_modifier.items():
component = re.sub('\.', '_', key)
mod = value[0]
file_out.write("%s.configure_modifier(%s)\n" % (component, mod))
except AttributeError as detail:
print ("\tNo modifiers configured")
def get_environment():
try:
ssh = bpy.data.objects['Scene_Script_Holder']
environment_file = ssh.game.properties['environment_file'].value
except KeyError as detail:
environment_file = 'indoors-1/indoor-1'
print ("No environment file specified in 'Scene_Script_Holder'\nUsing '%s' as default" % environment_file)
return environment_file
def save_scene():
print ("\nRunning from %s" % bpy.data.filepath)
filename = bpy.path.display_name_from_filepath(bpy.data.filepath) + ".py"
file_out = open(filename, "w")
print ("Saving scene robot configuration to file '%s'" % filename)
scan_scene(file_out)
scan_config(file_out)
env_name = get_environment()
file_out.write("\nenv = Environment('%s')" % env_name)
file_out.write("\nenv.create()")
file_out.close()
print ("Configuration saved")
#--- ### Operator
class MorseExporter(bpy.types.Operator):
''' Convert a MORSE scene configuration to a python script '''
bl_idname = "export_scene.morse"
bl_label = "Save MORSE scene"
bl_description = "Convert a MORSE scene configuration to a python script"
#--- Blender interface methods
#@classmethod
#def poll(cls,context):
#return (context.mode == 'OBJECT')
def execute(self,context):
save_scene()
        return {'FINISHED'}
def menu_draw(self, context):
self.layout.operator_context = 'INVOKE_REGION_WIN'
self.layout.operator(MorseExporter.bl_idname, "Save MORSE scene (.py)")
#--- ### Register
def register():
register_module(__name__)
bpy.types.INFO_MT_file_export.prepend(menu_draw)
def unregister():
bpy.types.INFO_MT_file_export.remove(menu_draw)
unregister_module(__name__)
#--- ### Main code
if __name__ == '__main__':
register()
#save_scene()
|
|
from selenium.common.exceptions import StaleElementReferenceException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import requests
default_timeout = 10
class count_zero_or_invisible(object):
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
elems = driver.find_elements(*self.locator)
if len(elems) == 0:
return True
else:
for elem in elems:
if elem.is_displayed():
return False
return True
except StaleElementReferenceException:
return False
class count_non_zero_and_visible(object):
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
elems = driver.find_elements(*self.locator)
if not elems or len(elems) == 0:
return False
else:
for elem in elems:
if elem.is_displayed():
return elems
return False
except StaleElementReferenceException:
return False
class count_non_zero_and_clickable(object):
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
elems = driver.find_elements(*self.locator)
if not elems or len(elems) == 0:
return False
else:
for elem in elems:
if elem.is_displayed() and elem.is_enabled():
return elems
return False
except StaleElementReferenceException:
return False
def goto(url):
global driver
driver.get(url)
def get_identifier(identifier):
locator = "css"
locatorValue = ""
if isinstance(identifier, dict):
if not 'locator' in identifier:
raise ValueError("The identifier has no specified locator - {}".format(identifier))
identifier = identifier['locator']
map_locator_to_by = {
"id": By.ID,
"class": By.CLASS_NAME,
"css": By.CSS_SELECTOR,
"xpath": By.XPATH,
"linktext":By.LINK_TEXT,
"text": By.LINK_TEXT,
"partialtext":By.PARTIAL_LINK_TEXT,
"partiallinktext":By.PARTIAL_LINK_TEXT,
"name": By.NAME,
"tag": By.TAG_NAME,
"tagname":By.TAG_NAME
}
if isinstance(identifier, str) or isinstance(identifier, unicode):
identify = identifier.split('=', 1)
if len(identify) == 1:
locatorValue = identify[0]
else:
locator = identify[0]
locatorValue = identify[1]
if not locator.lower() in map_locator_to_by:
locator = "css"
locatorValue = identifier
return (map_locator_to_by[locator],locatorValue)
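# Hypothetical examples of the identifier strings accepted by get_identifier:
# a "<locator>=<value>" prefix selects the lookup strategy, and anything
# without a recognised prefix falls back to a CSS selector.
_IDENTIFIER_EXAMPLES = {
    "id=login-button": (By.ID, "login-button"),
    "xpath=//div[@class='row']//a": (By.XPATH, "//div[@class='row']//a"),
    "text=Sign in": (By.LINK_TEXT, "Sign in"),
    "div.content > a.next": (By.CSS_SELECTOR, "div.content > a.next"),
}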
def highlight(identifier, context=None, timeout = -1):
    """Highlights (blinks) a Selenium WebDriver element."""
    element = find(identifier, context, timeout, EC.element_to_be_clickable)
driver = element._parent
def apply_style(s):
driver.execute_script("arguments[0].setAttribute('style', arguments[1]);",
element, s)
original_style = element.get_attribute('style')
apply_style("background: yellow; border: 2px solid red;")
time.sleep(.3)
apply_style(original_style)
def click( identifier, context=None, timeout = -1):
find(identifier, context, timeout, EC.element_to_be_clickable).click()
def input_text(identifier, text, context=None, timeout = -1):
elem = find(identifier, context, timeout)
elem.clear()
elem.send_keys(text)
def finds( identifier, context=None, timeout=-1, condition=None):
"""
    @return: Returns the list of web elements found by identifier (an empty list on timeout)
    @rtype: list of selenium.webdriver.remote.webelement.WebElement
"""
if timeout == -1:
timeout = default_timeout
if isinstance(identifier, WebElement):
return identifier
if context is None:
context = driver
locator = get_identifier(identifier)
if condition is None:
condition = count_non_zero_and_visible(locator)
else:
condition = condition(locator)
    wdw = WebDriverWait(context, timeout)
try:
elems = wdw.until(condition)
return elems if isinstance(elems, list) else []
except TimeoutException:
return []
def wait_any(identifiers, **kwargs):
timeout = kwargs.get('timeout', default_timeout)
if 'timeout' in kwargs:
del kwargs['timeout']
time_start = time.time()
while True:
for identifier in identifiers:
try:
find(identifier, timeout=0, **kwargs)
return True
except Exception as ex:
pass
if time.time() - time_start > timeout:
return False
return False
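# Hypothetical usage sketch: block until either of two made-up elements shows
# up, giving up after 15 seconds.
def _example_wait_any():
    return wait_any(["id=dashboard-title", "css=.login-error"], timeout=15)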
def find(identifier, context=None, timeout=-1, condition=EC.presence_of_element_located):
"""
@return: Returns the web element found by identifier
@rtype: selenium.webdriver.remote.webelement.WebElement
"""
if timeout == -1:
timeout = default_timeout
if isinstance(identifier, WebElement):
return identifier
if context is None:
context = driver
locator = get_identifier(identifier)
wdw = WebDriverWait(driver=context, timeout=timeout)
    try:
        element = wdw.until(condition(locator))
    except Exception:
        # Fall back to a direct lookup; this raises NoSuchElementException if
        # the element is really absent.
        element = context.find_element(*locator)
    return element
def refresh_page():
global driver
driver.refresh()
def init_driver(param_driver):
"""
@type driver: RemoteWebDriver
"""
global driver
driver = param_driver
driver.implicitly_wait(0)
def get_driver():
"""
@rtype: selenium.webdriver.remote.WebDriver
"""
global driver
return driver
def quit_driver():
global driver
if driver:
driver.quit()
driver = None
def execute_script(text,args=None):
global driver
if args is None:
args = []
return driver.execute_script(text,args)
def take_screen_shot(path):
global driver
driver.save_screenshot(path)
def get_page_source():
global driver
return driver.page_source
def retry(fun):
    def wrapper(*args, **kwargs):
        try:
            return fun(*args, **kwargs)
        except Exception:
            # Retry once if the first attempt raises.
            return fun(*args, **kwargs)
    return wrapper
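# Hypothetical usage sketch for the retry decorator above: the decorated call
# is attempted a second time if the first attempt raises.
@retry
def _click_submit_retrying_once():
    click("id=submit")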
def switch_window(window=0):
if driver.window_handles:
        print(driver.window_handles)
driver.switch_to_window(driver.window_handles[window])
else:
raise Exception("No window id is available for switching")
return driver.window_handles
def switch_frame(frame):
global driver
driver.switch_to_frame(frame)
def request():
global driver
s = requests.Session()
cookies = driver.get_cookies()
for cookie in cookies:
s.cookies.set(cookie['name'], cookie['value'])
return s
|
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# TODO: This is fairly repetitive and can definitely be
# condensed into a lot less code, but it's working for now
import numpy as np
import matplotlib.pyplot as plt
from .utils import calc_axis_breaks_and_limits
import sys
def scale_facet_wrap(rows, cols, positions, scaletype):
"""Set the scales on each subplot for wrapped faceting.
Parameters
----------
rows : int
number of rows in the faceted plot
cols : int
number of columns in the faceted plot
positions : list of int
zero-indexed list of faceted plot positions
scaletype : str or None
string indicating the type of scaling to apply to the rows and columns
- None : All plots get the same scale
- 'free_x' : each plot is free to determine its own x-scale, all plots have the same y-scale
- 'free_y' : each plot is free to determine its own y-scale, all plots have the same x-scale
- 'free' : plots are free to determine their own x- and y-scales
"""
x_extents, y_extents = {}, {}
# Calculate the extents for the plots
for pos in positions:
# Work on the subplot at the current position (adding 1 to pos because
# matplotlib 1-indexes their subplots)
plt.subplot(rows, cols, pos + 1)
# Update the x extents for each column
column, row = 0, 0
if scaletype in ["free", "free_x"]:
# If the x scale is free, all plots get their own x scale
column = pos % cols
row = int(pos / cols)
limits = plt.xlim()
# Get the current bounds for this column. Default lower limit is
# infinity (because all values < infinity) and the default upper limit
# is -infinity (because all values > -infinity).
lower, upper = x_extents.get((column, row), (float("inf"), float("-inf")))
lower = min(limits[0], lower)
upper = max(limits[1], upper)
x_extents[(column, row)] = (lower, upper)
column, row = 0, 0
if scaletype in ["free", "free_y"]:
# If the y scale is free, all plots get their own y scale
column = pos % cols
row = int(pos / cols)
limits = plt.ylim()
# Get the current bounds for this column. Default lower limit is
# infinity (because all values < infinity) and the default upper limit
# is -infinity (because all values > -infinity).
lower, upper = y_extents.get((column, row), (float("inf"), float("-inf")))
lower = min(limits[0], lower)
upper = max(limits[1], upper)
y_extents[(column, row)] = (lower, upper)
for pos in positions:
plt.subplot(rows, cols, pos + 1)
row = int(pos / cols)
column = pos % cols
# Find the extents for this position. Default to the extents at
# position column 0, row 0, in case all plots use the same scale
xmin, xmax = x_extents[(0, 0)]
ymin, ymax = y_extents[(0, 0)]
if scaletype in ["free", "free_x"]:
# If the x scale is free, look up the extents for this column and row
xmin, xmax = x_extents[(column, row)]
if scaletype in ["free", "free_y"]:
# If the y scale is free, look up the extents for this column and row
ymin, ymax = y_extents[(column, row)]
x_scale, x_min, x_max = calc_axis_breaks_and_limits(xmin, xmax, 4)
x_scale = np.round(x_scale, 2)
# Only apply x labels to plots if each plot has its own scale or the
# plot is in the bottom row of each column.
x_labs = []
if scaletype in ["free", "free_x"] or pos in positions[-cols:]:
x_labs = x_scale
plt.xticks(x_scale, x_labs)
plt.xlim(x_min, x_max )
# Set the y-axis scale and labels
y_scale, y_min, y_max = calc_axis_breaks_and_limits(ymin, ymax, 4)
y_scale = np.round(y_scale, 2)
# Only apply y labels to plots if each plot has its own scale or the
# plot is in the left column.
y_labs = []
if scaletype in ["free", "free_y"] or column == 0:
y_labs = y_scale
plt.yticks(y_scale, y_labs)
plt.ylim(y_min, y_max)
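# Minimal usage sketch (hypothetical data): scale_facet_wrap assumes the
# subplots already exist, so a caller first draws each facet and then lets the
# function reconcile the axis limits and tick labels.
def _example_scale_facet_wrap():
    rows, cols = 2, 3
    positions = list(range(5))  # five facets laid out on a 2x3 grid
    for pos in positions:
        plt.subplot(rows, cols, pos + 1)
        plt.plot([0, 1, 2], [0, pos, pos * 2])
    scale_facet_wrap(rows, cols, positions, "free_y")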
def scale_facet_grid(xdim, ydim, facet_pairs, scaletype):
# everyone gets the same scales
if scaletype is None:
min_x, max_x = 999999999, -999999999
min_y, max_y = 999999999, -999999999
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
min_x = min(min_x, min(plt.xlim()))
max_x = max(max_x, max(plt.xlim()))
min_y = min(min_y, min(plt.ylim()))
max_y = max(max_y, max(plt.ylim()))
y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_y, max_y, 4)
y_scale = np.round(y_scale, 2)
x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_x, max_x, 4)
x_scale = np.round(x_scale, 2)
# for all axis set the individual axis limits and ticks
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
y_labs = y_scale
if pos % ydim!=1:
y_labs = []
plt.yticks(y_scale, y_labs)
plt.ylim(y_min, y_max)
x_labs = x_scale
if pos <= (len(facet_pairs) - ydim):
x_labs = []
plt.xticks(x_scale, x_labs)
plt.xlim(x_min, x_max)
elif scaletype=="free_y":
min_x, max_x = 999999999, -999999999
min_ys, max_ys = {}, {}
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
y_bucket = int((pos-1) / ydim)
min_ys[y_bucket] = min_ys.get(y_bucket, 999999999)
max_ys[y_bucket] = max_ys.get(y_bucket, -999999999)
min_x = min(min_x, min(plt.xlim()))
max_x = max(max_x, max(plt.xlim()))
min_ys[y_bucket] = min(min_ys[y_bucket], min(plt.ylim()))
max_ys[y_bucket] = max(max_ys[y_bucket], max(plt.ylim()))
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
y_bucket = int((pos-1) / ydim)
y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_ys[y_bucket], max_ys[y_bucket],4)
y_scale = np.round(y_scale, 2)
y_labs = y_scale
if pos % ydim!=1:
y_labs = []
plt.yticks(y_scale, y_labs)
plt.ylim(y_min, y_max)
x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_x, max_x, 4)
x_scale = np.round(x_scale, 2)
x_labs = x_scale
if pos <= (len(facet_pairs) - ydim):
x_labs = []
plt.xticks(x_scale, x_labs)
plt.xlim(x_min, x_max)
elif scaletype=="free_x":
min_y, max_y = 999999999, -999999999
min_xs, max_xs = {}, {}
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
x_bucket = int((pos-1) / xdim)
min_xs[x_bucket] = min_xs.get(x_bucket, 999999999)
max_xs[x_bucket] = max_xs.get(x_bucket, -999999999)
min_y = min(min_y, min(plt.ylim()))
max_y = max(max_y, max(plt.ylim()))
min_xs[x_bucket] = min(min_xs[x_bucket], min(plt.xlim()))
max_xs[x_bucket] = max(max_xs[x_bucket], max(plt.xlim()))
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
x_bucket = int((pos-1) / xdim)
x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_xs[x_bucket], max_xs[x_bucket],4)
x_scale = np.round(x_scale, 2)
x_labs = x_scale
if pos <= ((len(facet_pairs) - ydim)):
x_labs = []
plt.xticks(x_scale, x_labs)
plt.xlim(x_min, x_max)
y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_y, max_y, 4)
y_scale = np.round(y_scale, 2)
y_labs = y_scale
if pos % ydim!=1:
y_labs = []
plt.yticks(y_scale, y_labs)
plt.ylim(y_min, y_max)
else:
min_xs, max_xs = {}, {}
min_ys, max_ys = {}, {}
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
x_bucket = int((pos-1) / xdim)
min_xs[x_bucket] = min_xs.get(x_bucket, 999999999)
max_xs[x_bucket] = max_xs.get(x_bucket, -999999999)
min_xs[x_bucket] = min(min_xs[x_bucket], min(plt.xlim()))
max_xs[x_bucket] = max(max_xs[x_bucket], max(plt.xlim()))
y_bucket = int((pos-1) / ydim)
min_ys[y_bucket] = min_ys.get(y_bucket, 999999999)
max_ys[y_bucket] = max_ys.get(y_bucket, -999999999)
min_ys[y_bucket] = min(min_ys[y_bucket], min(plt.ylim()))
max_ys[y_bucket] = max(max_ys[y_bucket], max(plt.ylim()))
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
x_bucket = int((pos-1) / xdim)
x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_xs[x_bucket], max_xs[x_bucket],4)
x_scale = np.round(x_scale, 2)
x_labs = x_scale
if pos <= ((len(facet_pairs) - ydim)):
x_labs = []
plt.xticks(x_scale, x_labs)
plt.xlim(x_min, x_max)
y_bucket = int((pos-1) / ydim)
y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_ys[y_bucket], max_ys[y_bucket],4)
y_scale = np.round(y_scale, 2)
y_labs = y_scale
if pos % ydim!=1:
y_labs = []
plt.yticks(y_scale, y_labs)
plt.ylim(y_min, y_max)
|
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import base64
import os
import os.path
import re
import json
import string
import subprocess
import sys
import imp
import time
import shlex
import traceback
import xml.parsers.expat
import datetime
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
from threading import Thread
from time import sleep
from os.path import join
from mounts import Mounts
from mounts import Mount
from patch import *
from fsfreezer import FsFreezer
from common import CommonVariables
from parameterparser import ParameterParser
from Utils import HandlerUtil
from Utils import SizeCalculation
from Utils import Status
from snapshotter import Snapshotter
from backuplogger import Backuplogger
from blobwriter import BlobWriter
from taskidentity import TaskIdentity
from MachineIdentity import MachineIdentity
import ExtensionErrorCodeHelper
from PluginHost import PluginHost
# The main function is the only entry point of this extension handler
def main():
global MyPatching,backup_logger,hutil,run_result,run_status,error_msg,freezer,freeze_result,snapshot_info_array,total_used_size,size_calculation_failed
try:
run_result = CommonVariables.success
run_status = 'success'
error_msg = ''
freeze_result = None
snapshot_info_array = None
total_used_size = 0
size_calculation_failed = False
HandlerUtil.waagent.LoggerInit('/dev/console','/dev/stdout')
## HandlerUtil.waagent.Logger.Log((CommonVariables.extension_name) + " started to handle." )
hutil = HandlerUtil.HandlerUtility(HandlerUtil.waagent.Log, HandlerUtil.waagent.Error, CommonVariables.extension_name)
backup_logger = Backuplogger(hutil)
MyPatching = GetMyPatching(backup_logger)
hutil.patching = MyPatching
for a in sys.argv[1:]:
if re.match("^([-/]*)(disable)", a):
disable()
elif re.match("^([-/]*)(uninstall)", a):
uninstall()
elif re.match("^([-/]*)(install)", a):
install()
elif re.match("^([-/]*)(enable)", a):
enable()
elif re.match("^([-/]*)(update)", a):
update()
elif re.match("^([-/]*)(daemon)", a):
daemon()
except Exception as e:
sys.exit(0)
def install():
global hutil
hutil.do_parse_context('Install')
hutil.do_exit(0, 'Install','success','0', 'Install Succeeded')
def timedelta_total_seconds(delta):
if not hasattr(datetime.timedelta, 'total_seconds'):
        return delta.days * 86400 + delta.seconds + delta.microseconds / 10.0 ** 6
else:
return delta.total_seconds()
def status_report_to_file(file_report_msg):
global backup_logger,hutil
hutil.write_to_status_file(file_report_msg)
backup_logger.log("file status report message:",True)
backup_logger.log(file_report_msg,True)
def status_report_to_blob(blob_report_msg):
global backup_logger,hutil,para_parser
try:
if(para_parser is not None and para_parser.statusBlobUri is not None and para_parser.statusBlobUri != ""):
blobWriter = BlobWriter(hutil)
if(blob_report_msg is not None):
blobWriter.WriteBlob(blob_report_msg,para_parser.statusBlobUri)
backup_logger.log("blob status report message:",True)
backup_logger.log(blob_report_msg,True)
else:
backup_logger.log("blob_report_msg is none",True)
except Exception as e:
err_msg='cannot write status to the status blob'+traceback.format_exc()
backup_logger.log(err_msg, True, 'Warning')
def get_status_to_report(status, status_code, message, snapshot_info = None):
global MyPatching,backup_logger,hutil,para_parser,total_used_size,size_calculation_failed
blob_report_msg = None
file_report_msg = None
try:
if total_used_size == -1 :
sizeCalculation = SizeCalculation.SizeCalculation(patching = MyPatching , logger = backup_logger)
total_used_size,size_calculation_failed = sizeCalculation.get_total_used_size()
number_of_blobs = len(para_parser.blobs)
maximum_possible_size = number_of_blobs * 1099511627776
if(total_used_size>maximum_possible_size):
total_used_size = maximum_possible_size
backup_logger.log("Assertion Check, total size : {0} ,maximum_possible_size : {1}".format(total_used_size,maximum_possible_size),True)
if(para_parser is not None and para_parser.statusBlobUri is not None and para_parser.statusBlobUri != ""):
blob_report_msg, file_report_msg = hutil.do_status_report(operation='Enable',status=status,\
status_code=str(status_code),\
message=message,\
taskId=para_parser.taskId,\
commandStartTimeUTCTicks=para_parser.commandStartTimeUTCTicks,\
snapshot_info=snapshot_info,\
total_size = total_used_size,\
failure_flag = size_calculation_failed)
except Exception as e:
err_msg='cannot get status report parameters, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(err_msg, True, 'Warning')
return blob_report_msg, file_report_msg
def exit_with_commit_log(status,result,error_msg, para_parser):
global backup_logger
backup_logger.log(error_msg, True, 'Error')
if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
backup_logger.commit(para_parser.logsBlobUri)
blob_report_msg, file_report_msg = get_status_to_report(status, result, error_msg, None)
status_report_to_file(file_report_msg)
status_report_to_blob(blob_report_msg)
sys.exit(0)
def exit_if_same_taskId(taskId):
global backup_logger,hutil,para_parser
trans_report_msg = None
taskIdentity = TaskIdentity()
last_taskId = taskIdentity.stored_identity()
if(taskId == last_taskId):
backup_logger.log("TaskId is same as last, so skip with Processed Status, current:" + str(taskId) + "== last:" + str(last_taskId), True)
status=CommonVariables.status_success
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.SuccessAlreadyProcessedInput)
status_code=CommonVariables.SuccessAlreadyProcessedInput
message='TaskId AlreadyProcessed nothing to do'
try:
if(para_parser is not None):
blob_report_msg, file_report_msg = hutil.do_status_report(operation='Enable',status=status,\
status_code=str(status_code),\
message=message,\
taskId=taskId,\
commandStartTimeUTCTicks=para_parser.commandStartTimeUTCTicks,\
snapshot_info=None)
status_report_to_file(file_report_msg)
except Exception as e:
err_msg='cannot write status to the status file, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(err_msg, True, 'Warning')
sys.exit(0)
def convert_time(utcTicks):
return datetime.datetime(1, 1, 1) + datetime.timedelta(microseconds = utcTicks / 10)
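# Illustrative example (not in the original source): commandStartTimeUTCTicks is a
# .NET-style tick count, i.e. 100-nanosecond intervals since 0001-01-01 UTC, so
# dividing by 10 yields microseconds. The Unix epoch corresponds to
# 621355968000000000 ticks:
# >>> convert_time(621355968000000000)
# datetime.datetime(1970, 1, 1, 0, 0)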
def get_value_from_configfile(key):
global backup_logger
value = None
configfile = '/etc/azure/vmbackup.conf'
try :
if os.path.exists(configfile):
config = ConfigParsers.ConfigParser()
config.read(configfile)
if config.has_option('SnapshotThread',key):
value = config.get('SnapshotThread',key)
else:
backup_logger.log("Config File doesn't have the key :" + key, True, 'Info')
except Exception as e:
errorMsg = " Unable to get config file.key is "+ key +"with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
backup_logger.log(errorMsg, True, 'Warning')
return value
def set_value_to_configfile(key, value):
configfile = '/etc/azure/vmbackup.conf'
try :
backup_logger.log('setting doseq flag in config file', True, 'Info')
if not os.path.exists(os.path.dirname(configfile)):
os.makedirs(os.path.dirname(configfile))
config = ConfigParsers.RawConfigParser()
if os.path.exists(configfile):
config.read(configfile)
if config.has_section('SnapshotThread'):
if config.has_option('SnapshotThread', key):
config.remove_option('SnapshotThread', key)
else:
config.add_section('SnapshotThread')
else:
config.add_section('SnapshotThread')
config.set('SnapshotThread', key, value)
with open(configfile, 'w') as config_file:
config.write(config_file)
except Exception as e:
errorMsg = " Unable to set config file.key is "+ key +"with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
backup_logger.log(errorMsg, True, 'Warning')
return value
def freeze_snapshot(timeout):
try:
global hutil,backup_logger,run_result,run_status,error_msg,freezer,freeze_result,para_parser,snapshot_info_array,g_fsfreeze_on
if(get_value_from_configfile('doseq') == '2'):
set_value_to_configfile('doseq', '1')
if(get_value_from_configfile('doseq') != '1'):
set_value_to_configfile('doseq', '2')
snap_shotter = Snapshotter(backup_logger)
time_before_freeze = datetime.datetime.now()
freeze_result = freezer.freeze_safe(timeout)
time_after_freeze = datetime.datetime.now()
HandlerUtil.HandlerUtility.add_to_telemetery_data("FreezeTime", str(time_after_freeze-time_before_freeze-datetime.timedelta(seconds=5)))
run_result = CommonVariables.success
run_status = 'success'
all_failed= False
is_inconsistent = False
backup_logger.log('T:S freeze result ' + str(freeze_result))
if(freeze_result is not None and len(freeze_result.errors) > 0):
run_result = CommonVariables.FailedFsFreezeFailed
run_status = 'error'
error_msg = 'T:S Enable failed with error: ' + str(freeze_result)
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableFsFreezeFailed)
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
backup_logger.log(error_msg, True, 'Warning')
if(get_value_from_configfile('doseq') == '2'):
set_value_to_configfile('doseq', '0')
else:
backup_logger.log('T:S doing snapshot now...')
time_before_snapshot = datetime.datetime.now()
snapshot_result,snapshot_info_array, all_failed, is_inconsistent, unable_to_sleep = snap_shotter.snapshotall(para_parser, freezer)
time_after_snapshot = datetime.datetime.now()
HandlerUtil.HandlerUtility.add_to_telemetery_data("SnapshotTime", str(time_after_snapshot-time_before_snapshot))
backup_logger.log('T:S snapshotall ends...', True)
if(get_value_from_configfile('doseq') == '2'):
set_value_to_configfile('doseq', '0')
if(snapshot_result is not None and len(snapshot_result.errors) > 0):
if unable_to_sleep:
run_result = CommonVariables.error
run_status = 'error'
error_msg = 'T:S Enable failed with error: ' + str(snapshot_result)
backup_logger.log(error_msg, True, 'Warning')
elif is_inconsistent == True :
set_value_to_configfile('doseq', '1')
run_result = CommonVariables.error
run_status = 'error'
error_msg = 'T:S Enable failed with error: ' + str(snapshot_result)
backup_logger.log(error_msg, True, 'Warning')
else:
error_msg = 'T:S snapshot result: ' + str(snapshot_result)
run_result = CommonVariables.FailedRetryableSnapshotFailedNoNetwork
if all_failed:
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedNoNetwork)
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
else:
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedRestrictedNetwork)
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
run_status = 'error'
backup_logger.log(error_msg, True, 'Error')
elif check_snapshot_array_fail() == True:
run_result = CommonVariables.error
run_status = 'error'
error_msg = 'T:S Enable failed with error in snapshot_array index'
backup_logger.log(error_msg, True, 'Error')
else:
run_result = CommonVariables.success
run_status = 'success'
error_msg = 'Enable Succeeded'
backup_logger.log("T:S " + error_msg, True)
except Exception as e:
if(get_value_from_configfile('doseq') == '2'):
set_value_to_configfile('doseq', '0')
errMsg = 'Failed to do the snapshot with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
run_result = CommonVariables.error
run_status = 'error'
error_msg = 'Enable failed with exception in safe freeze or snapshot '
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
#snapshot_done = True
def check_snapshot_array_fail():
global snapshot_info_array, backup_logger
snapshot_array_fail = False
if snapshot_info_array is not None and snapshot_info_array !=[]:
for snapshot_index in range(len(snapshot_info_array)):
if(snapshot_info_array[snapshot_index].isSuccessful == False):
backup_logger.log('T:S snapshot failed at index ' + str(snapshot_index), True)
snapshot_array_fail = True
break
return snapshot_array_fail
def daemon():
global MyPatching,backup_logger,hutil,run_result,run_status,error_msg,freezer,para_parser,snapshot_done,snapshot_info_array,g_fsfreeze_on,total_used_size
#this is using the most recent file timestamp.
hutil.do_parse_context('Executing')
freezer = FsFreezer(patching= MyPatching, logger = backup_logger)
global_error_result = None
# precheck
freeze_called = False
configfile='/etc/azure/vmbackup.conf'
thread_timeout=str(60)
#Adding python version to the telemetry
try:
python_version_info = sys.version_info
python_version = str(sys.version_info[0])+ '.' + str(sys.version_info[1]) + '.' + str(sys.version_info[2])
HandlerUtil.HandlerUtility.add_to_telemetery_data("pythonVersion", python_version)
except Exception as e:
errMsg = 'Failed to retrieve python version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
try:
if(freezer.mounts is not None):
hutil.partitioncount = len(freezer.mounts.mounts)
backup_logger.log(" configfile " + str(configfile), True)
config = ConfigParsers.ConfigParser()
config.read(configfile)
if config.has_option('SnapshotThread','timeout'):
thread_timeout= config.get('SnapshotThread','timeout')
except Exception as e:
errMsg='cannot read config file or file not present'
backup_logger.log(errMsg, True, 'Warning')
backup_logger.log("final thread timeout" + thread_timeout, True)
snapshot_info_array = None
try:
# we need to freeze the file system first
backup_logger.log('starting daemon', True)
"""
protectedSettings is the privateConfig passed from Powershell.
WATCHOUT that, the _context_config are using the most freshest timestamp.
if the time sync is alive, this should be right.
"""
if(hutil.is_prev_in_transition()):
backup_logger.log('retrieving the previous logs for this again inside daemon', True)
backup_logger.set_prev_log()
protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')
public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
para_parser = ParameterParser(protected_settings, public_settings)
commandToExecute = para_parser.commandToExecute
#validate all the required parameter here
backup_logger.log(commandToExecute,True)
if(CommonVariables.iaas_install_command in commandToExecute.lower()):
backup_logger.log('install succeed.',True)
run_status = 'success'
error_msg = 'Install Succeeded'
run_result = CommonVariables.success
backup_logger.log(error_msg)
elif(CommonVariables.iaas_vmbackup_command in commandToExecute.lower()):
if(para_parser.backup_metadata is None or para_parser.public_config_obj is None or para_parser.private_config_obj is None):
run_result = CommonVariables.error_parameter
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)
run_status = 'error'
error_msg = 'required field empty or not correct'
backup_logger.log(error_msg, True, 'Error')
else:
backup_logger.log('commandToExecute is ' + commandToExecute, True)
"""
make sure the log is not doing when the file system is freezed.
"""
temp_status= 'success'
temp_result=CommonVariables.ExtensionTempTerminalState
temp_msg='Transitioning state in extension'
blob_report_msg, file_report_msg = get_status_to_report(temp_status, temp_result, temp_msg, None)
if(hutil.is_status_file_exists()):
status_report_to_file(file_report_msg)
status_report_to_blob(blob_report_msg)
backup_logger.log('doing freeze now...', True)
#partial logging before freeze
if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
backup_logger.commit_to_blob(para_parser.logsBlobUri)
else:
backup_logger.log("the logs blob uri is not there, so do not upload log.")
backup_logger.log('commandToExecute is ' + commandToExecute, True)
PluginHostObj = PluginHost(logger=backup_logger)
PluginHostErrorCode,dobackup,g_fsfreeze_on = PluginHostObj.pre_check()
doFsConsistentbackup = False
if not (PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigParsing or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginConfigParsing or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigNotFound or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigPermissionError or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginConfigNotFound or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginConfigPermissionError):
backup_logger.log('App Consistent Backup Enabled', True)
HandlerUtil.HandlerUtility.add_to_telemetery_data("isPrePostEnabled", "true")
if(PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success):
backup_logger.log('Triggering File System Consistent Backup because of error code ' + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(PluginHostErrorCode), True)
doFsConsistentbackup = True
if not doFsConsistentbackup:
preResult = PluginHostObj.pre_script()
dobackup = preResult.continueBackup
if(g_fsfreeze_on == False and preResult.anyScriptFailed):
dobackup = False
if dobackup:
freeze_snapshot(thread_timeout)
backup_logger.log('unfreeze ends...')
if not doFsConsistentbackup:
postResult = PluginHostObj.post_script()
if not postResult.continueBackup:
dobackup = False
if(g_fsfreeze_on == False and postResult.anyScriptFailed):
dobackup = False
if not dobackup:
if run_result == CommonVariables.success and PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success:
run_status = 'error'
run_result = PluginHostErrorCode
hutil.SetExtErrorCode(PluginHostErrorCode)
error_msg = 'Plugin Host Precheck Failed'
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
backup_logger.log(error_msg, True)
if run_result == CommonVariables.success:
pre_plugin_errors = preResult.errors
for error in pre_plugin_errors:
if error.errorCode != CommonVariables.PrePost_PluginStatus_Success and error.errorCode != CommonVariables.PrePost_ScriptStatus_Warning:
run_status = 'error'
run_result = error.errorCode
hutil.SetExtErrorCode(error.errorCode)
error_msg = 'PreScript failed for the plugin ' + error.pluginName
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
backup_logger.log(error_msg, True)
break
if run_result == CommonVariables.success:
post_plugin_errors = postResult.errors
for error in post_plugin_errors:
if error.errorCode != CommonVariables.PrePost_PluginStatus_Success and error.errorCode != CommonVariables.PrePost_ScriptStatus_Warning:
run_status = 'error'
run_result = error.errorCode
hutil.SetExtErrorCode(error.errorCode)
error_msg = 'PostScript failed for the plugin ' + error.pluginName
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
backup_logger.log(error_msg, True)
break
if run_result == CommonVariables.success and not doFsConsistentbackup and not (preResult.anyScriptFailed or postResult.anyScriptFailed):
run_status = 'success'
run_result = CommonVariables.success_appconsistent
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success_appconsistent)
error_msg = 'Enable Succeeded with App Consistent Snapshot'
backup_logger.log(error_msg, True)
else:
run_status = 'error'
run_result = CommonVariables.error_parameter
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)
error_msg = 'command is not correct'
backup_logger.log(error_msg, True, 'Error')
except Exception as e:
errMsg = 'Failed to enable the extension with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
global_error_result = e
"""
we do the final report here to get rid of the complex logic to handle the logging when file system be freezed issue.
"""
try:
if(global_error_result is not None):
if(hasattr(global_error_result,'errno') and global_error_result.errno == 2):
run_result = CommonVariables.error_12
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_12)
elif(para_parser is None):
run_result = CommonVariables.error_parameter
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)
else:
run_result = CommonVariables.error
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
run_status = 'error'
error_msg += ('Enable failed.' + str(global_error_result))
status_report_msg = None
HandlerUtil.HandlerUtility.add_to_telemetery_data("extErrorCode", str(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeNameDict[hutil.ExtErrorCode]))
total_used_size = -1
blob_report_msg, file_report_msg = get_status_to_report(run_status,run_result,error_msg, snapshot_info_array)
if(hutil.is_status_file_exists()):
status_report_to_file(file_report_msg)
status_report_to_blob(blob_report_msg)
except Exception as e:
errMsg = 'Failed to log status in extension'
backup_logger.log(errMsg, True, 'Error')
if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
backup_logger.commit(para_parser.logsBlobUri)
else:
backup_logger.log("the logs blob uri is not there, so do not upload log.")
backup_logger.commit_to_local()
sys.exit(0)
def uninstall():
hutil.do_parse_context('Uninstall')
hutil.do_exit(0,'Uninstall','success','0', 'Uninstall succeeded')
def disable():
hutil.do_parse_context('Disable')
hutil.do_exit(0,'Disable','success','0', 'Disable Succeeded')
def update():
hutil.do_parse_context('Update')
hutil.do_exit(0,'Update','success','0', 'Update Succeeded')
def enable():
global backup_logger,hutil,error_msg,para_parser
hutil.do_parse_context('Enable')
try:
backup_logger.log('starting to enable', True)
# handle the restoring scenario.
mi = MachineIdentity()
stored_identity = mi.stored_identity()
if(stored_identity is None):
mi.save_identity()
else:
current_identity = mi.current_identity()
if(current_identity != stored_identity):
current_seq_no = -1
backup_logger.log("machine identity not same, set current_seq_no to " + str(current_seq_no) + " " + str(stored_identity) + " " + str(current_identity), True)
hutil.set_last_seq(current_seq_no)
mi.save_identity()
hutil.exit_if_same_seq()
"""
protectedSettings is the privateConfig passed from Powershell.
WATCHOUT that, the _context_config are using the most freshest timestamp.
if the time sync is alive, this should be right.
"""
protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')
public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
para_parser = ParameterParser(protected_settings, public_settings)
if(bool(public_settings) and not protected_settings): #Protected settings decryption failed case
error_msg = "unable to load certificate"
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedHandlerGuestAgentCertificateNotFound)
temp_result=CommonVariables.FailedHandlerGuestAgentCertificateNotFound
temp_status= 'error'
exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)
if(para_parser.commandStartTimeUTCTicks is not None and para_parser.commandStartTimeUTCTicks != ""):
utcTicksLong = int(para_parser.commandStartTimeUTCTicks)
backup_logger.log('utcTicks in long format' + str(utcTicksLong), True)
commandStartTime = convert_time(utcTicksLong)
utcNow = datetime.datetime.utcnow()
backup_logger.log('command start time is ' + str(commandStartTime) + " and utcNow is " + str(utcNow), True)
timespan = utcNow - commandStartTime
MAX_TIMESPAN = 150 * 60 # in seconds
# handle the machine identity for the restoration scenario.
total_span_in_seconds = timedelta_total_seconds(timespan)
backup_logger.log('timespan is ' + str(timespan) + ' ' + str(total_span_in_seconds))
if(para_parser.taskId is not None and para_parser.taskId != ""):
backup_logger.log('taskId: ' + str(para_parser.taskId), True)
exit_if_same_taskId(para_parser.taskId)
taskIdentity = TaskIdentity()
taskIdentity.save_identity(para_parser.taskId)
hutil.save_seq()
temp_status= 'transitioning'
temp_result=CommonVariables.success
temp_msg='Transitioning state in enable'
blob_report_msg, file_report_msg = get_status_to_report(temp_status, temp_result, temp_msg, None)
file_status_upload_thread=Thread(target=status_report_to_file, args=(file_report_msg,))
file_status_upload_thread.start()
blob_status_upload_thread=Thread(target=status_report_to_blob, args=(blob_report_msg,))
blob_status_upload_thread.start()
if(hutil.is_prev_in_transition()):
backup_logger.log('retrieving the previous logs for this', True)
backup_logger.set_prev_log()
if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
log_upload_thread=Thread(target=thread_for_log_upload)
log_upload_thread.start()
log_upload_thread.join(60)
file_status_upload_thread.join(30)
blob_status_upload_thread.join(60)
start_daemon()
sys.exit(0)
except Exception as e:
errMsg = 'Failed to call the daemon with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
global_error_result = e
temp_status= 'error'
temp_result=CommonVariables.error
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
error_msg = 'Failed to call the daemon'
exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)
def thread_for_log_upload():
global para_parser,backup_logger
backup_logger.commit(para_parser.logsBlobUri)
def start_daemon():
args = [os.path.join(os.getcwd(), "main/handle.sh"), "daemon"]
backup_logger.log("start_daemon with args: {0}".format(args), True)
#This process will start a new background process by calling
# handle.py -daemon
#to run the script and will exit itself immediately.
#Redirect stdout and stderr to /dev/null. Otherwise the daemon process will
#throw a Broken pipe exception when the parent process exits.
devnull = open(os.devnull, 'w')
child = subprocess.Popen(args, stdout=devnull, stderr=devnull)
if __name__ == '__main__' :
main()
|
|
"""Utillity class for processing scansion and text."""
import unicodedata
import sys
import re
from typing import Dict, List, Tuple
__author__ = ['Todd Cook <todd.g.cook@gmail.com>']
__license__ = 'MIT License'
"""Helper methods for processing scansion"""
qu_matcher = re.compile("[qQ][uU]")
def remove_punctuation_dict() -> Dict[int, None]:
"""
Provide a dictionary for removing punctuation, swallowing spaces.
:return dict with punctuation from the unicode table
>>> print("I'm ok! Oh #%&*()[]{}!? Fine!".translate(
... remove_punctuation_dict()).lstrip())
Im ok Oh Fine
"""
tmp = dict((i, None) for i in range(sys.maxunicode)
if unicodedata.category(chr(i)).startswith('P'))
return tmp
def punctuation_for_spaces_dict() -> Dict[int, str]:
"""
Provide a dictionary for removing punctuation, keeping spaces. Essential for scansion
to keep stress patterns in alignment with original vowel positions in the verse.
:return dict with punctuation from the unicode table
>>> print("I'm ok! Oh #%&*()[]{}!? Fine!".translate(
... punctuation_for_spaces_dict()).strip())
I m ok Oh Fine
"""
return dict((i, " ") for i in range(sys.maxunicode)
if unicodedata.category(chr(i)).startswith('P'))
def differences(scansion: str, candidate: str) -> List[int]:
"""
Given two strings, return a list of index positions where the contents differ.
:param scansion:
:param candidate:
:return:
>>> differences("abc", "abz")
[2]
"""
before = scansion.replace(" ", "")
after = candidate.replace(" ", "")
diffs = []
for idx, tmp in enumerate(before):
if before[idx] != after[idx]:
diffs.append(idx)
return diffs
def mark_list(line: str) -> List[int]:
"""
Given a string, return a list of index positions where a character/non blank space exists.
:param line:
:return:
>>> mark_list(" a b c")
[1, 3, 5]
"""
marks = []
for idx, car in enumerate(list(line)):
if car != " ":
marks.append(idx)
return marks
def space_list(line: str) -> List[int]:
"""
Given a string, return a list of index positions where a blank space occurs.
:param line:
:return:
>>> space_list(" abc ")
[0, 1, 2, 3, 7]
"""
spaces = []
for idx, car in enumerate(list(line)):
if car == " ":
spaces.append(idx)
return spaces
def flatten(list_of_lists):
"""
Given a list of lists, flatten all the items into one list.
:param list_of_lists:
:return:
>>> flatten([ [1, 2, 3], [4, 5, 6]])
[1, 2, 3, 4, 5, 6]
"""
return [val for sublist in list_of_lists for val in sublist]
def to_syllables_with_trailing_spaces(line: str, syllables: List[str]) -> List[str]:
"""
Given a line of syllables and spaces, and a list of syllables, produce a list of the
syllables with trailing spaces attached as appropriate.
:param line:
:param syllables:
:return:
>>> to_syllables_with_trailing_spaces(' arma virumque cano ',
... ['ar', 'ma', 'vi', 'rum', 'que', 'ca', 'no' ])
[' ar', 'ma ', 'vi', 'rum', 'que ', 'ca', 'no ']
"""
syllabs_spaces = []
idx = 0
linelen = len(line)
for position, syl in enumerate(syllables):
if syl not in line and re.match('w', syl, flags=re.IGNORECASE):
syl = syl.replace('w', 'u').replace('W', 'U')
start = line.index(syl, idx)
idx = start + len(syl)
if position == 0 and start > 0: # line starts with punctuation, substituted w/ spaces
syl = (start * " ") + syl
if idx + 1 > len(line):
syllabs_spaces.append(syl)
return syllabs_spaces
nextchar = line[idx]
if nextchar != " ":
syllabs_spaces.append(syl)
continue
else:
tmpidx = idx
while tmpidx < linelen and nextchar == " ":
syl += " "
tmpidx += 1
if tmpidx == linelen:
syllabs_spaces.append(syl)
return syllabs_spaces
nextchar = line[tmpidx]
idx = tmpidx - 1
syllabs_spaces.append(syl)
return syllabs_spaces
def join_syllables_spaces(syllables: List[str], spaces: List[int]) -> str:
"""
Given a list of syllables, and a list of integers indicating the position of spaces, return
a string that has a space inserted at the designated points.
:param syllables:
:param spaces:
:return:
>>> join_syllables_spaces(["won", "to", "tree", "dun"], [3, 6, 11])
'won to tree dun'
"""
syllable_line = list("".join(syllables))
for space in spaces:
syllable_line.insert(space, " ")
return "".join(flatten(syllable_line))
def starts_with_qu(word) -> bool:
"""
Determine whether or not a word starts with the letters Q and U.
:param word:
:return:
>>> starts_with_qu("qui")
True
>>> starts_with_qu("Quirites")
True
"""
return qu_matcher.search(word) is not None
def stress_positions(stress: str, scansion: str) -> List[int]:
"""
Given a stress value and a scansion line, return the index positions of the stresses.
:param stress:
:param scansion:
:return:
>>> stress_positions("-", " - U U - UU - U U")
[0, 3, 6]
"""
line = scansion.replace(" ", "")
stresses = []
for idx, char in enumerate(line):
if char == stress:
stresses.append(idx)
return stresses
def merge_elisions(elided: List[str]) -> str:
"""
Given a list of strings with different space swapping elisions applied, merge the elisions,
taking the most without compounding the omissions.
:param elided:
:return:
>>> merge_elisions([
... "ignavae agua multum hiatus", "ignav agua multum hiatus" ,"ignavae agua mult hiatus"])
'ignav agua mult hiatus'
"""
results = list(elided[0])
for line in elided:
for idx, car in enumerate(line):
if car == " ":
results[idx] = " "
return "".join(results)
def move_consonant_right(letters: List[str], positions: List[int]) -> List[str]:
"""
Given a list of letters, and a list of consonant positions, move the consonant positions to
the right, merging strings as necessary.
:param letters:
:param positions:
:return:
>>> move_consonant_right(list("abbra"), [ 2, 3])
['a', 'b', '', '', 'bra']
"""
for pos in positions:
letters[pos + 1] = letters[pos] + letters[pos + 1]
letters[pos] = ""
return letters
def move_consonant_left(letters: List[str], positions: List[int]) -> List[str]:
"""
Given a list of letters, and a list of consonant positions, move the consonant positions to
the left, merging strings as necessary.
:param letters:
:param positions:
:return:
>>> move_consonant_left(['a', 'b', '', '', 'bra'], [1])
['ab', '', '', '', 'bra']
"""
for pos in positions:
letters[pos - 1] = letters[pos - 1] + letters[pos]
letters[pos] = ""
return letters
def merge_next(letters: List[str], positions: List[int]) -> List[str]:
"""
Given a list of letter positions, merge each letter with its next neighbor.
:param letters:
:param positions:
:return:
>>> merge_next(['a', 'b', 'o', 'v', 'o' ], [0, 2])
['ab', '', 'ov', '', 'o']
>>> # Note: because it operates on the original list passed in, the effect is not cummulative:
>>> merge_next(['a', 'b', 'o', 'v', 'o' ], [0, 2, 3])
['ab', '', 'ov', 'o', '']
"""
for pos in positions:
letters[pos] = letters[pos] + letters[pos + 1]
letters[pos + 1] = ""
return letters
def remove_blanks(letters: List[str]):
"""
Given a list of letters, remove any empty strings.
:param letters:
:return:
>>> remove_blanks(['a', '', 'b', '', 'c'])
['a', 'b', 'c']
"""
cleaned = []
for letter in letters:
if letter != "":
cleaned.append(letter)
return cleaned
def split_on(word: str, section: str) -> Tuple[str, str]:
"""
Given a string, split on a section, and return the two sections as a tuple.
:param word:
:param section:
:return:
>>> split_on('hamrye', 'ham')
('ham', 'rye')
"""
return word[:word.index(section)] + section, word[word.index(section) + len(section):]
def remove_blank_spaces(syllables: List[str]) -> List[str]:
"""
Given a list of letters, remove any blank spaces or empty strings.
:param syllables:
:return:
>>> remove_blank_spaces(['', 'a', ' ', 'b', ' ', 'c', ''])
['a', 'b', 'c']
"""
cleaned = []
for syl in syllables:
if syl == " " or syl == '':
pass
else:
cleaned.append(syl)
return cleaned
def overwrite(char_list: List[str], regexp: str, quality: str, offset: int = 0) -> List[str]:
"""
Given a list of characters and spaces, a matching regular expression, and a quality or
character, replace the character at the start of each match, shifted by the
optional offset, with the given quality character.
:param char_list:
:param regexp:
:param quality:
:param offset:
:return:
>>> overwrite(list('multe igne'), r'e\s[aeiou]', ' ')
['m', 'u', 'l', 't', ' ', ' ', 'i', 'g', 'n', 'e']
"""
long_matcher = re.compile(regexp)
line = "".join(char_list)
long_positions = long_matcher.finditer(line)
for match in long_positions:
(start, end) = match.span() # pylint: disable=unused-variable
char_list[start + offset] = quality
return char_list
def overwrite_dipthong(char_list: List[str], regexp: str, quality: str) -> List[str]:
"""
Given a list of characters and spaces, a matching regular expression, and a quality or
character, replace the two characters of each matching diphthong with the
given quality character.
:param char_list: a list of characters
:param regexp: a matching regular expression
:param quality: a quality or character to replace
:return: a list of characters with the diphthong overwritten
>>> overwrite_dipthong(list('multae aguae'), r'ae\s[aeou]', ' ')
['m', 'u', 'l', 't', ' ', ' ', ' ', 'a', 'g', 'u', 'a', 'e']
"""
long_matcher = re.compile(regexp)
line = "".join(char_list)
long_positions = long_matcher.finditer(line)
for match in long_positions:
(start, end) = match.span() # pylint: disable=unused-variable
char_list[start] = quality
char_list[start + 1] = quality
return char_list
def get_unstresses(stresses: List[int], count: int) -> List[int]:
"""
Given a list of stressed positions, and count of possible positions, return a list of
the unstressed positions.
:param stresses: a list of stressed positions
:param count: the number of possible positions
:return: a list of unstressed positions
>>> get_unstresses([0, 3, 6, 9, 12, 15], 17)
[1, 2, 4, 5, 7, 8, 10, 11, 13, 14, 16]
"""
return list(set(range(count)) - set(stresses))
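# A short usage sketch (not part of the original module) showing how a few of the
# helpers above compose: swap punctuation for spaces, then read stressed and
# unstressed positions off a scansion line.
# >>> "Arma virumque cano!".translate(punctuation_for_spaces_dict())
# 'Arma virumque cano '
# >>> stress_positions("-", " - U U - UU - U U")
# [0, 3, 6]
# >>> get_unstresses([0, 3, 6], 9)
# [1, 2, 4, 5, 7, 8]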
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
from ryu.base import app_manager
from ryu.lib import hub
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.lib.packet import packet, ethernet, arp, ipv4, icmp, ether_types, mpls, tcp, udp
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto.ofproto_v1_3 import OFP_DEFAULT_PRIORITY
from command_sender import CommandSender
from network_monitor import NetworkMonitor
from route_calculator import RouteCalculator
from flow_collector import FlowCollector
'''
###For chapter 2###
when packet_in then
packet_out ONLY!
comparison for base_app2
----test----
Linear topology
iperfTCP
'''
class HLApp(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
'network_monitor': NetworkMonitor,
'flow_collector':FlowCollector
}
def __init__(self, *args, **kwargs):
super(HLApp, self).__init__(*args, **kwargs)
self.network_monitor = kwargs['network_monitor'] # context
self.flow_collector = kwargs['flow_collector'] #context
self.commandSender = CommandSender.get_instance() # util
self.routeCalculator = RouteCalculator.get_instance() # util
self.DISCOVER_PERIOD = 3
self.network_monitor_thread = hub.spawn(self._monitor)
# context
def _monitor(self):
while True:
hub.sleep(self.DISCOVER_PERIOD)
self.network_monitor.pre_adjacency_matrix = copy.deepcopy(self.network_monitor.adjacency_matrix)
self.network_monitor.update_topology()
if self.network_monitor.pre_adjacency_matrix != self.network_monitor.adjacency_matrix:
self.logger.info('***********adjacency_matrix CHANGED***********')
self.routeCalculator.pre_path_table = copy.deepcopy(self.routeCalculator.path_table)
self.routeCalculator.path_table = self.routeCalculator.get_path_table(
self.network_monitor.adjacency_matrix,
self.network_monitor.dpids_to_access_port)
self.routeCalculator.pre_route_table = copy.deepcopy(self.routeCalculator.route_table)
self.routeCalculator.route_table = self.routeCalculator.get_route_table(
self.network_monitor.adjacency_matrix,
self.network_monitor.dpids_to_access_port)
if self.routeCalculator.pre_path_table != self.routeCalculator.path_table:
self.logger.info('------path_table CHANGED-------')
# install table-miss flow entry for each switch
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
# add miss entry
self.commandSender.add_flow(datapath, 0, match, actions)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
buffer_id = msg.buffer_id
datapath = msg.datapath
ofproto = datapath.ofproto
dpid = datapath.id
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
return
arp_pkt = pkt.get_protocol(arp.arp)
if isinstance(arp_pkt, arp.arp): # arp request and reply
print("----arp-------")
arp_src_ip = arp_pkt.src_ip
arp_dst_ip = arp_pkt.dst_ip
self.network_monitor.dpid_ip_to_port.setdefault(dpid,{})
self.network_monitor.dpid_ip_to_port[dpid][arp_src_ip] = in_port
if arp_dst_ip in self.network_monitor.dpid_ip_to_port[dpid]:
out_port = self.network_monitor.dpid_ip_to_port[dpid][arp_dst_ip]
else:
out_port = ofproto.OFPP_FLOOD
data = msg.data
self.commandSender.packet_out(datapath, in_port, out_port, data)
self.__register_access_info(dpid, arp_src_ip, in_port)
ipv4_pkt = pkt.get_protocol(ipv4.ipv4)
if isinstance(ipv4_pkt,ipv4.ipv4):
src_ip = ipv4_pkt.src
dst_ip = ipv4_pkt.dst
src_sw = self.__get_host_location(src_ip)
dst_sw = self.__get_host_location(dst_ip)
if src_sw and dst_sw:# end-to-end connection
print('--------------end-to-end connection--------------')
src_dpid = src_sw[0]
dst_dpid = dst_sw[0]
src_in_port = src_sw[1]
dst_out_port = dst_sw[1]
icmp_pkt = pkt.get_protocol(icmp.icmp)
tcp_pkt = pkt.get_protocol(tcp.tcp)
if isinstance(icmp_pkt,icmp.icmp):
print("----icmp-------")
if src_dpid == dst_dpid:
print("src_dpid == dst_dpid")
data = msg.data
self.commandSender.packet_out(datapath, in_port, dst_out_port, data, buffer_id)
else:
print("src_dpid != dst_dpid",src_dpid,dst_dpid)
if dpid == src_dpid:
route = self.routeCalculator.get_route(src_dpid, dst_dpid)
out_port = self.network_monitor.links_dpid_to_port[(route[0],route[1])][0]
elif dpid == dst_dpid:
out_port = dst_out_port
else:
route = self.routeCalculator.get_route(src_dpid, dst_dpid)
index = 0
for i in range(len(route)):
if route[i] == dpid:
index = i
break
out_port = self.network_monitor.links_dpid_to_port[(route[index],route[index+1])][0]
data = msg.data
self.commandSender.packet_out(datapath, in_port, out_port, data)
return
if isinstance(tcp_pkt,tcp.tcp):
print("------tcp-----------")
if src_dpid == dst_dpid:
print("src_dpid == dst_dpid")
data = msg.data
self.commandSender.packet_out(datapath, in_port, dst_out_port, data, buffer_id)
else:
print("src_dpid != dst_dpid")
if dpid == src_dpid:
print('dpid:',dpid)
route = self.routeCalculator.get_route(src_dpid, dst_dpid)
out_port = self.network_monitor.links_dpid_to_port[(route[0],route[1])][0]
print('out_port:',out_port)
elif dpid == dst_dpid:
print('dpid:',dpid)
out_port = dst_out_port
print('out_port:',out_port)
else:
print('dpid:',dpid)
route = self.routeCalculator.get_route(src_dpid, dst_dpid)
index = 0
for i in range(len(route)):
if route[i] == dpid:
index = i
break
out_port = self.network_monitor.links_dpid_to_port[(route[index],route[index+1])][0]
print('out_port:',out_port)
data = msg.data
self.commandSender.packet_out(datapath, in_port, out_port, data)
return
def __add_last(self, dpid, out_port, label):
match = {
"dl_type":ether_types.ETH_TYPE_MPLS,
"mpls_label":label,
}
actions = [{"type":"OUTPUT","port":out_port}]
self.commandSender.add_flow_rest_1(dpid, OFP_DEFAULT_PRIORITY, match, actions)
def _get_mpls_label(self,traffic):
for label in self.pathPreInstall.mpls_to_path.keys():
if self.pathPreInstall.mpls_to_path[label] == traffic:
return label
return None
def __add_mpls(self, pkt_old, label, src_mac, dst_mac):
pkt_new = packet.Packet()
mpls_proto = mpls.mpls(label=label) # label:20bit(0~1048576-1), exp(QoS):3bit, bsb:1bit, ttl:8bit
pkt_new.add_protocol(ethernet.ethernet(dst=dst_mac, src=src_mac,ethertype=ether_types.ETH_TYPE_MPLS))
pkt_new.add_protocol(mpls_proto)
for i in range(1,len(pkt_old)):#[ethernet, ipv4, tcp,..]
pkt_new.add_protocol(pkt_old[i])
return pkt_new
def __remove_mpls(self,pkt_old, src_mac, dst_mac):
pkt_new = packet.Packet()
pkt_new.add_protocol(ethernet.ethernet(dst=dst_mac, src=src_mac,ethertype=ether_types.ETH_TYPE_IP))
for i in range(2,len(pkt_old)):#[ethernet, mpls, ipv4, tcp,..]
pkt_new.add_protocol(pkt_old[i])
return pkt_new
def __register_access_info(self, dpid, ip, port):
if port in self.network_monitor.dpids_to_access_port[dpid]: # {1: [4], 2: [], 3: [], 4: [2, 3], 5: [2, 3], 6: [2, 3]}
self.network_monitor.access_table[(dpid,port)] = ip
def __get_host_location(self,host):
for sw in self.network_monitor.access_table.keys():
if self.network_monitor.access_table[sw] == host:
return sw
return None
# 3 layer
def install_flow(self, traffic, dst_ip, src_in_port, dst_out_port):
n = len(traffic)
for j in range(n):
dpid = traffic[j]
priority = OFP_DEFAULT_PRIORITY
if j == 0:
print("install flow on src_dpid:",dpid)
in_port = src_in_port
out_port = self.network_monitor.links_dpid_to_port[(traffic[j],traffic[j+1])][0]
elif j == n - 1:
print("install flow on dst_dpid:",dpid)
in_port = self.network_monitor.links_dpid_to_port[(traffic[j-1],traffic[j])][1]
out_port = dst_out_port
else:
print("install flow on dpid:",dpid)
in_port = self.network_monitor.links_dpid_to_port[(traffic[j-1],traffic[j])][1]
out_port = self.network_monitor.links_dpid_to_port[(traffic[j],traffic[j+1])][0]
match = {
"dl_type":ether_types.ETH_TYPE_IP,
"in_port":in_port,
"nw_dst":dst_ip,
}
actions = [{"type":"OUTPUT","port":out_port}]
self.commandSender.add_flow_rest_1(dpid, priority, match, actions, 100)
#4 layer
def install_flow_tcp(self, traffic, src_ip, dst_ip, src_in_port, dst_out_port, src_tcp, dst_tcp):
n = len(traffic)
for j in range(n):
dpid = traffic[j]
priority = OFP_DEFAULT_PRIORITY
if j == 0:
print("install flow on src_dpid:",dpid)
in_port = src_in_port
out_port = self.network_monitor.links_dpid_to_port[(traffic[j],traffic[j+1])][0]
elif j == n - 1:
print("install flow on dst_dpid:",dpid)
in_port = self.network_monitor.links_dpid_to_port[(traffic[j-1],traffic[j])][1]
out_port = dst_out_port
else:
print("install flow on dpid:",dpid)
in_port = self.network_monitor.links_dpid_to_port[(traffic[j-1],traffic[j])][1]
out_port = self.network_monitor.links_dpid_to_port[(traffic[j],traffic[j+1])][0]
match = {
"dl_type":ether_types.ETH_TYPE_IP,
"nw_proto":6,
"in_port":in_port,
"nw_src":src_ip,
"nw_dst":dst_ip,
"tp_src":src_tcp,
"tp_dst":dst_tcp
}
actions = [{"type":"OUTPUT","port":out_port}]
self.commandSender.add_flow_rest_1(dpid, priority, match, actions, 10)
#4 layer
def install_flow_udp(self, traffic, src_ip, dst_ip, src_in_port, dst_out_port, src_tcp, dst_tcp):
n = len(traffic)
for j in range(n):
dpid = traffic[j]
priority = OFP_DEFAULT_PRIORITY
if j == 0:
print("install flow on src_dpid:",dpid)
in_port = src_in_port
out_port = self.network_monitor.links_dpid_to_port[(traffic[j],traffic[j+1])][0]
elif j == n - 1:
print("install flow on dst_dpid:",dpid)
in_port = self.network_monitor.links_dpid_to_port[(traffic[j-1],traffic[j])][1]
out_port = dst_out_port
else:
print("install flow on dpid:",dpid)
in_port = self.network_monitor.links_dpid_to_port[(traffic[j-1],traffic[j])][1]
out_port = self.network_monitor.links_dpid_to_port[(traffic[j],traffic[j+1])][0]
match = {
"dl_type":ether_types.ETH_TYPE_IP,
"nw_proto":6,
"in_port":in_port,
"nw_src":src_ip,
"nw_dst":dst_ip,
"up_src":src_tcp,
"up_dst":dst_tcp
}
actions = [{"type":"OUTPUT","port":out_port}]
self.commandSender.add_flow_rest_1(dpid, priority, match, actions, 10)
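# A minimal sketch (illustrative values, not from the original app): for a route
# [1, 2, 3] with links_dpid_to_port = {(1, 2): (2, 1), (2, 3): (3, 1)} and a
# hypothetical dst_ip of "10.0.0.3", the layer-3 install_flow() above would push
# on the middle switch (dpid 2) roughly:
# match = {"dl_type": ether_types.ETH_TYPE_IP, "in_port": 1, "nw_dst": "10.0.0.3"}
# actions = [{"type": "OUTPUT", "port": 3}]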
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Weight initializer."""
from __future__ import absolute_import, print_function
import re
import logging
import warnings
import json
from math import sqrt
import numpy as np
from .base import string_types
from .ndarray import NDArray, load
from . import random
from . import registry
from . import ndarray
# inherit str for backward compatibility
class InitDesc(str):
"""Descriptor for the initialization pattern.
Parameters
----------
name : str
Name of variable.
attrs : dict of str to str
Attributes of this variable taken from ``Symbol.attr_dict``.
global_init : Initializer
Global initializer to fallback to.
"""
def __new__(cls, name, attrs=None, global_init=None):
ret = super(InitDesc, cls).__new__(cls, name)
ret.attrs = attrs or {}
ret.global_init = global_init
return ret
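# Quick illustration (not from the original docstring): an InitDesc behaves like
# the plain variable-name string while carrying extra attributes.
# >>> desc = InitDesc('fc1_weight', attrs={'__init__': ''})
# >>> str(desc), desc.attrs
# ('fc1_weight', {'__init__': ''})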
class Initializer(object):
"""The base class of an initializer."""
def __init__(self, **kwargs):
self._kwargs = kwargs
self._verbose = False
self._print_func = None
def set_verbosity(self, verbose=False, print_func=None):
"""Switch on/off verbose mode
Parameters
----------
verbose : bool
switch on/off verbose mode
print_func : function
A function that computes statistics of initialized arrays.
Takes an `NDArray` and returns a `str`. Defaults to the root mean
square value, str((norm(x)/sqrt(x.size)).asscalar()).
"""
self._verbose = verbose
if print_func is None:
def asum_stat(x):
"""returns |x|/size(x), async execution."""
return str((ndarray.norm(x)/sqrt(x.size)).asscalar())
print_func = asum_stat
self._print_func = print_func
return self
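# Because set_verbosity returns self, it can be chained when constructing an
# initializer (illustrative):
# >>> init = mx.init.Uniform(0.07).set_verbosity(verbose=True)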
def _verbose_print(self, desc, init, arr):
"""Internal verbose print function
Parameters
----------
desc : InitDesc or str
name of the array
init : str
initializer pattern
arr : NDArray
initialized array
"""
if self._verbose and self._print_func:
logging.info('Initialized %s as %s: %s', desc, init, self._print_func(arr))
def dumps(self):
"""Saves the initializer to string
Returns
-------
str
JSON formatted string that describes the initializer.
Examples
--------
>>> # Create initializer and retrieve its parameters
...
>>> init = mx.init.Normal(0.5)
>>> init.dumps()
'["normal", {"sigma": 0.5}]'
>>> init = mx.init.Xavier(factor_type="in", magnitude=2.34)
>>> init.dumps()
'["xavier", {"rnd_type": "uniform", "magnitude": 2.34, "factor_type": "in"}]'
"""
return json.dumps([self.__class__.__name__.lower(), self._kwargs])
def __call__(self, desc, arr):
"""Initialize an array
Parameters
----------
desc : InitDesc
Initialization pattern descriptor.
arr : NDArray
The array to be initialized.
"""
if not isinstance(desc, InitDesc):
self._legacy_init(desc, arr)
return
if desc.global_init is None:
desc.global_init = self
init = desc.attrs.get('__init__', "")
if init:
# when calling Variable initializer
create(init)._init_weight(desc, arr)
self._verbose_print(desc, init, arr)
else:
# register nnvm::FSetInputVariableAttrs in the backend for new patterns
# don't add new cases here.
if desc.endswith('weight'):
self._init_weight(desc, arr)
self._verbose_print(desc, 'weight', arr)
elif desc.endswith('bias'):
self._init_bias(desc, arr)
self._verbose_print(desc, 'bias', arr)
elif desc.endswith('gamma'):
self._init_gamma(desc, arr)
self._verbose_print(desc, 'gamma', arr)
elif desc.endswith('beta'):
self._init_beta(desc, arr)
self._verbose_print(desc, 'beta', arr)
else:
self._init_default(desc, arr)
def _legacy_init(self, name, arr):
"""Legacy initialization method.
Parameters
----------
name : str
Name of corresponding NDArray.
arr : NDArray
NDArray to be initialized.
"""
warnings.warn(
"\033[91mCalling initializer with init(str, NDArray) has been deprecated." \
"please use init(mx.init.InitDesc(...), NDArray) instead.\033[0m",
DeprecationWarning, stacklevel=3)
if not isinstance(name, string_types):
raise TypeError('name must be string')
if not isinstance(arr, NDArray):
raise TypeError('arr must be NDArray')
if name.startswith('upsampling'):
self._init_bilinear(name, arr)
elif name.startswith('stn_loc') and name.endswith('weight'):
self._init_zero(name, arr)
elif name.startswith('stn_loc') and name.endswith('bias'):
self._init_loc_bias(name, arr)
elif name.endswith('bias'):
self._init_bias(name, arr)
elif name.endswith('gamma'):
self._init_gamma(name, arr)
elif name.endswith('beta'):
self._init_beta(name, arr)
elif name.endswith('weight'):
self._init_weight(name, arr)
elif name.endswith("moving_mean"):
self._init_zero(name, arr)
elif name.endswith("moving_var"):
self._init_one(name, arr)
elif name.endswith("moving_inv_var"):
self._init_zero(name, arr)
elif name.endswith("moving_avg"):
self._init_zero(name, arr)
else:
self._init_default(name, arr)
def _init_bilinear(self, _, arr):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
f = np.ceil(shape[3] / 2.)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
def _init_loc_bias(self, _, arr):
shape = arr.shape
assert(shape[0] == 6)
arr[:] = np.array([1.0, 0, 0, 0, 1.0, 0])
def _init_zero(self, _, arr):
arr[:] = 0.0
def _init_one(self, _, arr):
arr[:] = 1.0
def _init_bias(self, _, arr):
arr[:] = 0.0
def _init_gamma(self, _, arr):
arr[:] = 1.0
def _init_beta(self, _, arr):
arr[:] = 0.0
def _init_weight(self, name, arr):
"""Abstract method to Initialize weight."""
raise NotImplementedError("Must override it")
def _init_default(self, name, _):
raise ValueError(
'Unknown initialization pattern for %s. ' \
'Default initialization is now limited to '\
'"weight", "bias", "gamma" (1.0), and "beta" (0.0).' \
'Please use mx.sym.Variable(init=mx.init.*) to set initialization pattern' % name)
# pylint: disable=invalid-name
_register = registry.get_register_func(Initializer, 'initializer')
alias = registry.get_alias_func(Initializer, 'initializer')
create = registry.get_create_func(Initializer, 'initializer')
# pylint: enable=invalid-name
def register(klass):
"""Registers a custom initializer.
Custom initializers can be created by extending `mx.init.Initializer` and implementing the
required functions like `_init_weight` and `_init_bias`. The created initializer must be
registered using `mx.init.register` before it can be called by name.
Parameters
----------
klass : class
A subclass of `mx.init.Initializer` that needs to be registered as a custom initializer.
Example
-------
>>> # Create and register a custom initializer that
... # initializes weights to 0.1 and biases to 1.
...
>>> @mx.init.register
... @alias('myinit')
... class CustomInit(mx.init.Initializer):
... def __init__(self):
... super(CustomInit, self).__init__()
... def _init_weight(self, _, arr):
... arr[:] = 0.1
... def _init_bias(self, _, arr):
... arr[:] = 1
...
>>> # Module is an instance of 'mxnet.module.Module'
...
>>> module.init_params("custominit")
>>> # module.init_params("myinit")
>>> # module.init_params(CustomInit())
"""
return _register(klass)
class Load(object):
"""Initializes variables by loading data from file or dict.
**Note** Load will drop ``arg:`` or ``aux:`` from name and
initialize the variables that match with the prefix dropped.
Parameters
----------
param: str or dict of str->`NDArray`
Parameter file or dict mapping name to NDArray.
default_init: Initializer
Default initializer when name is not found in `param`.
verbose: bool
Flag for enabling logging of source when initializing.
"""
def __init__(self, param, default_init=None, verbose=False):
if isinstance(param, str):
param = load(param)
assert isinstance(param, dict)
self.param = {}
for name, arr in param.items():
if name.startswith('arg:') or name.startswith('aux:'):
self.param[name[4:]] = arr
else:
self.param[name] = arr
self.default_init = default_init
self.verbose = verbose
def __call__(self, name, arr):
if name in self.param:
assert arr.shape == self.param[name].shape, \
'Parameter %s cannot be initialized from loading. '%name + \
'Shape mismatch, target %s vs loaded %s'%(str(arr.shape),
self.param[name].shape)
arr[:] = self.param[name]
if self.verbose:
logging.info('Initialized %s by loading', name)
else:
assert self.default_init is not None, \
"Cannot Initialize %s. Not found in loaded param "%name + \
"and no default Initializer is provided."
self.default_init(name, arr)
if self.verbose:
logging.info('Initialized %s by default', name)
class Mixed(object):
"""Initialize parameters using multiple initializers.
Parameters
----------
patterns: list of str
List of regular expressions matching parameter names.
initializers: list of Initializer
List of initializers corresponding to `patterns`.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize biases to zero
... # and every other parameter to random values with uniform distribution.
...
>>> init = mx.initializer.Mixed(['bias', '.*'], [mx.init.Zero(), mx.init.Uniform(0.1)])
>>> module.init_params(init)
>>>
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected1_weight
[[ 0.0097627 0.01856892 0.04303787]]
fullyconnected1_bias
[ 0.]
"""
def __init__(self, patterns, initializers):
assert len(patterns) == len(initializers)
self.map = list(zip([re.compile(p) for p in patterns], initializers))
def __call__(self, name, arr):
for prog, init in self.map:
if prog.match(name):
init(name, arr)
return
raise ValueError('Parameter name %s did not match any pattern. Consider '
'adding a ".*" pattern at the end with a default Initializer.' % name)
@register
@alias("zeros")
class Zero(Initializer):
"""Initializes weights to zero.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize weights to zero.
...
>>> init = mx.initializer.Zero()
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[ 0. 0. 0.]]
"""
def __init__(self):
super(Zero, self).__init__()
def _init_weight(self, _, arr):
arr[:] = 0
@register
@alias("ones")
class One(Initializer):
"""Initializes weights to one.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize weights to one.
...
>>> init = mx.initializer.One()
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[ 1. 1. 1.]]
"""
def __init__(self):
super(One, self).__init__()
def _init_weight(self, _, arr):
arr[:] = 1
@register
class Constant(Initializer):
"""Initializes the weights to a given value.
The value passed in can be a scalar or a NDarray that matches the shape
of the parameter to be set.
Parameters
----------
value : float, NDArray
Value to set.
"""
def __init__(self, value):
super(Constant, self).__init__(value=value)
self.value = value
def _init_weight(self, _, arr):
arr[:] = self.value
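# A small usage sketch (illustrative, in the spirit of the doc examples above):
# >>> init = mx.init.Constant(0.3)
# >>> init.dumps()
# '["constant", {"value": 0.3}]'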
@register
class Uniform(Initializer):
"""Initializes weights with random values uniformly sampled from a given range.
Parameters
----------
scale : float, optional
The bound on the range of the generated random values.
Values are generated from the range [-`scale`, `scale`].
Default scale is 0.07.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize weights
>>> # to random values uniformly sampled between -0.1 and 0.1.
...
>>> init = mx.init.Uniform(0.1)
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[ 0.01360891 -0.02144304 0.08511933]]
"""
def __init__(self, scale=0.07):
super(Uniform, self).__init__(scale=scale)
self.scale = scale
def _init_weight(self, _, arr):
random.uniform(-self.scale, self.scale, out=arr)
@register
class Normal(Initializer):
"""Initializes weights with random values sampled from a normal distribution
with a mean of zero and standard deviation of `sigma`.
Parameters
----------
sigma : float, optional
Standard deviation of the normal distribution.
Default standard deviation is 0.01.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize weights
>>> # to random values sampled from a normal distribution.
...
>>> init = mx.init.Normal(0.5)
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[-0.3214761 -0.12660924 0.53789419]]
"""
def __init__(self, sigma=0.01):
super(Normal, self).__init__(sigma=sigma)
self.sigma = sigma
def _init_weight(self, _, arr):
random.normal(0, self.sigma, out=arr)
@register
class Orthogonal(Initializer):
"""Initialize weight as orthogonal matrix.
This initializer implements *Exact solutions to the nonlinear dynamics of
learning in deep linear neural networks*, available at
https://arxiv.org/abs/1312.6120.
Parameters
----------
scale : float optional
Scaling factor of weight.
rand_type: string optional
Use "uniform" or "normal" random number to initialize weight.
"""
def __init__(self, scale=1.414, rand_type="uniform"):
super(Orthogonal, self).__init__(scale=scale, rand_type=rand_type)
self.scale = scale
self.rand_type = rand_type
def _init_weight(self, _, arr):
nout = arr.shape[0]
nin = np.prod(arr.shape[1:])
if self.rand_type == "uniform":
tmp = random.uniform(-1.0, 1.0, shape=(nout, nin)).asnumpy()
elif self.rand_type == "normal":
tmp = random.normal(0.0, 1.0, shape=(nout, nin)).asnumpy()
u, _, v = np.linalg.svd(tmp, full_matrices=False) # pylint: disable=invalid-name
if u.shape == tmp.shape:
res = u
else:
res = v
res = self.scale * res.reshape(arr.shape)
arr[:] = res
@register
class Xavier(Initializer):
"""Returns an initializer performing "Xavier" initialization for weights.
This initializer is designed to keep the scale of gradients roughly the same
in all layers.
By default, `rnd_type` is ``'uniform'`` and `factor_type` is ``'avg'``,
the initializer fills the weights with random numbers in the range
of :math:`[-c, c]`, where :math:`c = \\sqrt{\\frac{3.}{0.5 * (n_{in} + n_{out})}}`.
:math:`n_{in}` is the number of neurons feeding into weights, and :math:`n_{out}` is
the number of neurons the result is fed to.
If `rnd_type` is ``'uniform'`` and `factor_type` is ``'in'``,
the :math:`c = \\sqrt{\\frac{3.}{n_{in}}}`.
Similarly when `factor_type` is ``'out'``, the :math:`c = \\sqrt{\\frac{3.}{n_{out}}}`.
If `rnd_type` is ``'gaussian'`` and `factor_type` is ``'avg'``,
the initializer fills the weights with numbers from normal distribution with
a standard deviation of :math:`\\sqrt{\\frac{3.}{0.5 * (n_{in} + n_{out})}}`.
Parameters
----------
rnd_type: str, optional
Random generator type, can be ``'gaussian'`` or ``'uniform'``.
factor_type: str, optional
Can be ``'avg'``, ``'in'``, or ``'out'``.
magnitude: float, optional
Scale of random number.
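    Example
    -------
    >>> # An illustrative sketch: with the defaults below, a 128x256 weight
    >>> # matrix (n_in=256, n_out=128) is filled with values drawn uniformly
    >>> # from [-c, c], where c = sqrt(3. / (0.5 * (256 + 128))) ~= 0.125.
    ...
    >>> init = mx.init.Xavier(rnd_type="uniform", factor_type="avg", magnitude=3)
    >>> module.init_params(init)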
"""
def __init__(self, rnd_type="uniform", factor_type="avg", magnitude=3):
super(Xavier, self).__init__(rnd_type=rnd_type, factor_type=factor_type,
magnitude=magnitude)
self.rnd_type = rnd_type
self.factor_type = factor_type
self.magnitude = float(magnitude)
def _init_weight(self, name, arr):
shape = arr.shape
hw_scale = 1.
if len(shape) < 2:
raise ValueError('Xavier initializer cannot be applied to vector {0}. It requires at'
' least 2D.'.format(name))
if len(shape) > 2:
hw_scale = np.prod(shape[2:])
fan_in, fan_out = shape[1] * hw_scale, shape[0] * hw_scale
factor = 1.
if self.factor_type == "avg":
factor = (fan_in + fan_out) / 2.0
elif self.factor_type == "in":
factor = fan_in
elif self.factor_type == "out":
factor = fan_out
else:
raise ValueError("Incorrect factor type")
scale = np.sqrt(self.magnitude / factor)
if self.rnd_type == "uniform":
random.uniform(-scale, scale, out=arr)
elif self.rnd_type == "gaussian":
random.normal(0, scale, out=arr)
else:
raise ValueError("Unknown random type")
@register
class MSRAPrelu(Xavier):
"""Initialize the weight according to a MSRA paper.
This initializer implements *Delving Deep into Rectifiers: Surpassing
Human-Level Performance on ImageNet Classification*, available at
https://arxiv.org/abs/1502.01852.
    This initializer is proposed for initialization related to ReLU activations;
    it makes some changes on top of the Xavier method.
Parameters
----------
factor_type: str, optional
Can be ``'avg'``, ``'in'``, or ``'out'``.
slope: float, optional
initial slope of any PReLU (or similar) nonlinearities.
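    Example
    -------
    >>> # An illustrative sketch: with slope=0.25 the magnitude passed on to
    >>> # Xavier is 2. / (1 + 0.25**2) ~= 1.88, and weights are drawn from a
    >>> # zero-mean Gaussian scaled accordingly.
    ...
    >>> init = mx.init.MSRAPrelu(factor_type="in", slope=0.25)
    >>> module.init_params(init)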
"""
def __init__(self, factor_type="avg", slope=0.25):
magnitude = 2. / (1 + slope ** 2)
super(MSRAPrelu, self).__init__("gaussian", factor_type, magnitude)
self._kwargs = {'factor_type': factor_type, 'slope': slope}
@register
class Bilinear(Initializer):
"""Initialize weight for upsampling layers."""
def __init__(self):
super(Bilinear, self).__init__()
def _init_weight(self, _, arr):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
f = np.ceil(shape[3] / 2.)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i / shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
@register
class LSTMBias(Initializer):
"""Initialize all biases of an LSTMCell to 0.0 except for
the forget gate whose bias is set to custom value.
Parameters
----------
forget_bias: float, default 1.0
bias for the forget gate. Jozefowicz et al. 2015 recommends
setting this to 1.0.
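    Example
    -------
    >>> # An illustrative sketch: given 'module', an instance of
    >>> # 'mxnet.module.Module' wrapping an LSTM, set every forget gate bias
    >>> # to 1.0 and all other gate biases to 0.0.
    ...
    >>> init = mx.init.LSTMBias(forget_bias=1.0)
    >>> module.init_params(init)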
"""
def __init__(self, forget_bias=1.0):
super(LSTMBias, self).__init__(forget_bias=forget_bias)
self.forget_bias = forget_bias
def _init_weight(self, name, arr):
arr[:] = 0.0
        # In the case of LSTMCell the forget gate is the second
        # of the 4 LSTM gates; we modify the corresponding values.
num_hidden = int(arr.shape[0] / 4)
arr[num_hidden:2*num_hidden] = self.forget_bias
@register
class FusedRNN(Initializer):
"""Initialize parameters for fused rnn layers.
Parameters
----------
    init : Initializer
        Initializer applied to the unpacked weights. Falls back to the global
        initializer if None.
    num_hidden : int
        Should be the same as the argument passed to FusedRNNCell.
    num_layers : int
        Should be the same as the argument passed to FusedRNNCell.
    mode : str
        Should be the same as the argument passed to FusedRNNCell.
    bidirectional : bool
        Should be the same as the argument passed to FusedRNNCell.
    forget_bias : float
        Should be the same as the argument passed to FusedRNNCell.
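    Example
    -------
    >>> # An illustrative sketch (the sizes are hypothetical): initialize the
    >>> # packed parameters of a 2-layer LSTM FusedRNNCell with 256 hidden
    >>> # units, applying Xavier initialization to the unpacked weights.
    ...
    >>> init = mx.init.FusedRNN(mx.init.Xavier(), num_hidden=256,
    ...                         num_layers=2, mode='lstm')
    >>> module.init_params(init)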
"""
def __init__(self, init, num_hidden, num_layers, mode, bidirectional=False, forget_bias=1.0):
if isinstance(init, string_types):
klass, kwargs = json.loads(init)
init = registry._REGISTRY[klass.lower()](**kwargs)
super(FusedRNN, self).__init__(init=init.dumps() if init is not None else None,
num_hidden=num_hidden, num_layers=num_layers, mode=mode,
bidirectional=bidirectional, forget_bias=forget_bias)
self._init = init
self._num_hidden = num_hidden
self._num_layers = num_layers
self._mode = mode
self._bidirectional = bidirectional
self._forget_bias = forget_bias
def _init_weight(self, desc, arr): # pylint: disable=arguments-differ
from .rnn import rnn_cell
cell = rnn_cell.FusedRNNCell(self._num_hidden, self._num_layers,
self._mode, self._bidirectional,
forget_bias=self._forget_bias, prefix='')
args = cell.unpack_weights({'parameters': arr})
for name in args:
arg_desc = InitDesc(name, global_init=desc.global_init)
# for lstm bias, we use a custom initializer
# which adds a bias to the forget gate
if self._mode == 'lstm' and name.endswith("_f_bias"):
args[name][:] = self._forget_bias
elif self._init is None:
desc.global_init(arg_desc, args[name])
else:
self._init(arg_desc, args[name])
arr[:] = cell.pack_weights(args)['parameters']
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 HashData Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from datetime import timedelta
import struct
from pytidbrep.expcetion import InvalidRowData
from pytidbrep.pb_binlog.binlog_pb2 import DDL
from pytidbrep.pb_binlog.binlog_pb2 import DML
from pytidbrep.pb_binlog.binlog_pb2 import Delete
from pytidbrep.pb_binlog.binlog_pb2 import Insert
from pytidbrep.pb_binlog.binlog_pb2 import Update
from pytidbrep.pb_binlog.binlog_pb2 import column
SIGNMASK = 0x8000000000000000
DIGITS_PER_WORD = 9 # A word holds 9 digits.
WORD_SIZE = 4 # A word is 4 bytes int32.
DIG2BYTES = [0, 1, 1, 2, 2, 3, 3, 4, 4, 4]
TYPE_BIT = 16
TYPE_TINY = 1
TYPE_SHORT = 2
TYPE_INT24 = 9
TYPE_LONG = 3
TYPE_LONGLONG = 8
TYPE_FLOAT = 4
TYPE_DOUBLE = 5
TYPE_DECIMAL = 0
TYPE_NEWDECIMAL = 0xf6
TYPE_DATE = 10
TYPE_NEWDATE = 14
TYPE_DATETIME = 12
TYPE_TIMESTAMP = 7
TYPE_YEAR = 13
TYPE_DURATION = 11
TYPE_VARCHAR = 15
TYPE_VARSTRING = 0xfd
TYPE_STRING = 0xfe
TYPE_TINYBLOB = 0xf9
TYPE_BLOB = 0xfc
TYPE_MEDIUMBLOB = 0xfa
TYPE_LONGBLOB = 0xfb
TYPE_ENUM = 0xf7
TYPE_SET = 0xf8
TYPE_NULL = 6
TYPE_GEOMETRY = 0xff
MYSQL_TYPE_BIT_STR = "bit"
MYSQL_TYPE_TINYINT = "tinyint"
MYSQL_TYPE_SMALLINT = "smallint"
MYSQL_TYPE_MEDIUMINT = "mediumint"
MYSQL_TYPE_INT = "int"
MYSQL_TYPE_BIGINT = "bigint"
MYSQL_TYPE_DECIMAL = "decimal"
MYSQL_TYPE_FLOAT = "float"
MYSQL_TYPE_DOUBLE = "double"
MYSQL_TYPE_DATE = "date"
MYSQL_TYPE_DATETIME = "datetime"
MYSQL_TYPE_TIMESTAMP = "timestamp"
MYSQL_TYPE_TIME = "time"
MYSQL_TYPE_YEAR = "year"
MYSQL_TYPE_CHAR = "char"
MYSQL_TYPE_VARCHAR = "varchar"
MYSQL_TYPE_BINARY = "binary"
MYSQL_TYPE_VARBINARY = "varbinary"
MYSQL_TYPE_TINYBLOB = "tinyblob"
MYSQL_TYPE_TINYTEXT = "tinytext"
MYSQL_TYPE_BLOB = "blob"
MYSQL_TYPE_TEXT = "text"
MYSQL_TYPE_MEDIUMBLOB = "mediumblob"
MYSQL_TYPE_MEDIUMTEXT = "mediumtext"
MYSQL_TYPE_LONGBLOB = "longblob"
MYSQL_TYPE_LONGTEXT = "longtext"
MYSQL_TYPE_ENUM = "enum"
MYSQL_TYPE_GEOMETRY = "geometry"
MYSQL_TYPE_NULL = "null"
MYSQL_TYPE_VARSTRING = "var_string"
MYSQL_TYPE_UNSPECIFIED = "unspecified"
def get_unicode(s):
if isinstance(s, unicode):
return s
else:
return s.decode('utf8')
def format_column(t, v):
_, mysql_type = t
if v is None:
return "NULL"
elif mysql_type in (MYSQL_TYPE_TINYTEXT, MYSQL_TYPE_MEDIUMTEXT,
MYSQL_TYPE_LONGTEXT, MYSQL_TYPE_TEXT, MYSQL_TYPE_CHAR,
MYSQL_TYPE_VARCHAR):
return '"%s"' % get_unicode(v)
elif mysql_type in (MYSQL_TYPE_BINARY, MYSQL_TYPE_VARBINARY,
MYSQL_TYPE_TINYBLOB, MYSQL_TYPE_MEDIUMBLOB,
MYSQL_TYPE_LONGBLOB, MYSQL_TYPE_BLOB):
return '"%s"' % v.encode('hex')
elif mysql_type in (MYSQL_TYPE_DATE, MYSQL_TYPE_DATETIME,
MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_TIME,
MYSQL_TYPE_YEAR):
return '"%s"' % str(v)
else:
return str(v)
def int2byte(i):
return struct.pack(b"!B", i)
def read_be_uint64(buf):
return struct.unpack(b">Q", buf)[0]
def read_int8(buf):
return struct.unpack(b">b", buf)[0]
def read_uint8(buf):
return struct.unpack(b">B", buf)[0]
def read_uvarint(buf):
'''
read_uvarint decodes a uint64 from buf and returns that value and the
number of bytes read (> 0). If an error occurred, the value is 0
and the number of bytes n is <= 0 meaning:
n == 0: buf too small
n < 0: value larger than 64 bits (overflow)
and -n is the number of bytes read
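    For example (illustrative), the base-128 encoded bytes '\x96\x01'
    decode to 150 using 2 bytes:
    >>> read_uvarint(b'\x96\x01')
    (150, 2)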
'''
x = 0
s = 0
for i in range(len(buf)):
b = read_uint8(buf[i])
if b < 0x80:
if i > 9 or i == 9 and b > 1:
return 0, -(i + 1) # overflow
return x | (b << s), i + 1
x |= (b & 0x7f) << s
s += 7
return 0, 0
def read_varint(buf):
'''
read_varint decodes an int64 from buf and returns that value and the
number of bytes read (> 0). If an error occurred, the value is 0
and the number of bytes n is <= 0 with the following meaning:
n == 0: buf too small
n < 0: value larger than 64 bits (overflow)
and -n is the number of bytes read
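    For example (illustrative), the zigzag-encoded byte '\x03' decodes
    to -2 using 1 byte:
    >>> read_varint(b'\x03')
    (-2, 1)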
'''
ux, n = read_uvarint(buf) # ok to continue in presence of error
x = ux >> 1
if ux & 1 != 0:
x = ~x
return x, n
def read_int32_word(buf, size):
if read_int8(buf[0]) & 0x80 > 0:
pad = 0xFF
else:
pad = 0
tmp = bytearray(WORD_SIZE)
for i in range(WORD_SIZE - size):
tmp[i] = pad
offset = WORD_SIZE - size
for i in range(offset, WORD_SIZE):
tmp[i] = buf[i - offset]
x = struct.unpack(b">i", str(tmp))[0]
return x
def decimal_bin_size(precision, frac):
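    """Return the byte size of the binary encoding of a decimal value.
    For example (illustrative): a DECIMAL(10, 2) value has 8 integer digits
    and 2 fractional digits, so it needs DIG2BYTES[8] = 4 bytes plus
    DIG2BYTES[2] = 1 byte, i.e. decimal_bin_size(10, 2) == 5.
    """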
digits_int = precision - frac
words_int = digits_int / DIGITS_PER_WORD
words_frac = frac / DIGITS_PER_WORD
xint = digits_int - words_int * DIGITS_PER_WORD
xfrac = frac - words_frac * DIGITS_PER_WORD
return words_int * WORD_SIZE + DIG2BYTES[xint] + \
words_frac * WORD_SIZE + DIG2BYTES[xfrac]
class BinLogEvent(object):
def __init__(self, binlog):
self.tp = binlog.tp
self.commit_ts = binlog.commit_ts
@classmethod
def type_name(cls, tp):
if tp == DML:
return "DML"
elif tp == DDL:
return "DDL"
else:
return "Unknown"
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "%s: %s" % (self.commit_ts, self.type_name(self.tp))
class DDLEvent(BinLogEvent):
def __init__(self, binlog):
super(DDLEvent, self).__init__(binlog)
self._statement = get_unicode(binlog.ddl_query)
@property
def statement(self):
'''Return DDL statement
@return: A unicode string of DDL statement
'''
return self._statement
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "%s: %s" % (super(DDLEvent, self).__unicode__(), self.statement)
class XidEvent(BinLogEvent):
"""A COMMIT event
"""
def __init__(self, ts):
self.commit_ts = ts
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "Transaction at: %s" % (self.commit_ts)
class RowsEvent(BinLogEvent):
NIL_FLAG = 0
BYTES_FLAG = 1
COMPACTBYTES_FLAG = 2
INT_FLAG = 3
UINT_FLAG = 4
FLOAT_FLAG = 5
DECIMAL_FLAG = 6
DURATION_FLAG = 7
VARINT_FLAG = 8
UVARINT_FLAG = 9
MAX_FLAG = 250
def __init__(self, binlog, event):
super(RowsEvent, self).__init__(binlog)
self._schema = get_unicode(event.schema_name)
self._table = get_unicode(event.table_name)
self._dml_tp = event.tp
@property
def schema(self):
'''Return table's schema name of the event
@return: A unicode string of schema
'''
return self._schema
@property
def table(self):
'''Return table's name of the event
@return: A unicode string of table name
'''
return self._table
@classmethod
def dml_type_name(cls, tp):
if tp == Insert:
return "INSERT"
elif tp == Update:
return "UPDATE"
elif tp == Delete:
return "DELETE"
else:
return "UNKNOWN"
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
parent = super(RowsEvent, self).__unicode__()
return "%s: %s %s.%s" % (parent, self.dml_type_name(self._dml_tp),
self.schema, self.table)
@classmethod
def parse_int(cls, row, pos, size):
if size - pos < 8:
raise InvalidRowData('insufficient bytes to decode value')
u = read_be_uint64(row[pos:pos + 8])
v = SIGNMASK ^ u
return v, 8
@classmethod
def parse_uint(cls, row, pos, size):
if size - pos < 8:
raise InvalidRowData('insufficient bytes to decode value')
v = read_be_uint64(row[pos:pos + 8])
return v, 8
@classmethod
def parse_varint(cls, row, pos, size):
v, n = read_varint(row[pos:pos + 10]) # needs at most 10 bytes
if n > 0:
return v, n
if n < 0:
raise InvalidRowData("value larger than 64 bits")
raise InvalidRowData("insufficient bytes to decode value")
@classmethod
def parse_uvarint(cls, row, pos, size):
v, n = read_uvarint(row[pos:pos + 10]) # needs at most 10 bytes
if n > 0:
return v, n
if n < 0:
raise InvalidRowData("value larger than 64 bits")
raise InvalidRowData("insufficient bytes to decode value")
@classmethod
def parse_float(cls, row, pos, size):
if size - pos < 8:
raise InvalidRowData('insufficient bytes to decode value')
tmp = bytearray(row[pos:pos + 8])
if tmp[0] & 0x80 > 0:
tmp[0] &= 0x7F
else:
u = struct.unpack(b">Q", str(tmp))[0]
u = ~u
tmp = struct.pack(b">q", u)
v = struct.unpack(b">d", str(tmp))[0]
return v, 8
@classmethod
def parse_bytes(cls, row, pos, size):
ENC_GROUP_SIZE = 8
ENC_MARKER = 0xFF
ENC_PAD = 0x0
old_pos = pos
retval = ""
while True:
if size - pos < ENC_GROUP_SIZE + 1:
raise InvalidRowData("insufficient bytes to decode value")
group = row[pos:pos + ENC_GROUP_SIZE]
marker = row[pos + ENC_GROUP_SIZE]
pad_count = ENC_MARKER - marker
if pad_count > ENC_GROUP_SIZE:
raise InvalidRowData("invalid marker byte")
            real_group_size = ENC_GROUP_SIZE - pad_count
            retval += group[:real_group_size]
            pos += ENC_GROUP_SIZE + 1
            if pad_count != 0:
                pad_byte = ENC_PAD
                for v in group[real_group_size:]:
if v != pad_byte:
raise InvalidRowData("invalid padding byte")
break
return retval, pos - old_pos
@classmethod
def parse_compact_bytes(cls, row, pos, size):
n, s = cls.parse_varint(row, pos, size)
if size - pos - s < n:
            raise InvalidRowData(
                "insufficient bytes to decode value, expected length: %s" % n)
return row[pos + s:pos + s + n], s + n
@classmethod
def parse_decimal(cls, row, pos, size):
if size - pos < 3:
raise InvalidRowData('insufficient bytes to decode value')
precision = read_int8(row[pos])
frac = read_int8(row[pos + 1])
bin_size = decimal_bin_size(precision, frac)
if size - pos < bin_size + 2:
raise InvalidRowData("insufficient bytes to decode value")
bin = row[pos + 2:pos + 2 + bin_size]
bin_pos = 0
if read_int8(bin[bin_pos]) & 0x80 > 0:
negitive = False
retval = ''
else:
negitive = True
retval = '-'
bin = bytearray(bin)
bin[0] ^= 0x80
bin = str(bin)
# The number of *decimal* digits before the point.
digits_int = precision - frac
# The number of 32bit words before the point.
words_int = digits_int / DIGITS_PER_WORD
# The number of leading *decimal* digits not in a word
leading_digits = digits_int - words_int * DIGITS_PER_WORD
# The number of 32bit words after the point.
words_frac = frac / DIGITS_PER_WORD
# The number of trailing *decimal* digits not in a word
trailing_digits = frac - words_frac * DIGITS_PER_WORD
if leading_digits > 0:
i = DIG2BYTES[leading_digits]
x = read_int32_word(bin[bin_pos:], i)
x = ~x if negitive else x
retval += str(x)
bin_pos += i
for i in range(0, words_int * WORD_SIZE, WORD_SIZE):
x = read_int32_word(bin[bin_pos + i:], WORD_SIZE)
x = ~x if negitive else x
retval += str(x)
bin_pos += words_int * WORD_SIZE
retval += '.'
for i in range(0, words_frac * WORD_SIZE, WORD_SIZE):
x = read_int32_word(bin[bin_pos + i:], WORD_SIZE)
x = ~x if negitive else x
retval += str(x)
bin_pos += words_frac * WORD_SIZE
if trailing_digits > 0:
i = DIG2BYTES[trailing_digits]
x = read_int32_word(bin[bin_pos:], i)
x = ~x if negitive else x
retval += str(x)
return retval, bin_size + 2
@classmethod
def parse_duration(cls, row, pos, size):
v, s = cls.parse_int(row, pos, size)
d = timedelta(microseconds=v / 1000)
return d, s
@classmethod
def parse_one_column(cls, value):
pos = 0
size = len(value)
if size - pos < 1:
raise InvalidRowData("Invalid encoded key")
flag = read_int8(value[pos])
pos += 1
if cls.INT_FLAG == flag:
v, _ = cls.parse_int(value, pos, size)
elif cls.UINT_FLAG == flag:
v, _ = cls.parse_uint(value, pos, size)
elif cls.VARINT_FLAG == flag:
v, _ = cls.parse_varint(value, pos, size)
elif cls.UVARINT_FLAG == flag:
v, _ = cls.parse_uvarint(value, pos, size)
elif cls.FLOAT_FLAG == flag:
v, _ = cls.parse_float(value, pos, size)
elif cls.BYTES_FLAG == flag:
v, _ = cls.parse_bytes(value, pos, size)
elif cls.COMPACTBYTES_FLAG == flag:
v, _ = cls.parse_compact_bytes(value, pos, size)
elif cls.DECIMAL_FLAG == flag:
v, _ = cls.parse_decimal(value, pos, size)
elif cls.DURATION_FLAG == flag:
v, _ = cls.parse_duration(value, pos, size)
elif cls.NIL_FLAG == flag:
v = None
else:
raise InvalidRowData("Invalid encoded key")
return v
@classmethod
def parse_insert_and_delete_row(cls, row):
values = {}
types = {}
for c in row:
col = column.FromString(c)
tp = read_uint8(col.tp)
mysql_type = col.mysql_type
name = get_unicode(col.name)
value = cls.parse_one_column(col.value)
if value is not None and \
mysql_type in (
MYSQL_TYPE_TINYTEXT,
MYSQL_TYPE_MEDIUMTEXT,
MYSQL_TYPE_LONGTEXT,
MYSQL_TYPE_TEXT,
MYSQL_TYPE_CHAR,
MYSQL_TYPE_VARCHAR
):
value = value.decode('utf-8')
values[name] = value
types[name] = (tp, mysql_type)
return types, values
@classmethod
def parse_update_row(cls, row):
old_values = {}
new_values = {}
types = {}
for c in row:
col = column.FromString(c)
tp = read_uint8(col.tp)
mysql_type = col.mysql_type
            name = get_unicode(col.name)
value = cls.parse_one_column(col.value)
changed_value = cls.parse_one_column(col.changed_value)
old_values[name] = value
new_values[name] = changed_value
types[name] = (tp, mysql_type)
return types, old_values, new_values
class DeleteRowsEvent(RowsEvent):
"""This event is trigger when a row in the database is removed
For each row you have a hash with a single key:
values which contain the data of the removed line.
"""
def __init__(self, binlog, event):
super(DeleteRowsEvent, self).__init__(binlog, event)
self._types, self._values = self.parse_insert_and_delete_row(event.row)
@property
def types(self):
return self._types
@property
def values(self):
return self._values
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
parent = super(DeleteRowsEvent, self).__unicode__()
values = self.values
types = self.types
s = ''
for col in values:
s += "%s %s, " % (col, format_column(types[col], values[col]))
return "%s: %s" % (parent, s)
class WriteRowsEvent(RowsEvent):
"""This event is triggered when a row in database is added
For each row you have a hash with a single key:
values which contain the data of the new line.
"""
def __init__(self, binlog, event):
super(WriteRowsEvent, self).__init__(binlog, event)
self._types, self._values = self.parse_insert_and_delete_row(event.row)
@property
def types(self):
return self._types
@property
def values(self):
return self._values
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
parent = super(WriteRowsEvent, self).__unicode__()
values = self.values
types = self.types
s = ''
for col in values:
s += "%s %s, " % (col, format_column(types[col], values[col]))
return "%s: %s" % (parent, s)
class UpdateRowsEvent(RowsEvent):
"""This event is triggered when a row in the database is changed
For each row you got a hash with two keys:
* before_values
* after_values
"""
def __init__(self, binlog, event):
super(UpdateRowsEvent, self).__init__(binlog, event)
self._types, self._before_values, self._after_values = \
self.parse_update_row(event.row)
@property
def types(self):
return self._types
@property
def before_values(self):
return self._before_values
@property
def after_values(self):
return self._after_values
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
parent = super(UpdateRowsEvent, self).__unicode__()
before_values = self.before_values
after_values = self.after_values
types = self.types
s = ''
for col in before_values:
s += "%s %s => %s, " % (
col, format_column(types[col], before_values[col]),
format_column(types[col], after_values[col]))
return "%s: %s" % (parent, s)
|
|
# Copyright 2014-2015 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
defines all "built-in" classes that implement declarations compare
functionality according to some criteria
"""
import os
import re
from . import algorithm
from . import variable
from . import namespace
from . import calldef
from . import cpptypes
from . import templates
from . import class_declaration
from pygccxml import utils
class matcher_base_t(object):
"""matcher_base_t class defines interface for classes that will implement
compare functionality according to some criteria.
"""
def __init__(self):
object.__init__(self)
def __call__(self, decl):
raise NotImplementedError(
"matcher must always implement the __call__() method.")
def __invert__(self):
"""not-operator (~)"""
return not_matcher_t(self)
def __and__(self, other):
"""and-operator (&)"""
return and_matcher_t([self, other])
def __or__(self, other):
"""or-operator (|)"""
return or_matcher_t([self, other])
def __str__(self):
return "base class for all matchers"
class and_matcher_t(matcher_base_t):
"""
Combine several other matchers with "&" (and) operator.
For example: find all private functions with name XXX
.. code-block:: python
matcher = access_type_matcher_t( 'private' ) & \
calldef_matcher_t( name='XXX' )
"""
def __init__(self, matchers):
matcher_base_t.__init__(self)
self.matchers = matchers
def __call__(self, decl):
for matcher in self.matchers:
if not matcher(decl):
return False
return True
def __str__(self):
return " & ".join(["(%s)" % str(x) for x in self.matchers])
class or_matcher_t(matcher_base_t):
"""Combine several other matchers with "|" (or) operator.
For example: find all functions and variables with name 'XXX'
.. code-block:: python
matcher = variable_matcher_t( name='XXX' ) | \
calldef_matcher_t( name='XXX' )
"""
def __init__(self, matchers):
matcher_base_t.__init__(self)
self.matchers = matchers
def __call__(self, decl):
for matcher in self.matchers:
if matcher(decl):
return True
return False
def __str__(self):
return " | ".join(["(%s)" % str(x) for x in self.matchers])
class not_matcher_t(matcher_base_t):
"""
return the inverse result of a matcher
For example: find all public and protected declarations
.. code-block:: python
matcher = ~access_type_matcher_t( 'private' )
"""
def __init__(self, matcher):
matcher_base_t.__init__(self)
self.matcher = matcher
def __call__(self, decl):
return not self.matcher(decl)
def __str__(self):
return "~(%s)" % str(self.matcher)
class declaration_matcher_t(matcher_base_t):
"""
    Instance of this class will match declarations by the following criteria:
- declaration name, also could be fully qualified name
Example: `wstring` or `::std::wstring`
- declaration type
Example: :class:`class_t`, :class:`namespace_t`,
:class:`enumeration_t`
- location within file system ( file or directory )
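    For example (an illustrative sketch): find the class `::std::wstring`
    declared under the `/usr/include` directory
    .. code-block:: python
        matcher = declaration_matcher_t( name='::std::wstring',
                                         decl_type=class_declaration.class_t,
                                         header_dir='/usr/include' )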
"""
def __init__(
self,
name=None,
decl_type=None,
header_dir=None,
header_file=None):
"""
:param decl_type: declaration type to match by. For example
:class:`enumeration_t`.
:type decl_type: any class that derives from :class:`declaration_t`
class
:param name: declaration name, could be full name.
:type name: str
:param header_dir: absolute directory path
:type header_dir: str
:param header_file: absolute file path
:type header_file: str
"""
        # Another option is for pygccxml to create an absolute path using the
        # os.path.abspath function. But I think this is just wrong, because
        # abspath builds the path using the current working directory. This
        # behavior is fragile and makes bugs very difficult to find.
matcher_base_t.__init__(self)
self.decl_type = decl_type
self.__name = None
self.__opt_is_tmpl_inst = None
self.__opt_tmpl_name = None
self.__opt_is_full_name = None
self.__decl_name_only = None
# Set the name through the setter.
self.name = name
self.header_dir = header_dir
self.header_file = header_file
if self.header_dir:
self.header_dir = utils.normalize_path(self.header_dir)
if not os.path.isabs(self.header_dir):
raise RuntimeError(
"Path to header directory should be absolute!")
if self.header_file:
self.header_file = utils.normalize_path(self.header_file)
if not os.path.isabs(self.header_file):
raise RuntimeError("Path to header file should be absolute!")
@property
def name(self):
return self.__name
@name.setter
def name(self, name):
self.__name = name
if not self.__name:
self.__opt_is_tmpl_inst = None
self.__opt_tmpl_name = None
self.__opt_is_full_name = None
self.__decl_name_only = None
else:
self.__opt_is_tmpl_inst = templates.is_instantiation(self.__name)
self.__opt_tmpl_name = templates.name(self.__name)
if self.__opt_is_tmpl_inst:
if '::' in self.__opt_tmpl_name:
self.__opt_is_full_name = True
self.__decl_name_only = \
self.__opt_tmpl_name.split('::')[-1]
else:
self.__opt_is_full_name = False
self.__decl_name_only = self.__opt_tmpl_name
self.__name = templates.normalize(name)
else:
if '::' in self.__name:
self.__opt_is_full_name = True
self.__decl_name_only = self.__name.split('::')[-1]
else:
self.__opt_is_full_name = False
self.__decl_name_only = self.__name
def __str__(self):
msg = []
if self.decl_type is not None:
msg.append('(decl type==%s)' % self.decl_type.__name__)
if self.name is not None:
msg.append('(name==%s)' % self.name)
if self.header_dir is not None:
msg.append('(header dir==%s)' % self.header_dir)
if self.header_file is not None:
msg.append('(header file==%s)' % self.header_file)
if not msg:
msg.append('any')
return ' and '.join(msg)
def __call__(self, decl):
if self.decl_type is not None:
if not isinstance(decl, self.decl_type):
return False
if self.name is not None:
if not self.check_name(decl):
return False
if self.header_dir is not None:
if decl.location:
decl_dir = os.path.abspath(
os.path.dirname(decl.location.file_name))
decl_dir = utils.normalize_path(decl_dir)
if decl_dir[:len(self.header_dir)] != self.header_dir:
return False
else:
return False
if self.header_file is not None:
if decl.location:
decl_file = os.path.abspath(decl.location.file_name)
decl_file = utils.normalize_path(decl_file)
if decl_file != self.header_file:
return False
else:
return False
return True
def check_name(self, decl):
assert self.name is not None
if self.__opt_is_tmpl_inst:
if not self.__opt_is_full_name:
if self.name != templates.normalize(decl.name) \
and self.name != templates.normalize(decl.partial_name):
return False
else:
if self.name != templates.normalize(
algorithm.full_name(decl, with_defaults=True)) \
and self.name != templates.normalize(
algorithm.full_name(decl, with_defaults=False)):
return False
else:
if not self.__opt_is_full_name:
if self.name != decl.name and self.name != decl.partial_name:
return False
else:
if self.name != algorithm.full_name(decl, with_defaults=True) \
and self.name != algorithm.full_name(
decl, with_defaults=False):
return False
return True
def is_full_name(self):
return self.__opt_is_full_name
@property
def decl_name_only(self):
return self.__decl_name_only
class variable_matcher_t(declaration_matcher_t):
"""
    Instance of this class will match variables by the following criteria:
- :class:`declaration_matcher_t` criteria
- variable type. Example: :class:`int_t` or 'int'
"""
def __init__(
self,
name=None,
type=None,
header_dir=None,
header_file=None):
"""
:param type: variable type
:type type: string or instance of :class:`type_t` derived class
"""
declaration_matcher_t.__init__(
self,
name=name,
decl_type=variable.variable_t,
header_dir=header_dir,
header_file=header_file)
self.type = type
def __call__(self, decl):
if not super(variable_matcher_t, self).__call__(decl):
return False
if self.type is not None:
if isinstance(self.type, cpptypes.type_t):
if self.type != decl.type:
return False
else:
if self.type != decl.type.decl_string:
return False
return True
def __str__(self):
msg = [super(variable_matcher_t, self).__str__()]
if msg == ['any']:
msg = []
if self.type is not None:
msg.append('(value type==%s)' % str(self.type))
if not msg:
msg.append('any')
return ' and '.join(msg)
class namespace_matcher_t(declaration_matcher_t):
"""Instance of this class will match namespaces by name."""
def __init__(self, name=None):
declaration_matcher_t.__init__(
self,
name=name,
decl_type=namespace.namespace_t)
def __call__(self, decl):
if self.name and decl.name == '':
            # Unnamed namespaces have the same name as their parent; we should
            # prevent this from matching. The price is that the user has to
            # search for unnamed namespaces directly.
return False
return super(namespace_matcher_t, self).__call__(decl)
class calldef_matcher_t(declaration_matcher_t):
"""
Instance of this class will match callable by the following criteria:
* :class:`declaration_matcher_t` criteria
* return type. For example: :class:`int_t` or 'int'
* argument types
"""
def __init__(
self,
name=None,
return_type=None,
arg_types=None,
decl_type=None,
header_dir=None,
header_file=None):
"""
:param return_type: callable return type
:type return_type: string or instance of :class:`type_t` derived class
:type arg_types: list
        :param arg_types: list of function argument types. Any item within
                          the list can be a string or an instance of a
                          :class:`type_t` derived class. If you don't want
                          some argument to participate in the match, you can
                          put None.
For example:
.. code-block:: python
calldef_matcher_t( arg_types=[ 'int &', None ] )
        will match all functions that take two arguments, where the first one
        is a reference to an integer and the second is anything.
"""
if None is decl_type:
decl_type = calldef.calldef_t
declaration_matcher_t.__init__(
self,
name=name,
decl_type=decl_type,
header_dir=header_dir,
header_file=header_file)
self.return_type = return_type
self.arg_types = arg_types
def __call__(self, decl):
if not super(calldef_matcher_t, self).__call__(decl):
return False
if self.return_type is not None \
and not self.__compare_types(self.return_type, decl.return_type):
return False
if self.arg_types:
if isinstance(self.arg_types, (list, tuple)):
if len(self.arg_types) != len(decl.arguments):
return False
for type_or_str, arg in zip(self.arg_types, decl.arguments):
if type_or_str is None:
continue
else:
if not self.__compare_types(type_or_str, arg.type):
return False
return True
def __compare_types(self, type_or_str, type):
assert type_or_str
if type is None:
return False
if isinstance(type_or_str, cpptypes.type_t):
if type_or_str != type:
return False
else:
if type_or_str != type.decl_string:
return False
return True
def __str__(self):
msg = [super(calldef_matcher_t, self).__str__()]
if msg == ['any']:
msg = []
if self.return_type is not None:
msg.append('(return type==%s)' % str(self.return_type))
if self.arg_types:
for i, arg_type in enumerate(self.arg_types):
if arg_type is None:
msg.append('(arg %d type==any)' % i)
else:
msg.append('(arg %d type==%s)' % (i, str(arg_type)))
if not msg:
msg.append('any')
return ' and '.join(msg)
class operator_matcher_t(calldef_matcher_t):
"""
    Instance of this class will match operators by the following criteria:
        * :class:`calldef_matcher_t` criteria
        * operator symbol: =, !=, (), [], etc.
"""
def __init__(
self,
name=None,
symbol=None,
return_type=None,
arg_types=None,
decl_type=None,
header_dir=None,
header_file=None):
"""
:param symbol: operator symbol
:type symbol: str
"""
if None is decl_type:
decl_type = calldef.operator_t
calldef_matcher_t.__init__(
self,
name=name,
return_type=return_type,
arg_types=arg_types,
decl_type=decl_type,
header_dir=header_dir,
header_file=header_file)
self.symbol = symbol
def __call__(self, decl):
if not super(operator_matcher_t, self).__call__(decl):
return False
if self.symbol is not None:
if self.symbol != decl.symbol:
return False
return True
def __str__(self):
msg = [super(operator_matcher_t, self).__str__()]
if msg == ['any']:
msg = []
if self.symbol is not None:
msg.append('(symbol==%s)' % str(self.symbol))
if not msg:
msg.append('any')
return ' and '.join(msg)
class regex_matcher_t(matcher_base_t):
"""
    Instance of this class will match declarations using a regular expression.
    The user should supply a function that extracts the desired information
    from a declaration as a string. The matcher then matches that string
    against the user-supplied regular expression.
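    For example (an illustrative sketch): match every declaration whose name
    starts with "detail_"
    .. code-block:: python
        matcher = regex_matcher_t( r'detail_.*', lambda decl: decl.name )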
"""
def __init__(self, regex, function=None):
"""
:param regex: regular expression
:type regex: string, an instance of this class will compile it for you
        :param function: function that will be called to get information
                         from a declaration as a string. This function takes
                         a single argument - a reference to a declaration -
                         and should return a string. If function is None,
                         then the matcher will use the declaration name.
"""
matcher_base_t.__init__(self)
self.regex = re.compile(regex)
self.function = function
if self.function is None:
self.function = lambda decl: decl.name
def __call__(self, decl):
text = self.function(decl)
return bool(self.regex.match(text))
def __str__(self):
return '(regex=%s)' % self.regex
class access_type_matcher_t(matcher_base_t):
"""
    Instance of this class will match declarations by their access type:
    public, private or protected. If a declaration does not have an access
    type (for example, a free function), then `False` will be returned.
"""
def __init__(self, access_type):
"""
:param access_type: declaration access type, could be "public",
"private", "protected"
        :type access_type: str
"""
matcher_base_t.__init__(self)
self.access_type = access_type
def __call__(self, decl):
if not isinstance(decl.parent, class_declaration.class_t):
return False
return (
self.access_type == decl.parent.find_out_member_access_type(decl)
)
def __str__(self):
return '(access type=%s)' % self.access_type
class virtuality_type_matcher_t(matcher_base_t):
"""
    Instance of this class will match declarations by their virtuality: not
    virtual, virtual or pure virtual. If a declaration does not have the
    "virtual" property (for example, a free function), then `False` will be
    returned.
"""
def __init__(self, virtuality_type):
"""
        :param virtuality_type: declaration virtuality type
        :type virtuality_type: str; :class:`VIRTUALITY_TYPES` defines a few
                               constants for your convenience.
"""
matcher_base_t.__init__(self)
self.virtuality_type = virtuality_type
def __call__(self, decl):
if not isinstance(decl.parent, class_declaration.class_t):
return False
return self.virtuality_type == decl.virtuality
def __str__(self):
return '(virtuality type=%s)' % self.virtuality_type
class custom_matcher_t(matcher_base_t):
"""
Instance of this class will match declaration by user custom criteria.
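    For example (an illustrative sketch): match every template instantiation
    by looking for a '<' in the declaration name
    .. code-block:: python
        matcher = custom_matcher_t( lambda decl: '<' in decl.name )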
"""
def __init__(self, function):
"""
        :param function: callable that takes a single argument - a
                         declaration instance - and returns True or False
"""
matcher_base_t.__init__(self)
self.function = function
def __call__(self, decl):
return bool(self.function(decl))
def __str__(self):
return '(user criteria)'
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ActionGroupList(msrest.serialization.Model):
"""A list of action groups.
:param value: The list of action groups.
:type value: list[~$(python-base-namespace).v2018_09_01.models.ActionGroupResource]
:param next_link: Provides the link to retrieve the next set of elements.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ActionGroupResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ActionGroupList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class ActionGroupPatchBody(msrest.serialization.Model):
"""An action group object for the body of patch operations.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param enabled: Indicates whether this action group is enabled. If an action group is not
enabled, then none of its actions will be activated.
:type enabled: bool
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(ActionGroupPatchBody, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.enabled = kwargs.get('enabled', True)
class Resource(msrest.serialization.Model):
"""An azure resource object.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource Id.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs['location']
self.tags = kwargs.get('tags', None)
class ActionGroupResource(Resource):
"""An action group resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource Id.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param group_short_name: The short name of the action group. This will be used in SMS messages.
:type group_short_name: str
:param enabled: Indicates whether this action group is enabled. If an action group is not
enabled, then none of its receivers will receive communications.
:type enabled: bool
:param email_receivers: The list of email receivers that are part of this action group.
:type email_receivers: list[~$(python-base-namespace).v2018_09_01.models.EmailReceiver]
:param sms_receivers: The list of SMS receivers that are part of this action group.
:type sms_receivers: list[~$(python-base-namespace).v2018_09_01.models.SmsReceiver]
:param webhook_receivers: The list of webhook receivers that are part of this action group.
:type webhook_receivers: list[~$(python-base-namespace).v2018_09_01.models.WebhookReceiver]
:param itsm_receivers: The list of ITSM receivers that are part of this action group.
:type itsm_receivers: list[~$(python-base-namespace).v2018_09_01.models.ItsmReceiver]
:param azure_app_push_receivers: The list of AzureAppPush receivers that are part of this
action group.
:type azure_app_push_receivers:
list[~$(python-base-namespace).v2018_09_01.models.AzureAppPushReceiver]
:param automation_runbook_receivers: The list of AutomationRunbook receivers that are part of
this action group.
:type automation_runbook_receivers:
list[~$(python-base-namespace).v2018_09_01.models.AutomationRunbookReceiver]
:param voice_receivers: The list of voice receivers that are part of this action group.
:type voice_receivers: list[~$(python-base-namespace).v2018_09_01.models.VoiceReceiver]
:param logic_app_receivers: The list of logic app receivers that are part of this action group.
:type logic_app_receivers: list[~$(python-base-namespace).v2018_09_01.models.LogicAppReceiver]
:param azure_function_receivers: The list of azure function receivers that are part of this
action group.
:type azure_function_receivers:
list[~$(python-base-namespace).v2018_09_01.models.AzureFunctionReceiver]
:param arm_role_receivers: The list of ARM role receivers that are part of this action group.
Roles are Azure RBAC roles and only built-in roles are supported.
:type arm_role_receivers: list[~$(python-base-namespace).v2018_09_01.models.ArmRoleReceiver]
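    Example (an illustrative sketch; the receiver values are hypothetical):
    .. code-block:: python
        action_group = ActionGroupResource(
            location='Global',
            group_short_name='oncall',
            enabled=True,
            email_receivers=[EmailReceiver(name='primary',
                                           email_address='[email protected]')])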
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'group_short_name': {'max_length': 12, 'min_length': 0},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'group_short_name': {'key': 'properties.groupShortName', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'email_receivers': {'key': 'properties.emailReceivers', 'type': '[EmailReceiver]'},
'sms_receivers': {'key': 'properties.smsReceivers', 'type': '[SmsReceiver]'},
'webhook_receivers': {'key': 'properties.webhookReceivers', 'type': '[WebhookReceiver]'},
'itsm_receivers': {'key': 'properties.itsmReceivers', 'type': '[ItsmReceiver]'},
'azure_app_push_receivers': {'key': 'properties.azureAppPushReceivers', 'type': '[AzureAppPushReceiver]'},
'automation_runbook_receivers': {'key': 'properties.automationRunbookReceivers', 'type': '[AutomationRunbookReceiver]'},
'voice_receivers': {'key': 'properties.voiceReceivers', 'type': '[VoiceReceiver]'},
'logic_app_receivers': {'key': 'properties.logicAppReceivers', 'type': '[LogicAppReceiver]'},
'azure_function_receivers': {'key': 'properties.azureFunctionReceivers', 'type': '[AzureFunctionReceiver]'},
'arm_role_receivers': {'key': 'properties.armRoleReceivers', 'type': '[ArmRoleReceiver]'},
}
def __init__(
self,
**kwargs
):
super(ActionGroupResource, self).__init__(**kwargs)
self.group_short_name = kwargs.get('group_short_name', None)
self.enabled = kwargs.get('enabled', True)
self.email_receivers = kwargs.get('email_receivers', None)
self.sms_receivers = kwargs.get('sms_receivers', None)
self.webhook_receivers = kwargs.get('webhook_receivers', None)
self.itsm_receivers = kwargs.get('itsm_receivers', None)
self.azure_app_push_receivers = kwargs.get('azure_app_push_receivers', None)
self.automation_runbook_receivers = kwargs.get('automation_runbook_receivers', None)
self.voice_receivers = kwargs.get('voice_receivers', None)
self.logic_app_receivers = kwargs.get('logic_app_receivers', None)
self.azure_function_receivers = kwargs.get('azure_function_receivers', None)
self.arm_role_receivers = kwargs.get('arm_role_receivers', None)
class ArmRoleReceiver(msrest.serialization.Model):
"""An arm role receiver.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the arm role receiver. Names must be unique across all
receivers within an action group.
:type name: str
:param role_id: Required. The arm role id.
:type role_id: str
"""
_validation = {
'name': {'required': True},
'role_id': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'role_id': {'key': 'roleId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ArmRoleReceiver, self).__init__(**kwargs)
self.name = kwargs['name']
self.role_id = kwargs['role_id']
class AutomationRunbookReceiver(msrest.serialization.Model):
"""The Azure Automation Runbook notification receiver.
All required parameters must be populated in order to send to Azure.
:param automation_account_id: Required. The Azure automation account Id which holds this
     runbook and authenticates to the Azure resource.
:type automation_account_id: str
:param runbook_name: Required. The name for this runbook.
:type runbook_name: str
:param webhook_resource_id: Required. The resource id for webhook linked to this runbook.
:type webhook_resource_id: str
:param is_global_runbook: Required. Indicates whether this instance is global runbook.
:type is_global_runbook: bool
:param name: Indicates name of the webhook.
:type name: str
:param service_uri: The URI where webhooks should be sent.
:type service_uri: str
"""
_validation = {
'automation_account_id': {'required': True},
'runbook_name': {'required': True},
'webhook_resource_id': {'required': True},
'is_global_runbook': {'required': True},
}
_attribute_map = {
'automation_account_id': {'key': 'automationAccountId', 'type': 'str'},
'runbook_name': {'key': 'runbookName', 'type': 'str'},
'webhook_resource_id': {'key': 'webhookResourceId', 'type': 'str'},
'is_global_runbook': {'key': 'isGlobalRunbook', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'service_uri': {'key': 'serviceUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AutomationRunbookReceiver, self).__init__(**kwargs)
self.automation_account_id = kwargs['automation_account_id']
self.runbook_name = kwargs['runbook_name']
self.webhook_resource_id = kwargs['webhook_resource_id']
self.is_global_runbook = kwargs['is_global_runbook']
self.name = kwargs.get('name', None)
self.service_uri = kwargs.get('service_uri', None)
class AzureAppPushReceiver(msrest.serialization.Model):
"""The Azure mobile App push notification receiver.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the Azure mobile app push receiver. Names must be unique
across all receivers within an action group.
:type name: str
:param email_address: Required. The email address registered for the Azure mobile app.
:type email_address: str
"""
_validation = {
'name': {'required': True},
'email_address': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'email_address': {'key': 'emailAddress', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureAppPushReceiver, self).__init__(**kwargs)
self.name = kwargs['name']
self.email_address = kwargs['email_address']
class AzureFunctionReceiver(msrest.serialization.Model):
"""An azure function receiver.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the azure function receiver. Names must be unique across all
receivers within an action group.
:type name: str
:param function_app_resource_id: Required. The azure resource id of the function app.
:type function_app_resource_id: str
:param function_name: Required. The function name in the function app.
:type function_name: str
    :param http_trigger_url: Required. The http trigger url where http requests are sent.
:type http_trigger_url: str
"""
_validation = {
'name': {'required': True},
'function_app_resource_id': {'required': True},
'function_name': {'required': True},
'http_trigger_url': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'function_app_resource_id': {'key': 'functionAppResourceId', 'type': 'str'},
'function_name': {'key': 'functionName', 'type': 'str'},
'http_trigger_url': {'key': 'httpTriggerUrl', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureFunctionReceiver, self).__init__(**kwargs)
self.name = kwargs['name']
self.function_app_resource_id = kwargs['function_app_resource_id']
self.function_name = kwargs['function_name']
self.http_trigger_url = kwargs['http_trigger_url']
class Baseline(msrest.serialization.Model):
"""The baseline values for a single sensitivity value.
All required parameters must be populated in order to send to Azure.
:param sensitivity: Required. The sensitivity of the baseline. Possible values include: "Low",
"Medium", "High".
:type sensitivity: str or ~$(python-base-namespace).v2018_09_01.models.Sensitivity
:param low_thresholds: Required. The low thresholds of the baseline.
:type low_thresholds: list[float]
:param high_thresholds: Required. The high thresholds of the baseline.
:type high_thresholds: list[float]
:param timestamps: the array of timestamps of the baselines.
:type timestamps: list[~datetime.datetime]
:param prediction_result_type: The prediction result type of the baseline. Possible values
include: "0", "1", "2".
:type prediction_result_type: str or
~$(python-base-namespace).v2018_09_01.models.PredictionResultType
:param error_type: The error type of the baseline. Possible values include: "0", "1", "2", "3",
"4", "100", "200".
:type error_type: str or ~$(python-base-namespace).v2018_09_01.models.ErrorType
"""
_validation = {
'sensitivity': {'required': True},
'low_thresholds': {'required': True},
'high_thresholds': {'required': True},
}
_attribute_map = {
'sensitivity': {'key': 'sensitivity', 'type': 'str'},
'low_thresholds': {'key': 'lowThresholds', 'type': '[float]'},
'high_thresholds': {'key': 'highThresholds', 'type': '[float]'},
'timestamps': {'key': 'timestamps', 'type': '[iso-8601]'},
'prediction_result_type': {'key': 'PredictionResultType', 'type': 'str'},
'error_type': {'key': 'ErrorType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Baseline, self).__init__(**kwargs)
self.sensitivity = kwargs['sensitivity']
self.low_thresholds = kwargs['low_thresholds']
self.high_thresholds = kwargs['high_thresholds']
self.timestamps = kwargs.get('timestamps', None)
self.prediction_result_type = kwargs.get('prediction_result_type', None)
self.error_type = kwargs.get('error_type', None)
class BaselineMetadataValue(msrest.serialization.Model):
"""Represents a baseline metadata value.
:param name: The name of the metadata.
:type name: ~$(python-base-namespace).v2018_09_01.models.LocalizableString
:param value: The value of the metadata.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'LocalizableString'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(BaselineMetadataValue, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.value = kwargs.get('value', None)
class BaselineResponse(msrest.serialization.Model):
"""The response to a baseline query.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The metric baseline ID.
:vartype id: str
:ivar type: The resource type of the baseline resource.
:vartype type: str
:ivar name: The name and the display name of the metric, i.e. it is localizable string.
:vartype name: ~$(python-base-namespace).v2018_09_01.models.LocalizableString
:param timestamps: The array of timestamps of the baselines.
:type timestamps: list[~datetime.datetime]
:param baseline: The baseline values for each sensitivity.
:type baseline: list[~$(python-base-namespace).v2018_09_01.models.Baseline]
:param metdata: The baseline metadata values.
:type metdata: list[~$(python-base-namespace).v2018_09_01.models.BaselineMetadataValue]
:param prediction_result_type: The prediction result type of the baseline. Possible values
include: "0", "1", "2".
:type prediction_result_type: str or
~$(python-base-namespace).v2018_09_01.models.PredictionResultType
:param error_type: The error type of the baseline. Possible values include: "0", "1", "2", "3",
"4", "100", "200".
:type error_type: str or ~$(python-base-namespace).v2018_09_01.models.ErrorType
:param timespan: The timespan for which the data was retrieved. Its value consists of two
datetimes concatenated, separated by '/'. This may be adjusted in the future and returned back
from what was originally requested.
:type timespan: str
:param interval: The interval (window size) for which the metric data was returned in. This
may be adjusted in the future and returned back from what was originally requested. This is
not present if a metadata request was made.
:type interval: ~datetime.timedelta
:param aggregation: The aggregation type of the metric.
:type aggregation: str
:ivar internal_operation_id: internal operation id.
:vartype internal_operation_id: str
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'name': {'readonly': True},
'internal_operation_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'LocalizableString'},
'timestamps': {'key': 'timestamps', 'type': '[iso-8601]'},
'baseline': {'key': 'baseline', 'type': '[Baseline]'},
'metdata': {'key': 'metdata', 'type': '[BaselineMetadataValue]'},
'prediction_result_type': {'key': 'predictionResultType', 'type': 'str'},
'error_type': {'key': 'errorType', 'type': 'str'},
'timespan': {'key': 'properties.timespan', 'type': 'str'},
'interval': {'key': 'properties.interval', 'type': 'duration'},
'aggregation': {'key': 'properties.aggregation', 'type': 'str'},
'internal_operation_id': {'key': 'properties.internalOperationId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(BaselineResponse, self).__init__(**kwargs)
self.id = None
self.type = None
self.name = None
self.timestamps = kwargs.get('timestamps', None)
self.baseline = kwargs.get('baseline', None)
self.metdata = kwargs.get('metdata', None)
self.prediction_result_type = kwargs.get('prediction_result_type', None)
self.error_type = kwargs.get('error_type', None)
self.timespan = kwargs.get('timespan', None)
self.interval = kwargs.get('interval', None)
self.aggregation = kwargs.get('aggregation', None)
self.internal_operation_id = None
class CalculateBaselineResponse(msrest.serialization.Model):
"""The response to a calculate baseline call.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type: Required. The resource type of the baseline resource.
:type type: str
:param timestamps: The array of timestamps of the baselines.
:type timestamps: list[~datetime.datetime]
:param baseline: Required. The baseline values for each sensitivity.
:type baseline: list[~$(python-base-namespace).v2018_09_01.models.Baseline]
:param statistics: The statistics.
:type statistics:
~$(python-base-namespace).v2018_09_01.models.CalculateBaselineResponseStatistics
:ivar internal_operation_id: internal operation id.
:vartype internal_operation_id: str
:param error_type: The error type for calculating the baseline. Possible values include: "0",
"1", "2", "3", "4", "100", "200".
:type error_type: str or ~$(python-base-namespace).v2018_09_01.models.ErrorType
"""
_validation = {
'type': {'required': True},
'baseline': {'required': True},
'internal_operation_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'timestamps': {'key': 'timestamps', 'type': '[iso-8601]'},
'baseline': {'key': 'baseline', 'type': '[Baseline]'},
'statistics': {'key': 'statistics', 'type': 'CalculateBaselineResponseStatistics'},
'internal_operation_id': {'key': 'internalOperationId', 'type': 'str'},
'error_type': {'key': 'errorType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CalculateBaselineResponse, self).__init__(**kwargs)
self.type = kwargs['type']
self.timestamps = kwargs.get('timestamps', None)
self.baseline = kwargs['baseline']
self.statistics = kwargs.get('statistics', None)
self.internal_operation_id = None
self.error_type = kwargs.get('error_type', None)
class CalculateBaselineResponseStatistics(msrest.serialization.Model):
"""The statistics.
:param is_eligible: is series eligible for dynamic threshold analysis.
:type is_eligible: bool
:param status: The list of extended status for calculating the baseline.
:type status: list[str]
:param seasonality_period: The seasonality period for calculating the baseline.
:type seasonality_period: int
"""
_attribute_map = {
'is_eligible': {'key': 'isEligible', 'type': 'bool'},
'status': {'key': 'status', 'type': '[str]'},
'seasonality_period': {'key': 'seasonalityPeriod', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(CalculateBaselineResponseStatistics, self).__init__(**kwargs)
self.is_eligible = kwargs.get('is_eligible', None)
self.status = kwargs.get('status', None)
self.seasonality_period = kwargs.get('seasonality_period', None)
class EmailReceiver(msrest.serialization.Model):
"""An email receiver.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the email receiver. Names must be unique across all
receivers within an action group.
:type name: str
:param email_address: Required. The email address of this receiver.
:type email_address: str
:ivar status: The receiver status of the e-mail. Possible values include: "NotSpecified",
"Enabled", "Disabled".
:vartype status: str or ~$(python-base-namespace).v2018_09_01.models.ReceiverStatus
"""
_validation = {
'name': {'required': True},
'email_address': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'email_address': {'key': 'emailAddress', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EmailReceiver, self).__init__(**kwargs)
self.name = kwargs['name']
self.email_address = kwargs['email_address']
self.status = None
class EnableRequest(msrest.serialization.Model):
"""Describes a receiver that should be resubscribed.
All required parameters must be populated in order to send to Azure.
:param receiver_name: Required. The name of the receiver to resubscribe.
:type receiver_name: str
"""
_validation = {
'receiver_name': {'required': True},
}
_attribute_map = {
'receiver_name': {'key': 'receiverName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EnableRequest, self).__init__(**kwargs)
self.receiver_name = kwargs['receiver_name']
class ErrorResponse(msrest.serialization.Model):
"""Describes the format of Error response.
:param code: Error code.
:type code: str
:param message: Error message indicating why the operation failed.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
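# Illustrative usage (an assumption about how callers build these generated
# msrest models, not part of the generated code itself): optional fields are
# plain keyword arguments that default to None, and serialize() emits the wire
# names from _attribute_map, e.g.
#
#   error = ErrorResponse(code='BadRequest', message='Invalid timespan')
#   error.serialize()  # -> {'code': 'BadRequest', 'message': 'Invalid timespan'}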
class ItsmReceiver(msrest.serialization.Model):
"""An Itsm receiver.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the Itsm receiver. Names must be unique across all receivers
within an action group.
:type name: str
:param workspace_id: Required. OMS LA instance identifier.
:type workspace_id: str
:param connection_id: Required. Unique identification of the ITSM connection among multiple
connections defined in the above workspace.
:type connection_id: str
:param ticket_configuration: Required. JSON blob for the configurations of the ITSM action.
CreateMultipleWorkItems option will be part of this blob as well.
:type ticket_configuration: str
:param region: Required. Region in which workspace resides. Supported
values: 'centralindia', 'japaneast', 'southeastasia', 'australiasoutheast', 'uksouth', 'westcentralus', 'canadacentral', 'eastus', 'westeurope'.
:type region: str
"""
_validation = {
'name': {'required': True},
'workspace_id': {'required': True},
'connection_id': {'required': True},
'ticket_configuration': {'required': True},
'region': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'connection_id': {'key': 'connectionId', 'type': 'str'},
'ticket_configuration': {'key': 'ticketConfiguration', 'type': 'str'},
'region': {'key': 'region', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ItsmReceiver, self).__init__(**kwargs)
self.name = kwargs['name']
self.workspace_id = kwargs['workspace_id']
self.connection_id = kwargs['connection_id']
self.ticket_configuration = kwargs['ticket_configuration']
self.region = kwargs['region']
class LocalizableString(msrest.serialization.Model):
"""The localizable string class.
All required parameters must be populated in order to send to Azure.
:param value: Required. The invariant value.
:type value: str
:param localized_value: The locale specific value.
:type localized_value: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LocalizableString, self).__init__(**kwargs)
self.value = kwargs['value']
self.localized_value = kwargs.get('localized_value', None)
class LogicAppReceiver(msrest.serialization.Model):
"""A logic app receiver.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the logic app receiver. Names must be unique across all
receivers within an action group.
:type name: str
:param resource_id: Required. The azure resource id of the logic app receiver.
:type resource_id: str
:param callback_url: Required. The callback url where http request sent to.
:type callback_url: str
"""
_validation = {
'name': {'required': True},
'resource_id': {'required': True},
'callback_url': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'callback_url': {'key': 'callbackUrl', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LogicAppReceiver, self).__init__(**kwargs)
self.name = kwargs['name']
self.resource_id = kwargs['resource_id']
self.callback_url = kwargs['callback_url']
class SmsReceiver(msrest.serialization.Model):
"""An SMS receiver.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the SMS receiver. Names must be unique across all receivers
within an action group.
:type name: str
:param country_code: Required. The country code of the SMS receiver.
:type country_code: str
:param phone_number: Required. The phone number of the SMS receiver.
:type phone_number: str
:ivar status: The status of the receiver. Possible values include: "NotSpecified", "Enabled",
"Disabled".
:vartype status: str or ~$(python-base-namespace).v2018_09_01.models.ReceiverStatus
"""
_validation = {
'name': {'required': True},
'country_code': {'required': True},
'phone_number': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'country_code': {'key': 'countryCode', 'type': 'str'},
'phone_number': {'key': 'phoneNumber', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SmsReceiver, self).__init__(**kwargs)
self.name = kwargs['name']
self.country_code = kwargs['country_code']
self.phone_number = kwargs['phone_number']
self.status = None
class TimeSeriesInformation(msrest.serialization.Model):
"""The time series info needed for calculating the baseline.
All required parameters must be populated in order to send to Azure.
:param sensitivities: Required. The list of sensitivities for calculating the baseline.
:type sensitivities: list[str]
:param values: Required. The metric values to calculate the baseline.
:type values: list[float]
:param timestamps: The array of timestamps of the baselines.
:type timestamps: list[~datetime.datetime]
"""
_validation = {
'sensitivities': {'required': True},
'values': {'required': True},
}
_attribute_map = {
'sensitivities': {'key': 'sensitivities', 'type': '[str]'},
'values': {'key': 'values', 'type': '[float]'},
'timestamps': {'key': 'timestamps', 'type': '[iso-8601]'},
}
def __init__(
self,
**kwargs
):
super(TimeSeriesInformation, self).__init__(**kwargs)
self.sensitivities = kwargs['sensitivities']
self.values = kwargs['values']
self.timestamps = kwargs.get('timestamps', None)
class VoiceReceiver(msrest.serialization.Model):
"""A voice receiver.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the voice receiver. Names must be unique across all
receivers within an action group.
:type name: str
:param country_code: Required. The country code of the voice receiver.
:type country_code: str
:param phone_number: Required. The phone number of the voice receiver.
:type phone_number: str
"""
_validation = {
'name': {'required': True},
'country_code': {'required': True},
'phone_number': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'country_code': {'key': 'countryCode', 'type': 'str'},
'phone_number': {'key': 'phoneNumber', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VoiceReceiver, self).__init__(**kwargs)
self.name = kwargs['name']
self.country_code = kwargs['country_code']
self.phone_number = kwargs['phone_number']
class WebhookReceiver(msrest.serialization.Model):
"""A webhook receiver.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the webhook receiver. Names must be unique across all
receivers within an action group.
:type name: str
:param service_uri: Required. The URI where webhooks should be sent.
:type service_uri: str
"""
_validation = {
'name': {'required': True},
'service_uri': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'service_uri': {'key': 'serviceUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WebhookReceiver, self).__init__(**kwargs)
self.name = kwargs['name']
self.service_uri = kwargs['service_uri']
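# Illustrative usage (assumption, not part of the generated code): required
# fields are read with kwargs[...] in __init__, so omitting them raises
# KeyError instead of producing a partially populated model, while read-only
# fields (e.g. SmsReceiver.status) are reset to None and only populated by the
# service, e.g.
#
#   hook = WebhookReceiver(name='oncall', service_uri='https://example.invalid/hook')
#   WebhookReceiver(name='oncall')  # raises KeyError: 'service_uri'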
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line interface for Turbinia."""
# pylint: disable=bad-indentation
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import getpass
import logging
import os
import sys
import uuid
from turbinia import config
from turbinia import TurbiniaException
from turbinia.lib import recipe_helpers
from turbinia.config import logger
from turbinia import __version__
from turbinia.processors import archive
from turbinia.output_manager import OutputManager
from turbinia.output_manager import GCSOutputWriter
log = logging.getLogger('turbinia')
# We set up the logger first without the file handler, and we will set up the
# file handler later once we have read the log path from the config.
logger.setup(need_file_handler=False)
def csv_list(string):
"""Helper method for having CSV argparse types.
Args:
string(str): Comma separated string to parse.
Returns:
list[str]: The parsed strings.
"""
return string.split(',')
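# Illustrative behaviour (assumption; values are made up):
#   csv_list('rawdisk,googleclouddisk')  -> ['rawdisk', 'googleclouddisk']
#   csv_list('single')                   -> ['single']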
def check_args(source_path, args):
"""Checks lengths of supplied args match or raise an error.
Args lists with only one element are automatically extended to match the length of source_path.
Args:
source_path(list(str)): List of source_paths supplied to turbiniactl.
args(list(list)): List of args (i.e. name, source, partitions, etc) and
their values supplied to turbiniactl.
Raises:
TurbiniaException: If the lengths of the args don't match.
Returns:
list(list): List of args, each extended to match the length of source_path. """
ret = list()
if not args[0]:
args[0] = source_path
for arg in args:
if not arg:
arg = [None]
if len(arg) > 1 and len(arg) != len(source_path):
raise TurbiniaException(
'Number of passed in args ({0:d}) must be equal to one or the '
'number of source_paths/disks ({1:d}).'.format(
len(arg), len(source_path)))
if len(arg) == 1:
arg = [arg[0] for _ in source_path]
ret.append(arg)
return ret
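# Illustrative behaviour of check_args (assumption; values are made up):
# single-element args are repeated so every source_path gets a matching value,
# and an empty first arg falls back to source_path itself:
#
#   check_args(['disk1.dd', 'disk2.dd'], [None, ['oem_laptop']])
#   # -> [['disk1.dd', 'disk2.dd'], ['oem_laptop', 'oem_laptop']]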
def process_args(args):
"""Parses and processes args.
Args:
args(namespace): turbiniactl args.
Raises:
TurbiniaException: If there's an error processing args.
"""
parser = argparse.ArgumentParser(
description='Turbinia can bulk process multiple pieces of evidence of the '
'same type (i.e. rawdisk, google cloud disk). For bulk processing, pass in a '
'csv list of args to be processed. If all pieces of evidence share the same '
'property, such as project or source, there is no need to repeat those '
'values in the command.')
parser.add_argument(
'-q', '--quiet', action='store_true', help='Show minimal output')
parser.add_argument(
'-v', '--verbose', action='store_true', help='Show verbose output',
default=True)
parser.add_argument(
'-d', '--debug', action='store_true', help='Show debug output',
default=False)
parser.add_argument(
'-a', '--all_fields', action='store_true',
help='Show all task status fields in output', required=False)
parser.add_argument(
'-c', '--config_file', help='Load explicit config file. If specified it '
'will ignore config files in other default locations '
'(/etc/turbinia.conf, ~/.turbiniarc, or in paths referenced in '
'environment variable TURBINIA_CONFIG_PATH)', required=False)
parser.add_argument(
'-I', '--recipe', help='Name of Recipe to be employed on evidence',
required=False)
parser.add_argument(
'-P', '--recipe_path', help='Recipe file path to load and use.',
required=False)
parser.add_argument(
'-X', '--skip_recipe_validation', action='store_true', help='Do not '
'perform recipe validation on the client.', required=False, default=False)
parser.add_argument(
'-f', '--force_evidence', action='store_true',
help='Force evidence processing request in potentially unsafe conditions',
required=False)
parser.add_argument(
'-k', '--decryption_keys', help='Decryption keys to be passed in as '
'comma separated list. Each entry should be in the form type=key. (e.g. '
'"-k password=123456,recovery_password=XXXX-XXXX-XXXX-XXXX-XXXX-XXXX")',
default=[], type=csv_list)
parser.add_argument('-o', '--output_dir', help='Directory path for output')
parser.add_argument('-L', '--log_file', help='Log file')
parser.add_argument(
'-r', '--request_id', help='Create new requests with this Request ID',
required=False)
parser.add_argument(
'-V', '--version', action='version', version=__version__,
help='Show the version')
parser.add_argument(
'-D', '--dump_json', action='store_true',
help='Dump JSON output of Turbinia Request instead of sending it')
parser.add_argument(
'-F', '--filter_patterns_file',
help='A file containing newline separated string patterns to filter '
'text based evidence files with (in extended grep regex format). '
'This filtered output will be in addition to the complete output')
parser.add_argument(
'-Y', '--yara_rules_file', help='A file containing Yara rules.')
parser.add_argument(
'-j', '--jobs_allowlist', default=[], type=csv_list,
help='An allowlist for Jobs that will be allowed to run (in CSV format, '
'no spaces). This will not force them to run if they are not configured '
'to. This is applied both at server start time and when the client makes '
'a processing request. When applied at server start time the change is '
'persistent while the server is running. When applied by the client, it '
'will only affect that processing request.')
parser.add_argument(
'-J', '--jobs_denylist', default=[], type=csv_list,
help='A denylist for Jobs we will not allow to run. See '
'--jobs_allowlist help for details on format and when it is applied.')
parser.add_argument(
'-p', '--poll_interval', default=60, type=int,
help='Number of seconds to wait between polling for task state info')
parser.add_argument(
'-T', '--debug_tasks', action='store_true',
help='Show debug output for all supported tasks', default=False)
parser.add_argument(
'-w', '--wait', action='store_true',
help='Wait to exit until all tasks for the given request have completed')
subparsers = parser.add_subparsers(
dest='command', title='Commands', metavar='<command>')
# Action for printing config
parser_config = subparsers.add_parser('config', help='Print out config file')
parser_config.add_argument(
'-f', '--file_only', action='store_true', help='Print out file path only')
#Sends Test Notification
parser_testnotify = subparsers.add_parser(
'testnotify', help='Sends test notification')
# TODO(aarontp): Find better way to specify these that allows for multiple
# pieces of evidence to be submitted. Maybe automagically create different
# commands based on introspection of evidence objects?
# RawDisk
parser_rawdisk = subparsers.add_parser(
'rawdisk', help='Process RawDisk as Evidence (bulk processable)')
parser_rawdisk.add_argument(
'-l', '--source_path', help='Local path to the evidence', required=True,
type=csv_list)
parser_rawdisk.add_argument(
'-s', '--source', help='Description of the source of the evidence',
required=False, type=csv_list, default=[None])
parser_rawdisk.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
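# Illustrative invocation (assumption; paths and source label are made up):
#   turbiniactl rawdisk -l /evidence/disk1.dd,/evidence/disk2.dd -s oem_laptop
# With one -s value and two -l paths, check_args() repeats the source so each
# disk is submitted with the same description.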
# Parser options for Google Cloud Disk Evidence type
parser_googleclouddisk = subparsers.add_parser(
'googleclouddisk',
help='Process Google Cloud Persistent Disk as Evidence '
'(bulk processable)')
parser_googleclouddisk.add_argument(
'-C', '--copy_only', action='store_true', help='Only copy disk and do '
'not process with Turbinia. This only takes effect when a source '
'--project is defined and can be run without any Turbinia server or '
'workers configured.')
parser_googleclouddisk.add_argument(
'-d', '--disk_name', help='Google Cloud name for disk', required=True,
type=csv_list)
parser_googleclouddisk.add_argument(
'-p', '--project', help='Project that the disk to process is associated '
'with. If this is different from the project that Turbinia is running '
'in, it will be copied to the Turbinia project.', type=csv_list)
parser_googleclouddisk.add_argument(
'-z', '--zone', help='Geographic zone the disk exists in', type=csv_list)
parser_googleclouddisk.add_argument(
'-s', '--source', help='Description of the source of the evidence',
required=False, type=csv_list, default=[None])
parser_googleclouddisk.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
# Parser options for Google Cloud Persistent Disk Embedded Raw Image
parser_googleclouddiskembedded = subparsers.add_parser(
'googleclouddiskembedded',
help='Process Google Cloud Persistent Disk with an embedded raw disk '
'image as Evidence (bulk processable)')
parser_googleclouddiskembedded.add_argument(
'-C', '--copy_only', action='store_true', help='Only copy disk and do '
'not process with Turbinia. This only takes effect when a source '
'--project is defined and can be run without any Turbinia server or '
'workers configured.')
parser_googleclouddiskembedded.add_argument(
'-e', '--embedded_path',
help='Path within the Persistent Disk that points to the raw image file',
required=True, type=csv_list)
parser_googleclouddiskembedded.add_argument(
'-d', '--disk_name', help='Google Cloud name for disk', required=True,
type=csv_list)
parser_googleclouddiskembedded.add_argument(
'-p', '--project', help='Project that the disk to process is associated '
'with. If this is different from the project that Turbinia is running '
'in, it will be copied to the Turbinia project.', type=csv_list)
parser_googleclouddiskembedded.add_argument(
'-P', '--mount_partition', type=csv_list, default=[1],
help='The partition number as an integer to use when mounting the '
'parent disk. Defaults to the first partition. Only affects mounting, and '
'not what gets processed.')
parser_googleclouddiskembedded.add_argument(
'-z', '--zone', help='Geographic zone the disk exists in', type=csv_list)
parser_googleclouddiskembedded.add_argument(
'-s', '--source', help='Description of the source of the evidence',
required=False, type=csv_list, default=[None])
parser_googleclouddiskembedded.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
# RawMemory
parser_rawmemory = subparsers.add_parser(
'rawmemory', help='Process RawMemory as Evidence (bulk processable)')
parser_rawmemory.add_argument(
'-l', '--source_path', help='Local path to the evidence', required=True,
type=csv_list)
parser_rawmemory.add_argument(
'-P', '--profile', help='Profile to use with Volatility', required=True,
type=csv_list)
parser_rawmemory.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
parser_rawmemory.add_argument(
'-m', '--module_list', type=csv_list,
help='Volatility module(s) to execute', required=True)
# Parser options for Directory evidence type
parser_directory = subparsers.add_parser(
'directory', help='Process a directory as Evidence (bulk processable)')
parser_directory.add_argument(
'-l', '--source_path', help='Local path to the evidence', required=True,
type=csv_list)
parser_directory.add_argument(
'-s', '--source', help='Description of the source of the evidence',
required=False, type=csv_list, default=[None])
parser_directory.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
# Parser options for CompressedDirectory evidence type
parser_directory = subparsers.add_parser(
'compresseddirectory', help='Process a compressed tar file as Evidence '
'(bulk processable)')
parser_directory.add_argument(
'-l', '--source_path', help='Local path to the evidence', required=True,
type=csv_list)
parser_directory.add_argument(
'-s', '--source', help='Description of the source of the evidence',
required=False, type=csv_list, default=[None])
parser_directory.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
# Parser options for ChromiumProfile evidence type
parser_hindsight = subparsers.add_parser(
'hindsight', help='Process ChromiumProfile as Evidence '
'(bulk processable)')
parser_hindsight.add_argument(
'-l', '--source_path', help='Local path to the evidence', required=True,
type=csv_list)
parser_hindsight.add_argument(
'-f', '--format', help='Output format (supported types are '
'xlsx, sqlite, jsonl)', type=csv_list, default=['sqlite'])
parser_hindsight.add_argument(
'-b', '--browser_type', help='The type of browser the input files belong '
'to (supported types are Chrome, Brave)', type=csv_list,
default=['Chrome'])
parser_hindsight.add_argument(
'-n', '--name', help='Descriptive name of the evidence', required=False,
type=csv_list)
# List Jobs
subparsers.add_parser(
'listjobs',
help='List all available Jobs. These Job names can be used by '
'--jobs_allowlist and --jobs_denylist')
# PSQ Worker
parser_psqworker = subparsers.add_parser('psqworker', help='Run PSQ worker')
parser_psqworker.add_argument(
'-S', '--single_threaded', action='store_true',
help='Run PSQ Worker in a single thread', required=False)
# Celery Worker
subparsers.add_parser('celeryworker', help='Run Celery worker')
# Parser options for Turbinia status command
parser_status = subparsers.add_parser(
'status', help='Get Turbinia Task status')
parser_status.add_argument(
'-c', '--close_tasks', action='store_true',
help='Close tasks based on Request ID or Task ID', required=False)
parser_status.add_argument(
'-C', '--csv', action='store_true',
help='When used with --statistics, the output will be in CSV format',
required=False)
parser_status.add_argument(
'-d', '--days_history', default=0, type=int,
help='Number of days of history to show', required=False)
parser_status.add_argument(
'-D', '--dump_json', action='store_true',
help='Dump JSON status output instead of text. Compatible with -d, -u, '
'-r and -t flags, but not others')
parser_status.add_argument(
'-f', '--force', help='Gatekeeper for --close_tasks', action='store_true',
required=False)
parser_status.add_argument(
'-r', '--request_id',
help='Show all tasks for this Request ID. A request to process Evidence will '
'generate a unique request ID and this option will show all Tasks associated '
'with this request.', required=False)
# 20 == Priority.High. We are setting this manually here because we don't want
# to load the worker module yet in order to access this Enum.
parser_status.add_argument(
'-p', '--priority_filter', default=20, type=int, required=False,
help='This sets what report sections are shown in full detail in '
'report output. Any tasks that have set a report_priority value '
'equal to or lower than this setting will be shown in full detail, and '
'tasks with a higher value will only have a summary shown. To see all '
'tasks report output in full detail, set --priority_filter=100')
parser_status.add_argument(
'-R', '--full_report',
help='Generate full markdown report instead of just a summary',
action='store_true', required=False)
parser_status.add_argument(
'-s', '--statistics', help='Generate statistics only',
action='store_true', required=False)
parser_status.add_argument(
'-t', '--task_id', help='Show task data for the given Task ID. A '
'processing request can generate multiple Tasks as part of the request '
'and this will filter to only the specified Task.', required=False)
parser_status.add_argument(
'-u', '--user', help='Show task for given user', required=False)
parser_status.add_argument(
'-i', '--requests', required=False, action='store_true',
help='Show all requests from a specified timeframe. The default '
'timeframe is 7 days. Please use the -d flag to extend this.')
parser_status.add_argument(
'-g', '--group_id', help='Show Requests for given group ID. This command'
' only shows the related requests and overview of their task status. Run '
'--full_report for the full list of requests and their tasks.',
required=False)
parser_status.add_argument(
'-w', '--workers', required=False, action='store_true',
help='Show Worker status information from a specified timeframe. The '
'default timeframe is 7 days. Please use the -d flag to extend this. '
'Additionally, you can use the -a or --all_fields flag to retrieve the '
'full output containing finished and unassigned worker tasks.')
parser_log_collector = subparsers.add_parser(
'gcplogs', help='Collects Turbinia logs from Stackdriver.')
parser_log_collector.add_argument(
'-o', '--output_dir', help='Directory path for output', required=False)
parser_log_collector.add_argument(
'-q', '--query',
help='Filter expression to use to query Stackdriver logs.')
parser_log_collector.add_argument(
'-d', '--days_history', default=1, type=int,
help='Number of days of history to show', required=False)
parser_log_collector.add_argument(
'-s', '--server_logs', action='store_true',
help='Collects all server related logs.')
parser_log_collector.add_argument(
'-w', '--worker_logs', action='store_true',
help='Collects all worker related logs.')
# Add GCS logs collector
parser_gcs_logs = subparsers.add_parser(
'dumpgcs', help='Get Turbinia results from Google Cloud Storage.')
parser_gcs_logs.add_argument(
'-o', '--output_dir', help='Directory path for output.', required=True)
parser_gcs_logs.add_argument(
'-t', '--task_id', help='Download all the results for given task_id.')
parser_gcs_logs.add_argument(
'-r', '--request_id',
help='Download the results for all Tasks for the given request_id.')
parser_gcs_logs.add_argument(
'-b', '--bucket',
help='Alternate GCS bucket to download from. Must be in the following '
'format gs://{BUCKET_NAME}/. Defaults to the BUCKET_NAME as specified '
'in the config')
parser_gcs_logs.add_argument(
'-d', '--days_history', default=0, type=int,
help='Number of days of history to query results for', required=False)
parser_gcs_logs.add_argument(
'-i', '--instance_id',
help='Instance ID used to run tasks/requests. You must provide an '
'instance ID if the task/request was not processed on the same instance '
'as your config file.')
# Server
subparsers.add_parser('server', help='Run Turbinia Server')
args = parser.parse_args(args)
# Load the config before final logger setup so we can find the path to the
# log file.
try:
if args.config_file:
config.LoadConfig(config_file=args.config_file)
else:
config.LoadConfig()
except TurbiniaException as exception:
print(
'Could not load config file ({0!s}).\n{1:s}'.format(
exception, config.CONFIG_MSG))
sys.exit(1)
if args.log_file:
user_specified_log = args.log_file
if args.output_dir:
config.OUTPUT_DIR = args.output_dir
config.TURBINIA_COMMAND = args.command
server_flags_set = args.command == 'server'
worker_flags_set = args.command in ('psqworker', 'celeryworker')
# Run logger setup again if we're running as a server or worker (or have a log
# file explicitly set on the command line) to set a file-handler now that we
# have the logfile path from the config.
if server_flags_set or worker_flags_set or args.log_file:
if args.log_file:
logger.setup(log_file_path=user_specified_log)
else:
logger.setup()
if args.quiet:
log.setLevel(logging.ERROR)
elif args.debug:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
# Enable tasks debugging for supported tasks
if args.debug_tasks:
config.DEBUG_TASKS = True
if config.TASK_MANAGER == 'PSQ':
from turbinia.lib import google_cloud
from libcloudforensics.providers.gcp import forensics as gcp_forensics
# Enable GCP Stackdriver Logging
if config.STACKDRIVER_LOGGING and args.command in ('server', 'psqworker'):
google_cloud.setup_stackdriver_handler(
config.TURBINIA_PROJECT, args.command)
log.info('Turbinia version: {0:s}'.format(__version__))
# Do late import of other needed Turbinia modules. This is needed because the
# config is loaded by these modules at load time, and we want to wait to load
# the config until after we parse the args so that we can use those arguments
# to point to config paths.
from turbinia import notify
from turbinia import client as TurbiniaClientProvider
from turbinia.worker import TurbiniaCeleryWorker
from turbinia.worker import TurbiniaPsqWorker
from turbinia.server import TurbiniaServer
# Print out config if requested
if args.command == 'config':
if args.file_only:
log.info('Config file path is {0:s}\n'.format(config.configSource))
sys.exit(0)
try:
with open(config.configSource, "r") as f:
print(f.read())
sys.exit(0)
except IOError as exception:
msg = (
'Failed to read config file {0:s}: {1!s}'.format(
config.configSource, exception))
raise TurbiniaException(msg)
#sends test notification
if args.command == 'testnotify':
notify.sendmail(
config.EMAIL_ADDRESS, 'Turbinia test notification',
'This is a test notification')
sys.exit(0)
args.jobs_allowlist = [j.lower() for j in args.jobs_allowlist]
args.jobs_denylist = [j.lower() for j in args.jobs_denylist]
# Read and set filter_patterns
filter_patterns = []
if (args.filter_patterns_file and
not os.path.exists(args.filter_patterns_file)):
msg = 'Filter patterns file {0:s} does not exist.'.format(
args.filter_patterns_file)
raise TurbiniaException(msg)
elif args.filter_patterns_file:
try:
filter_patterns = open(args.filter_patterns_file).read().splitlines()
except IOError as e:
log.warning(
'Cannot open file {0:s} [{1!s}]'.format(args.filter_patterns_file, e))
# Read yara rules
yara_rules = ''
if (args.yara_rules_file and not os.path.exists(args.yara_rules_file)):
msg = 'Yara rules file {0:s} does not exist.'.format(
args.yara_rules_file)
raise TurbiniaException(msg)
elif args.yara_rules_file:
try:
yara_rules = open(args.yara_rules_file).read()
except IOError as e:
msg = ('Cannot open file {0:s} [{1!s}]'.format(args.yara_rules_file, e))
raise TurbiniaException(msg)
# Create Client object
client = None
if args.command not in ('psqworker', 'server'):
client = TurbiniaClientProvider.get_turbinia_client()
# Set group id
group_id = uuid.uuid4().hex
# Checks for bulk processing
if args.command in ('rawdisk', 'directory', 'compresseddirectory'):
args.name, args.source = check_args(
args.source_path, [args.name, args.source])
# Iterate through evidence and call process_evidence
for i, source_path in enumerate(args.source_path):
name = args.name[i]
source = args.source[i]
process_evidence(
args=args, source_path=source_path, name=name, source=source,
group_id=group_id, filter_patterns=filter_patterns, client=client,
yara_rules=yara_rules)
elif args.command in ('googleclouddisk', 'googleclouddiskembedded'):
# Fail if this is a local instance
if config.SHARED_FILESYSTEM and not args.force_evidence:
msg = (
'The evidence type {0:s} is Cloud only, and this instance of '
'Turbinia is not a cloud instance.'.format(args.command))
raise TurbiniaException(msg)
# Check cloud zones
if not args.zone and config.TURBINIA_ZONE:
args.zone = [config.TURBINIA_ZONE]
elif not args.zone and not config.TURBINIA_ZONE:
msg = 'Turbinia zone must be set by --zone or in config.'
raise TurbiniaException(msg)
# Checks for cloud project
if not args.project and config.TURBINIA_PROJECT:
args.project = [config.TURBINIA_PROJECT]
elif not args.project and not config.TURBINIA_PROJECT:
msg = 'Turbinia project must be set by --project or in config'
raise TurbiniaException(msg)
# Since mount_partition and embedded_path are not in the cloud disk namespace,
# set them to None here.
if args.command == 'googleclouddisk':
args.mount_partition = None
args.embedded_path = None
(
args.name, args.source, args.project, args.zone, args.mount_partition,
args.embedded_path) = check_args(
args.disk_name, [
args.name, args.source, args.project, args.zone,
args.mount_partition, args.embedded_path
])
mount_partition = None
embedded_path = None
for i, disk_name in enumerate(args.disk_name):
project = args.project[i]
zone = args.zone[i]
name = args.name[i]
source = args.source[i]
if args.command == 'googleclouddiskembedded':
embedded_path = args.embedded_path[i]
mount_partition = args.mount_partition[i]
if ((project and project != config.TURBINIA_PROJECT) or
(zone and zone != config.TURBINIA_ZONE)):
new_disk = gcp_forensics.CreateDiskCopy(
project, config.TURBINIA_PROJECT, None, config.TURBINIA_ZONE,
disk_name=disk_name)
disk_name = new_disk.name
if args.copy_only:
log.info(
'--copy_only specified, so not processing {0:s} with '
'Turbinia'.format(disk_name))
continue
process_evidence(
args=args, disk_name=disk_name, name=name, source=source,
project=project, zone=zone, embedded_path=embedded_path,
mount_partition=mount_partition, group_id=group_id,
filter_patterns=filter_patterns, client=client, yara_rules=yara_rules)
elif args.command == 'rawmemory':
# Checks if length of args match
args.name, args.profile = check_args(
args.source_path, [args.name, args.profile])
for i, source_path in enumerate(args.source_path):
profile = args.profile[i]
name = args.name[i]
process_evidence(
args=args, source_path=source_path, name=name, profile=profile,
group_id=group_id, filter_patterns=filter_patterns, client=client,
yara_rules=yara_rules)
elif args.command == 'hindsight':
args.name, args.browser_type, args.format = check_args(
args.source_path, [args.name, args.browser_type, args.format])
for i, source_path in enumerate(args.source_path):
name = args.name[i]
browser_type = args.browser_type[i]
format = args.format[i]
process_evidence(
args=args, source_path=source_path, name=name, format=format,
group_id=group_id, client=client, filter_patterns=filter_patterns,
yara_rules=yara_rules, browser_type=browser_type)
elif args.command == 'psqworker':
# Set up root logger level which is normally set by the psqworker command
# which we are bypassing.
logger.setup()
worker = TurbiniaPsqWorker(
jobs_denylist=args.jobs_denylist, jobs_allowlist=args.jobs_allowlist)
worker.start()
elif args.command == 'celeryworker':
logger.setup()
worker = TurbiniaCeleryWorker(
jobs_denylist=args.jobs_denylist, jobs_allowlist=args.jobs_allowlist)
worker.start()
elif args.command == 'server':
server = TurbiniaServer(
jobs_denylist=args.jobs_denylist, jobs_allowlist=args.jobs_allowlist)
server.start()
elif args.command == 'status':
region = config.TURBINIA_REGION
if args.request_id and args.group_id:
msg = (
'Cannot run status command with request ID and group ID. Please '
'only specify one.')
raise TurbiniaException(msg)
if args.close_tasks:
if args.group_id:
msg = 'The --close_tasks flag is not compatible with --group_id.'
raise TurbiniaException(msg)
if args.user or args.request_id or args.task_id:
print(
client.close_tasks(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, request_id=args.request_id, task_id=args.task_id,
user=args.user, requester=getpass.getuser()))
sys.exit(0)
else:
log.info(
'--close_tasks (-c) requires --user, --request_id, and/or --task_id'
)
sys.exit(1)
if args.dump_json and (args.statistics or args.requests or args.workers):
log.info(
'The --dump_json flag is not compatible with --statistics, '
'--requests, or --workers flags')
sys.exit(1)
if args.statistics:
print(
client.format_task_statistics(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, days=args.days_history, task_id=args.task_id,
request_id=args.request_id, user=args.user, csv=args.csv))
sys.exit(0)
if args.wait and args.request_id:
client.wait_for_request(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, request_id=args.request_id, user=args.user,
poll_interval=args.poll_interval)
elif args.wait and not args.request_id:
log.info(
'--wait requires --request_id, which is not specified. '
'turbiniactl will exit without waiting.')
if args.requests:
print(
client.format_request_status(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, days=args.days_history,
all_fields=args.all_fields))
sys.exit(0)
if args.workers:
print(
client.format_worker_status(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, days=args.days_history,
all_fields=args.all_fields))
sys.exit(0)
if args.dump_json:
output_json = True
else:
output_json = False
print(
client.format_task_status(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, days=args.days_history, task_id=args.task_id,
request_id=args.request_id, group_id=args.group_id, user=args.user,
all_fields=args.all_fields, full_report=args.full_report,
priority_filter=args.priority_filter, output_json=output_json))
sys.exit(0)
elif args.command == 'listjobs':
log.info('Available Jobs:')
client.list_jobs()
elif args.command == 'gcplogs':
if not config.STACKDRIVER_LOGGING:
msg = 'Stackdriver logging must be enabled in order to use this.'
raise TurbiniaException(msg)
if args.output_dir and not os.path.isdir(args.output_dir):
msg = 'Please provide a valid directory path.'
raise TurbiniaException(msg)
query = None
if args.query:
query = args.query
if args.worker_logs:
if query:
query = 'jsonPayload.origin="psqworker" {0:s}'.format(query)
else:
query = 'jsonPayload.origin="psqworker"'
if args.server_logs:
if query:
query = 'jsonPayload.origin="server" {0:s}'.format(query)
else:
query = 'jsonPayload.origin="server"'
google_cloud.get_logs(
config.TURBINIA_PROJECT, args.output_dir, args.days_history, query)
elif args.command == 'dumpgcs':
if not config.GCS_OUTPUT_PATH and not args.bucket:
msg = 'GCS storage must be enabled in order to use this.'
raise TurbiniaException(msg)
if not args.task_id and not args.request_id:
msg = 'You must specify one of task_id or request_id.'
raise TurbiniaException(msg)
if not os.path.isdir(args.output_dir):
msg = 'Please provide a valid directory path.'
raise TurbiniaException(msg)
gcs_bucket = args.bucket if args.bucket else config.GCS_OUTPUT_PATH
instance_id = args.instance_id if args.instance_id else config.INSTANCE_ID
try:
task_data = client.get_task_data(
instance=instance_id, days=args.days_history,
project=config.TURBINIA_PROJECT, region=config.TURBINIA_REGION,
task_id=args.task_id, request_id=args.request_id,
function_name='gettasks')
output_writer = GCSOutputWriter(
gcs_bucket, local_output_dir=args.output_dir)
if not task_data:
msg = 'No Tasks found for task/request ID'
raise TurbiniaException(msg)
if args.task_id:
log.info(
'Downloading GCS files for task_id {0:s} to {1:s}.'.format(
args.task_id, args.output_dir))
for task in task_data:
if task['id'] == args.task_id:
if task['saved_paths']:
output_writer.copy_from_gcs(task['saved_paths'])
if args.request_id:
log.info(
'Downloading GCS files for request_id {0:s} to {1:s}.'.format(
args.request_id, args.output_dir))
paths = []
for task in task_data:
if task['saved_paths']:
paths.extend(task['saved_paths'])
output_writer.copy_from_gcs(paths)
except TurbiniaException as exception:
log.error('Failed to pull the data {0!s}'.format(exception))
else:
log.warning('Command {0!s} not implemented.'.format(args.command))
# TODO: shard this function and move some of its functionalities to other files
# (move some of this to evidence.py to run the checks etc)
def process_evidence(
client, group_id, args=None, browser_type=None, disk_name=None,
embedded_path=None, filter_patterns=None, format=None, mount_partition=None,
name=None, profile=None, project=None, source=None, source_path=None,
yara_rules=None, zone=None):
"""Creates evidence and turbinia request.
Args:
client(TurbiniaClient): TurbiniaClient used for creating requests.
group_id(str): Group ID used for bulk processing.
args(Namespace): commandline args.
browser_type(str): Browser type used for hindsight.
disk_name(str): Disk name used for processing cloud evidence.
embedded_path(str): Embedded path for clouddiskembedded.
filter_patterns(list(str)): Filter patterns used for processing evidence.
format(str): Output format for hindsight.
mount_partition(int): Mount partition for clouddiskembedded.
name(str): Evidence name.
profile(str): Volatility profile used for rawmemory.
project(str): Project for cloud related evidence.
source(str): Source for evidence.
source_path(str): Source path used for host evidence.
yara_rules(str): Yara rule for processing evidence.
zone(str): Cloud zone used for cloud evidence.
"""
from turbinia import evidence
# Set request id
request_id = args.request_id if args.request_id else uuid.uuid4().hex
# Start Evidence configuration
evidence_ = None
if args.command == 'rawdisk':
evidence_ = evidence.RawDisk(
name=name, source_path=os.path.abspath(source_path), source=source)
elif args.command == 'directory':
source_path = os.path.abspath(source_path)
if not config.SHARED_FILESYSTEM:
log.info(
'A Cloud Only Architecture has been detected. '
'Compressing the directory for GCS upload.')
source_path = archive.CompressDirectory(
source_path, output_path=config.TMP_DIR)
evidence_ = evidence.CompressedDirectory(
name=name, source_path=source_path, source=source)
else:
evidence_ = evidence.Directory(
name=name, source_path=source_path, source=source)
elif args.command == 'compresseddirectory':
archive.ValidateTarFile(source_path)
evidence_ = evidence.CompressedDirectory(
name=name, source_path=os.path.abspath(source_path), source=source)
elif args.command == 'googleclouddisk':
evidence_ = evidence.GoogleCloudDisk(
name=name, disk_name=disk_name, project=project, zone=zone,
source=source)
elif args.command == 'googleclouddiskembedded':
parent_evidence_ = evidence.GoogleCloudDisk(
name=name, disk_name=disk_name, project=project, source=source,
mount_partition=mount_partition, zone=zone)
evidence_ = evidence.GoogleCloudDiskRawEmbedded(
name=name, disk_name=disk_name, project=project, zone=zone,
embedded_path=embedded_path)
evidence_.set_parent(parent_evidence_)
elif args.command == 'hindsight':
if format not in ['xlsx', 'sqlite', 'jsonl']:
msg = 'Invalid output format.'
raise TurbiniaException(msg)
if browser_type not in ['Chrome', 'Brave']:
msg = 'Browser type not supported.'
raise TurbiniaException(msg)
source_path = os.path.abspath(source_path)
evidence_ = evidence.ChromiumProfile(
name=name, source_path=source_path, output_format=format,
browser_type=browser_type)
elif args.command == 'rawmemory':
source_path = os.path.abspath(source_path)
evidence_ = evidence.RawMemory(
name=name, source_path=source_path, profile=profile,
module_list=args.module_list)
if evidence_ and not args.force_evidence:
if not config.SHARED_FILESYSTEM and evidence_.copyable:
if os.path.exists(evidence_.local_path):
output_manager = OutputManager()
output_manager.setup(evidence_.type, request_id, remote_only=True)
output_manager.save_evidence(evidence_)
else:
msg = (
'The evidence local path does not exist: {0:s}. Please submit '
'a new Request with a valid path.'.format(evidence_.local_path))
raise TurbiniaException(msg)
elif not config.SHARED_FILESYSTEM and not evidence_.cloud_only:
msg = (
'The evidence type {0:s} cannot run on Cloud instances of '
'Turbinia. Consider wrapping it in a '
'GoogleCloudDiskRawEmbedded or other Cloud compatible '
'object'.format(evidence_.type))
raise TurbiniaException(msg)
request = None
if evidence_:
request = client.create_request(
request_id=request_id, group_id=group_id, requester=getpass.getuser())
request.evidence.append(evidence_)
if args.decryption_keys:
for credential in args.decryption_keys:
try:
credential_type, credential_data = credential.split('=')
except ValueError as exception:
msg = (
'Could not parse credential [{0:s}] from decryption keys '
'{1!s}: {2!s}'.format(
credential, args.decryption_keys, exception))
raise TurbiniaException(msg)
evidence_.credentials.append((credential_type, credential_data))
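# Illustrative parse result (assumption; the key value is made up): passing
# '-k password=123456' appends ('password', '123456') to evidence_.credentials.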
# Recipe pre-condition checks.
if args.recipe and args.recipe_path:
msg = ('Expected a recipe name (-I) or path (-P) but found both.')
raise TurbiniaException(msg)
if args.recipe or args.recipe_path:
# Load the specified recipe.
recipe_dict = client.create_recipe(
debug_tasks=args.debug_tasks, filter_patterns=filter_patterns,
group_id=group_id, jobs_allowlist=args.jobs_allowlist,
jobs_denylist=args.jobs_denylist,
recipe_name=args.recipe if args.recipe else args.recipe_path,
sketch_id=None, skip_recipe_validation=args.skip_recipe_validation,
yara_rules=yara_rules)
request.recipe = recipe_dict
if args.dump_json:
print(request.to_json().encode('utf-8'))
sys.exit(0)
else:
log.info(
'Creating request {0:s} with group id {1:s} and evidence '
'{2:s}'.format(request.request_id, request.group_id, evidence_.name))
# TODO add a new log line when group status is implemented
log.info(
'Run command "turbiniactl status -r {0:s}" to see the status of'
' this request and associated tasks'.format(request.request_id))
client.send_request(request)
if args.wait:
log.info(
'Waiting for request {0:s} to complete'.format(request.request_id))
region = config.TURBINIA_REGION
client.wait_for_request(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, request_id=request.request_id,
poll_interval=args.poll_interval)
print(
client.format_task_status(
instance=config.INSTANCE_ID, project=config.TURBINIA_PROJECT,
region=region, request_id=request.request_id,
all_fields=args.all_fields))
def main():
"""Main function for turbiniactl"""
try:
process_args(sys.argv[1:])
except TurbiniaException as e:
log.error('There was a problem processing arguments: {0:s}'.format(str(e)))
sys.exit(1)
log.info('Done.')
sys.exit(0)
if __name__ == '__main__':
main()
from gui.plugins.settings.create_simulation_impl import AbstractAddPlugin
import uuid
from tools.ecu_logging import ECULogger
import api
from gui.gui_builder import GBuilder
from PyQt4.Qt import QVBoxLayout
from PyQt4.QtGui import QHBoxLayout
from components.security.ecu.types.impl_ecu_secure import SecureECU, \
StdSecurECUTimingFunctions
from tools.singleton import Singleton
import os
from components.base.bus.impl_bus_can import StdCANBus
from components.security.ecu.types.impl_sec_mod_lwa import StdSecurLwSecModTimingFunctions, \
SecLwAuthSecurityModule
from api.core.component_specs import SimpleECUSpec
class AddWidgetAdapter(Singleton):
''' This class connects the ECU classes to their respective
GUI processing units'''
def __init__(self):
pass
class SecureECUAddWidget(AbstractAddPlugin):
''' This is the interface that connects the GUI and the actions to
execute for a SecureECU when it is to be created in the new
simulation window '''
GUI_NAME = "ECU Secure"
GUI_ICON = os.getcwd() + r'/icons/secure_ecu.png'
def __init__(self, parent):
AbstractAddPlugin.__init__(self, parent)
self._create_widgets(parent)
self.parent = parent
self.set_ecu_settings = {}
self.set_time_lib = {}
self.has_sec_mod_cert = False
self.id = uuid.uuid4()
self.ecu_group = None
def set_gui(self, mapp):
''' set the gui from the map received '''
try:
if "id_list" in mapp: self.id_list.setText(str(mapp["id_list"]))
if "send_buffer" in mapp: self.send_buf_textedit.setText(str(mapp["send_buffer"]))
if "rec_buffer" in mapp: self.rec_buf_textedit.setText(str(mapp["rec_buffer"]))
if "nr_ecus" in mapp: self.nr_ecus_textedit.setText(str(mapp["nr_ecus"]))
if "ecu_settings" in mapp: self.set_ecu_settings = mapp["ecu_settings"]
self._set_cb_changed()
if "ecu_timing" in mapp: self.set_time_lib = mapp["ecu_timing"]
index = self.ecu_set_time_sel_cb.findText(self.set_time_lib[self.ecu_set_time_cb.currentText()])
self.ecu_set_time_sel_cb.setCurrentIndex(index)
self._cur_set_time_entry = self.ecu_set_time_cb.currentText()
if "has_sec_mod_cert" in mapp: self.has_sec_mod_cert = mapp["has_sec_mod_cert"]
if not self.has_sec_mod_cert: self.has_sec_mod_cert_cb.setCurrentIndex(0)
except:
ECULogger().log_traceback()
def get_map(self):
mapping_values = {}
# Read the values from the gui and save them
# General Information
mapping_values["id_list"] = self._wrap(eval, self.id_list.text(), [])
mapping_values["send_buffer"] = self._wrap(int, self.send_buf_textedit.text(), 200)
mapping_values["rec_buffer"] = self._wrap(int, self.rec_buf_textedit.text(), 200)
mapping_values["nr_ecus"] = self._wrap(int, self.nr_ecus_textedit.text(), 0)
if self.ecu_set_te.text():
self.set_ecu_settings[self._cur_set_entry] = self.ecu_set_te.text()
mapping_values["ecu_settings"] = self.set_ecu_settings
# Certification and Timing
self.set_time_lib[self._cur_set_time_entry] = self.ecu_set_time_sel_cb.currentText() # Final entry
mapping_values['ecu_timing'] = self.set_time_lib
mapping_values['has_sec_mod_cert'] = self.has_sec_mod_cert
# mapping_values['connected_sec_mod'] = None
return mapping_values
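# Illustrative shape of the returned map (assumption; values are made up):
#   {'id_list': ['ECU_1', 'ECU_2'], 'send_buffer': 200, 'rec_buffer': 200,
#    'nr_ecus': 2, 'ecu_settings': {...}, 'ecu_timing': {...},
#    'has_sec_mod_cert': False}
# set_gui() consumes the same keys, so get_map()/set_gui() round-trip the
# widget state.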
def preprocess(self, env, mapp):
self.ecu_spec = SimpleECUSpec(mapp["id_list"] , mapp["send_buffer"], mapp["rec_buffer"])
for k in mapp["ecu_settings"]:
self.ecu_spec.set_ecu_setting(k, mapp["ecu_settings"][k])
self.ecu_group = api.ecu_sim_api.set_ecus(env, mapp["nr_ecus"], 'SecureECU', self.ecu_spec)
def get_actions(self):
''' returns the connections that can be made '''
actions = {}
actions['valid_cert'] = 'Generate Valid Certificate'
return actions
def execute_action(self, env_connect, *args):
pass
def main_process(self, env, mapp):
print("Main")
def postprocess(self, env, mapp):
print("Post")
def _create_widgets(self, parent):
# Layout
GBuilder().set_props(self, None, 100, 100)
main_lo = QVBoxLayout()
self.setLayout(main_lo)
# Title
main_lo.addWidget(GBuilder().label(parent, "<b>Description:</b>"))
hl = QHBoxLayout()
self.desc_label = GBuilder().label(parent, "Add a new SecureECU. This ECU resembles the ECU Part in a Lightweight Authentication Mechanism.")
self.desc_label.setFixedWidth(400)
self.icon = GBuilder().image(parent, SecureECUAddWidget.GUI_ICON, 2)
hl.addWidget(self.desc_label)
hl.addWidget(self.icon)
main_lo.addLayout(hl)
line = GBuilder().hor_line(parent)
main_lo.addWidget(line)
# Constructor Inputs
main_lo.addWidget(GBuilder().label(parent, "<b>General Information:</b>"))
lo0, self.id_list = GBuilder().label_text(parent, "List of IDs (optional):", label_width=120)
lo1, self.send_buf_textedit = GBuilder().label_text(parent, "Sending Buffer Size:", label_width=120)
lo2, self.rec_buf_textedit = GBuilder().label_text(parent, "Receiving Buffer Size:", label_width=120)
lo3, self.nr_ecus_textedit = GBuilder().label_text(parent, "Number of ECUs:", label_width=120)
main_lo.addLayout(lo0)
main_lo.addLayout(lo1)
main_lo.addLayout(lo2)
main_lo.addLayout(lo3)
# ECU Settings
items = self._get_ecu_settings()
hl, self.ecu_set_cb, self.ecu_set_te = GBuilder().combobox_text(parent, items, self._set_cb_changed)
self._cur_set_entry = self.ecu_set_cb.currentText()
main_lo.addLayout(hl)
# Timing Mapping
line = GBuilder().hor_line(parent)
main_lo.addWidget(line)
lab = GBuilder().label(parent, "<b>Timing and Certification:</b>")
lab.setFixedHeight(20)
main_lo.addWidget(lab)
itm = StdSecurECUTimingFunctions()
avail_items = itm.available_tags
items = itm.function_map.keys()
hl1 = QHBoxLayout()
self.ecu_set_time_cb = GBuilder().combobox(parent, items, self._set_time_cb_changed)
self._cur_set_time_entry = self.ecu_set_time_cb.currentText()
self.ecu_set_time_sel_cb = GBuilder().combobox(parent, avail_items, self._set_time_cb_changed)
self._cur_set_time_sel_entry = self.ecu_set_time_sel_cb.currentText()
hl1.addWidget(self.ecu_set_time_cb)
hl1.addWidget(self.ecu_set_time_sel_cb)
main_lo.addLayout(hl1)
# Certification (has a valid certificate or not)
# hl, self.has_sec_mod_cert_cb, lab = GBuilder().label_combobox(parent, "Has Security Module Certificate", ["Yes", "No"], self._has_sec_mod_cb_changed)
# main_lo.addLayout(hl)
def _get_ecu_settings(self):
sett = SecureECU()
sett.settings = sorted(sett.settings, key=lambda key: sett.settings[key])
return sett.settings
def _has_sec_mod_cb_changed(self):
try:
if self.has_sec_mod_cert_cb.currentText() == "Yes":
self.has_sec_mod_cert = True
else:
self.has_sec_mod_cert = False
except:
pass
def _set_time_cb_changed(self):
try:
# Save old value
if self._cur_set_time_entry == self.ecu_set_time_cb.currentText():
self.set_time_lib[self._cur_set_time_entry] = self.ecu_set_time_sel_cb.currentText()
self._cur_set_time_entry = self.ecu_set_time_cb.currentText()
return
# Load the next one
try:
index = self.ecu_set_time_sel_cb.findText(self.set_time_lib[self.ecu_set_time_cb.currentText()])
self.ecu_set_time_sel_cb.setCurrentIndex(index)
except:
self.ecu_set_time_sel_cb.setCurrentIndex(0)
self._cur_set_time_entry = self.ecu_set_time_cb.currentText()
except:
pass
def _set_cb_changed(self):
try:
# Save old value
if self.ecu_set_te.text():
self.set_ecu_settings[self._cur_set_entry] = self.ecu_set_te.text()
# Load the next one
try:
self.ecu_set_te.setText(self.set_ecu_settings[self.ecu_set_cb.currentText()])
except:
self.ecu_set_te.setText('')
self._cur_set_entry = self.ecu_set_cb.currentText()
except:
pass
def _wrap(self, func, prime, second):
try:
el = func(prime)
return el
except:
return second
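# Illustrative behaviour (assumption): _wrap() is a safe cast helper, e.g.
#   self._wrap(int, '200', 0)  -> 200
#   self._wrap(int, 'abc', 0)  -> 0  (falls back on conversion errors)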
class SecLwAuthSecurityModuleAddWidget(AbstractAddPlugin):
''' This is the interface that connects the GUI and the actions to
execute '''
GUI_NAME = "Sec. Module"
GUI_ICON = os.getcwd() + r'/icons/secmod_ecu.png'
def __init__(self, parent):
AbstractAddPlugin.__init__(self, parent)
self._create_widgets(parent)
self.set_ecu_settings = {}
self.set_time_lib = {}
self.has_sec_mod_cert = False
self.id = uuid.uuid4()
self.ecu_group = None
def get_actions(self):
''' returns the actions shown in the
context menu '''
actions = {}
actions['reg_ecus'] = 'Register ECU Groups'
actions['valid_cert'] = 'Generate valid certificates'
return actions
def set_gui(self, mapp):
''' set the gui from the map received '''
try:
if "id_list" in mapp: self.id_list.setText(str(mapp["id_list"]))
if "send_buffer" in mapp: self.send_buf_textedit.setText(str(mapp["send_buffer"]))
if "rec_buffer" in mapp: self.rec_buf_textedit.setText(str(mapp["rec_buffer"]))
if "nr_ecus" in mapp: self.nr_ecus_textedit.setText(str(mapp["nr_ecus"]))
if "ecu_settings" in mapp: self.set_ecu_settings = mapp["ecu_settings"]
self._set_cb_changed()
if "ecu_timing" in mapp: self.set_time_lib = mapp["ecu_timing"]
index = self.ecu_set_time_sel_cb.findText(self.set_time_lib[self.ecu_set_time_cb.currentText()])
self.ecu_set_time_sel_cb.setCurrentIndex(index)
self._cur_set_time_entry = self.ecu_set_time_cb.currentText()
if "has_sec_mod_cert" in mapp: self.has_sec_mod_cert = mapp["has_sec_mod_cert"]
if not self.has_sec_mod_cert: self.has_sec_mod_cert_cb.setCurrentIndex(0)
except:
ECULogger().log_traceback()
def get_map(self):
mapping_values = {}
# Read the values from the gui and save them
# General Information
mapping_values["id_list"] = self._wrap(eval, self.id_list.text(), [])
mapping_values["send_buffer"] = self._wrap(int, self.send_buf_textedit.text(), 200)
mapping_values["rec_buffer"] = self._wrap(int, self.rec_buf_textedit.text(), 200)
mapping_values["nr_ecus"] = self._wrap(int, self.nr_ecus_textedit.text(), 0)
if self.ecu_set_te.text():
self.set_ecu_settings[self._cur_set_entry] = self.ecu_set_te.text()
mapping_values["ecu_settings"] = self.set_ecu_settings
# Certification and Timing
self.set_time_lib[self._cur_set_time_entry] = self.ecu_set_time_sel_cb.currentText() # Final entry
mapping_values['ecu_timing'] = self.set_time_lib
mapping_values['has_sec_mod_cert'] = self.has_sec_mod_cert
return mapping_values
def preprocess(self, env, mapp):
self.ecu_spec = SimpleECUSpec(mapp["id_list"] , mapp["send_buffer"], mapp["rec_buffer"])
for k in mapp["ecu_settings"]:
self.ecu_spec.set_ecu_setting(k, mapp["ecu_settings"][k])
self.ecu_group = api.ecu_sim_api.set_ecus(env, mapp["nr_ecus"], 'SecureECU', self.ecu_spec)
def main_process(self, env, mapp):
print("Main")
def postprocess(self, env, mapp):
print("Post")
def _create_widgets(self, parent):
# Layout
GBuilder().set_props(self, None, 100, 100)
main_lo = QVBoxLayout()
self.setLayout(main_lo)
# Title
main_lo.addWidget(GBuilder().label(parent, "<b>Description:</b>"))
hl = QHBoxLayout()
self.desc_label = GBuilder().label(parent, "Add a new Security Module. This module resembles the security module part in a Lightweight Authentication Mechanism.")
self.desc_label.setFixedWidth(400)
self.icon = GBuilder().image(parent, SecLwAuthSecurityModuleAddWidget.GUI_ICON, 2)
hl.addWidget(self.desc_label)
hl.addWidget(self.icon)
main_lo.addLayout(hl)
line = GBuilder().hor_line(parent)
main_lo.addWidget(line)
# Constructor Inputs
main_lo.addWidget(GBuilder().label(parent, "<b>General Information:</b>"))
lo0, self.id_list = GBuilder().label_text(parent, "List of IDs (optional):", label_width=120)
lo1, self.send_buf_textedit = GBuilder().label_text(parent, "Sending BufferSize:", label_width=120)
lo2, self.rec_buf_textedit = GBuilder().label_text(parent, "Receiving Buffer Size:", label_width=120)
lo3, self.nr_ecus_textedit = GBuilder().label_text(parent, "Number of ECUs:", label_width=120)
main_lo.addLayout(lo0)
main_lo.addLayout(lo1)
main_lo.addLayout(lo2)
main_lo.addLayout(lo3)
# ECU Settings
items = self._get_ecu_settings()
hl, self.ecu_set_cb, self.ecu_set_te = GBuilder().combobox_text(parent, items, self._set_cb_changed)
self._cur_set_entry = self.ecu_set_cb.currentText()
main_lo.addLayout(hl)
# Timing Mapping
line = GBuilder().hor_line(parent)
main_lo.addWidget(line)
lab = GBuilder().label(parent, "<b>Timing and Certification:</b>")
lab.setFixedHeight(20)
main_lo.addWidget(lab)
itm = StdSecurLwSecModTimingFunctions()
avail_items = itm.available_tags
items = itm.function_map.keys()
hl1 = QHBoxLayout()
self.ecu_set_time_cb = GBuilder().combobox(parent, items, self._set_time_cb_changed)
self._cur_set_time_entry = self.ecu_set_time_cb.currentText()
self.ecu_set_time_sel_cb = GBuilder().combobox(parent, avail_items, self._set_time_cb_changed)
self._cur_set_time_sel_entry = self.ecu_set_time_sel_cb.currentText()
hl1.addWidget(self.ecu_set_time_cb)
hl1.addWidget(self.ecu_set_time_sel_cb)
main_lo.addLayout(hl1)
# Certification (has a valid certificate or not)
# hl, self.has_sec_mod_cert_cb, lab = GBuilder().label_combobox(parent, "Has Security Module Certificate", ["Yes", "No"], self._has_sec_mod_cb_changed)
# main_lo.addLayout(hl)
def _get_ecu_settings(self):
sett = SecLwAuthSecurityModule()
sett.settings = sorted(sett.settings, key=lambda key: sett.settings[key])
return sett.settings
def _has_sec_mod_cb_changed(self):
try:
if self.has_sec_mod_cert_cb.currentText() == "Yes":
self.has_sec_mod_cert = True
else:
self.has_sec_mod_cert = False
except:
pass
def _set_time_cb_changed(self):
try:
# Save old value
if self._cur_set_time_entry == self.ecu_set_time_cb.currentText():
self.set_time_lib[self._cur_set_time_entry] = self.ecu_set_time_sel_cb.currentText()
self._cur_set_time_entry = self.ecu_set_time_cb.currentText()
return
# Load the next one
try:
index = self.ecu_set_time_sel_cb.findText(self.set_time_lib[self.ecu_set_time_cb.currentText()])
self.ecu_set_time_sel_cb.setCurrentIndex(index)
except:
self.ecu_set_time_sel_cb.setCurrentIndex(0)
self._cur_set_time_entry = self.ecu_set_time_cb.currentText()
except:
pass
def _set_cb_changed(self):
try:
# Save old value
if self.ecu_set_te.text():
self.set_ecu_settings[self._cur_set_entry] = self.ecu_set_te.text()
# Load the next one
try:
self.ecu_set_te.setText(self.set_ecu_settings[self.ecu_set_cb.currentText()])
except:
self.ecu_set_te.setText('')
self._cur_set_entry = self.ecu_set_cb.currentText()
except:
pass
def _wrap(self, func, prime, second):
try:
el = func(prime)
return el
except:
return second
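# Illustrative note (not part of the original plugin): _wrap applies `func` to
# `prime` and falls back to `second` on any exception. get_map() above relies
# on this to turn free-text GUI fields into typed values, for example:
#   self._wrap(int, "200", 200)    -> 200
#   self._wrap(int, "", 200)       -> 200   (int('') raises ValueError)
#   self._wrap(eval, "[1, 2]", []) -> [1, 2]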
class StdBusAddWidget(AbstractAddPlugin):
''' This is the interface that connects the GUI and the actions to
execute '''
GUI_NAME = "CAN BUS"
GUI_ICON = os.getcwd() + r'/icons/can.png'
def __init__(self, parent):
AbstractAddPlugin.__init__(self, parent)
self._create_widgets(parent)
self.parent = parent
self.id = uuid.uuid4()
self.bus_group = None
def set_gui(self, mapp):
''' set the gui from the map received '''
try:
if "id_list" in mapp: self.id_list.setText(str(mapp["id_list"]))
if "nr_busses" in mapp: self.nr_ecus_textedit.setText(str(mapp["nr_busses"]))
except:
ECULogger().log_traceback()
def get_map(self):
mapping_values = {}
# Read the values from the gui and save them
# General Information
mapping_values["id_list"] = self._wrap(eval, self.id_list.text(), [])
mapping_values["nr_busses"] = self._wrap(int, self.nr_ecus_textedit.text(), 0)
return mapping_values
def preprocess(self, env, mapp):
pass
def get_actions(self):
''' returns the connections that can be made '''
actions = {}
actions['connect_group'] = 'Connect ECU Group'
return actions
def execute_action(self, env_connect, *args):
pass
def main_process(self, env, mapp):
print("Main")
def postprocess(self, env, mapp):
print("Post")
def _create_widgets(self, parent):
# Layout
GBuilder().set_props(self, None, 100, 100)
main_lo = QVBoxLayout()
self.setLayout(main_lo)
# Title
main_lo.addWidget(GBuilder().label(parent, "<b>Description:</b>"))
hl = QHBoxLayout()
self.desc_label = GBuilder().label(parent, "Add a new Standard Bus. This CAN Bus is a simple implementation of an automotive link.")
self.desc_label.setFixedWidth(400)
self.icon = GBuilder().image(parent, StdBusAddWidget.GUI_ICON, 2)
hl.addWidget(self.desc_label)
hl.addWidget(self.icon)
main_lo.addLayout(hl)
line = GBuilder().hor_line(parent)
main_lo.addWidget(line)
# Constructor Inputs
main_lo.addWidget(GBuilder().label(parent, "<b>General Information:</b>"))
lo0, self.id_list = GBuilder().label_text(parent, "List of IDs (optional):", label_width=120)
lo1, self.nr_ecus_textedit = GBuilder().label_text(parent, "Number of Busses:", label_width=120)
main_lo.addLayout(lo0)
main_lo.addLayout(lo1)
def _wrap(self, func, prime, second):
try:
el = func(prime)
return el
except:
return second
|
|
"""
Functions that ignore NaN.
Functions
---------
- `nanmin` -- minimum non-NaN value
- `nanmax` -- maximum non-NaN value
- `nanargmin` -- index of minimum non-NaN value
- `nanargmax` -- index of maximum non-NaN value
- `nansum` -- sum of non-NaN values
- `nanmean` -- mean of non-NaN values
- `nanvar` -- variance of non-NaN values
- `nanstd` -- standard deviation of non-NaN values
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
__all__ = [
'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
'nanvar', 'nanstd'
]
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.
Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.
Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations of
NaNs, otherwise return None.
"""
is_new = not isinstance(a, np.ndarray)
if is_new:
a = np.array(a)
if not issubclass(a.dtype.type, np.inexact):
return a, None
if not is_new:
# need copy
a = np.array(a, subok=True)
mask = np.isnan(a)
np.copyto(a, val, where=mask)
return a, mask
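# Illustrative example (not part of the original module); output shown with an
# older NumPy repr:
#
#   >>> arr, mask = _replace_nan(np.array([1.0, np.nan, 3.0]), 0)
#   >>> arr
#   array([ 1.,  0.,  3.])
#   >>> mask
#   array([False,  True, False], dtype=bool)
#
# For non-inexact (e.g. integer) input no copy is made and the mask is None.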
def _copyto(a, val, mask):
"""
Replace values in `a` with `val` where `mask` is True. This differs from
copyto in that it will deal with the case where `a` is a numpy scalar.
Parameters
----------
a : ndarray or numpy scalar
Array or numpy scalar some of whose values are to be replaced
by val.
val : numpy scalar
Value used as a replacement.
mask : ndarray, scalar
Boolean array. Where True the corresponding element of `a` is
replaced by `val`. Broadcasts.
Returns
-------
res : ndarray, scalar
Array with elements replaced or scalar `val`.
"""
if isinstance(a, np.ndarray):
np.copyto(a, val, where=mask, casting='unsafe')
else:
a = a.dtype.type(val)
return a
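# Illustrative example (not part of the original module): an ndarray is
# modified in place, while a NumPy scalar is rebuilt from `val`:
#
#   >>> _copyto(np.array([1.0, 2.0]), np.nan, np.array([False, True]))
#   array([  1.,  nan])
#   >>> _copyto(np.float64(2.0), np.nan, True)
#   nan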
def _divide_by_count(a, b, out=None):
"""
Compute a/b ignoring invalid results. If `a` is an array the division
is done in place. If `a` is a scalar, then its type is preserved in the
output. If `out` is None, then `a` is used instead so that the
division is in place. Note that this is only called with `a` an inexact
type.
Parameters
----------
a : {ndarray, numpy scalar}
Numerator. Expected to be of inexact type but not checked.
b : {ndarray, numpy scalar}
Denominator.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
Returns
-------
ret : {ndarray, numpy scalar}
The return value is a/b. If `a` was an ndarray the division is done
in place. If `a` is a numpy scalar, the division preserves its type.
"""
with np.errstate(invalid='ignore'):
if isinstance(a, np.ndarray):
if out is None:
return np.divide(a, b, out=a, casting='unsafe')
else:
return np.divide(a, b, out=out, casting='unsafe')
else:
if out is None:
return a.dtype.type(a / b)
else:
# This is questionable, but currently a numpy scalar can
# be output to a zero dimensional array.
return np.divide(a, b, out=out, casting='unsafe')
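# Illustrative example (not part of the original module): for ndarray input the
# division happens in place, while a NumPy scalar keeps its scalar type:
#
#   >>> tot = np.array([3.0, 4.0])
#   >>> _divide_by_count(tot, np.array([3, 2]))
#   array([ 1.,  2.])
#   >>> _divide_by_count(np.float32(3.0), 2)
#   1.5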
def nanmin(a, axis=None, out=None, keepdims=False):
"""
Return minimum of an array or minimum along an axis, ignoring any NaNs.
When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
NaN is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original `a`.
.. versionadded:: 1.8.0
Returns
-------
nanmin : ndarray
An array with the same shape as `a`, with the specified axis
removed. If `a` is a 0-d array, or if axis is None, an ndarray
scalar is returned. The same dtype as `a` is returned.
See Also
--------
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
amin :
The minimum value of an array along a given axis, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amax, fmax, maximum
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
If the input has an integer type the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
if not isinstance(a, np.ndarray) or type(a) is np.ndarray:
# Fast, but not safe for subclasses of ndarray
res = np.fmin.reduce(a, axis=axis, out=out, keepdims=keepdims)
if np.isnan(res).any():
warnings.warn("All-NaN axis encountered", RuntimeWarning)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, +np.inf)
res = np.amin(a, axis=axis, out=out, keepdims=keepdims)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, keepdims=keepdims)
if np.any(mask):
res = _copyto(res, mask, np.nan)
warnings.warn("All-NaN axis encountered", RuntimeWarning)
return res
def nanmax(a, axis=None, out=None, keepdims=False):
"""
Return the maximum of an array or maximum along an axis, ignoring any
NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
raised and NaN is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original `a`.
.. versionadded:: 1.8.0
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
If `a` is a 0-d array, or if axis is None, an ndarray scalar is
returned. The same dtype as `a` is returned.
See Also
--------
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
amax :
The maximum value of an array along a given axis, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amin, fmin, minimum
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
If the input has an integer type the function is equivalent to np.max.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
if not isinstance(a, np.ndarray) or type(a) is np.ndarray:
# Fast, but not safe for subclasses of ndarray
res = np.fmax.reduce(a, axis=axis, out=out, keepdims=keepdims)
if np.isnan(res).any():
warnings.warn("All-NaN slice encountered", RuntimeWarning)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, -np.inf)
res = np.amax(a, axis=axis, out=out, keepdims=keepdims)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, keepdims=keepdims)
if np.any(mask):
res = _copyto(res, mask, np.nan)
warnings.warn("All-NaN axis encountered", RuntimeWarning)
return res
def nanargmin(a, axis=None):
"""
Return the indices of the minimum values in the specified axis ignoring
NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
cannot be trusted if a slice contains only NaNs and Infs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
a, mask = _replace_nan(a, np.inf)
res = np.argmin(a, axis=axis)
if mask is not None:
mask = np.all(mask, axis=axis)
if np.any(mask):
raise ValueError("All-NaN slice encountered")
return res
def nanargmax(a, axis=None):
"""
Return the indices of the maximum values in the specified axis ignoring
NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the
results cannot be trusted if a slice contains only NaNs and -Infs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmax, nanargmin
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmax(a)
0
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
array([1, 0])
>>> np.nanargmax(a, axis=1)
array([1, 1])
"""
a, mask = _replace_nan(a, -np.inf)
res = np.argmax(a, axis=axis)
if mask is not None:
mask = np.all(mask, axis=axis)
if np.any(mask):
raise ValueError("All-NaN slice encountered")
return res
def nansum(a, axis=None, dtype=None, out=None, keepdims=0):
"""
Return the sum of array elements over a given axis treating Not a
Numbers (NaNs) as zero.
FutureWarning: In Numpy versions <= 1.8 NaN is returned for slices that
are all-NaN or empty. In later versions zero will be returned.
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the sum is computed. The default is to compute the
sum of the flattened array.
dtype : data-type, optional
The type of the returned array and of the accumulator in which the
elements are summed. By default, the dtype of `a` is used. An
exception is when `a` has an integer type with less precision than
the platform (u)intp. In that case, the default will be either
(u)int32 or (u)int64 depending on whether the platform is 32 or 64
bits. For inexact inputs, dtype must be inexact.
.. versionadded:: 1.8.0
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details. The casting of NaN to integer can yield
unexpected results.
.. versionadded:: 1.8.0
keepdims : bool, optional
If True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will
broadcast correctly against the original `arr`.
.. versionadded:: 1.8.0
Returns
-------
y : ndarray or numpy scalar
See Also
--------
numpy.sum : Sum across array propagating NaNs.
isnan : Show which elements are NaN.
isfinite: Show which elements are not NaN or +/-inf.
Notes
-----
If both positive and negative infinity are present, the sum will be Not
A Number (NaN).
Numpy integer arithmetic is modular. If the size of a sum exceeds the
size of an integer accumulator, its value will wrap around and the
result will be incorrect. Specifying ``dtype=double`` can alleviate
that problem.
Examples
--------
>>> np.nansum(1)
1
>>> np.nansum([1])
1
>>> np.nansum([1, np.nan])
1.0
>>> a = np.array([[1, 1], [1, np.nan]])
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
array([ 2., 1.])
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
>>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
nan
"""
a, mask = _replace_nan(a, 0)
if mask is None:
return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
mask = np.all(mask, axis=axis, keepdims=keepdims)
tot = np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
if np.any(mask):
tot = _copyto(tot, np.nan, mask)
warnings.warn("In Numpy 1.9 the sum along empty slices will be zero.",
FutureWarning)
return tot
def nanmean(a, axis=None, dtype=None, out=None, keepdims=False):
"""
Compute the arithmetic mean along the specified axis, ignoring NaNs.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for inexact inputs, it is the same as the input
dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original `arr`.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned. NaN is
returned for slices that contain only NaNs.
See Also
--------
average : Weighted average
mean : Arithmetic mean taken while not ignoring NaNs
var, nanvar
Notes
-----
The arithmetic mean is the sum of the non-NaN elements along the axis
divided by the number of non-NaN elements.
Note that for floating-point input, the mean is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32`. Specifying a
higher-precision accumulator using the `dtype` keyword can alleviate
this issue.
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
>>> np.nanmean(a)
2.6666666666666665
>>> np.nanmean(a, axis=0)
array([ 2., 4.])
>>> np.nanmean(a, axis=1)
array([ 1., 3.5])
"""
arr, mask = _replace_nan(a, 0)
if mask is None:
return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
if dtype is not None:
dtype = np.dtype(dtype)
if dtype is not None and not issubclass(dtype.type, np.inexact):
raise TypeError("If a is inexact, then dtype must be inexact")
if out is not None and not issubclass(out.dtype.type, np.inexact):
raise TypeError("If a is inexact, then out must be inexact")
# The warning context speeds things up.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims)
tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
avg = _divide_by_count(tot, cnt, out=out)
isbad = (cnt == 0)
if isbad.any():
warnings.warn("Mean of empty slice", RuntimeWarning)
# NaN is the only possible bad value, so no further
# action is needed to handle bad results.
return avg
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""
Compute the variance along the specified axis, while ignoring NaNs.
Returns the variance of the array elements, a measure of the spread of
a distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
For all-NaN slices or slices with zero degrees of freedom, NaN is
returned and a `RuntimeWarning` is raised.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the variance is computed. The default is to compute
the variance of the flattened array.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of non-NaN
elements. By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
variance : ndarray, see dtype parameter above
If `out` is None, return a new array containing the variance,
otherwise return a reference to the output array. If ddof is >= the
number of non-NaN elements in a slice or the slice contains only
NaNs, then the result for that slice is NaN.
See Also
--------
std : Standard deviation
mean : Average
var : Variance while not ignoring NaNs
nanstd, nanmean
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite
population. ``ddof=0`` provides a maximum likelihood estimate of the
variance for normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
>>> np.nanvar(a)
1.5555555555555554
>>> np.nanvar(a, axis=0)
array([ 1., 0.])
>>> np.nanvar(a, axis=1)
array([ 0., 0.25])
"""
arr, mask = _replace_nan(a, 0)
if mask is None:
return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if dtype is not None:
dtype = np.dtype(dtype)
if dtype is not None and not issubclass(dtype.type, np.inexact):
raise TypeError("If a is inexact, then dtype must be inexact")
if out is not None and not issubclass(out.dtype.type, np.inexact):
raise TypeError("If a is inexact, then out must be inexact")
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# Compute mean
cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=True)
avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=True)
avg = _divide_by_count(avg, cnt)
# Compute squared deviation from mean.
arr -= avg
arr = _copyto(arr, 0, mask)
if issubclass(arr.dtype.type, np.complexfloating):
sqr = np.multiply(arr, arr.conj(), out=arr).real
else:
sqr = np.multiply(arr, arr, out=arr)
# Compute variance.
var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
if var.ndim < cnt.ndim:
# Subclasses of ndarray may ignore keepdims, so check here.
cnt = cnt.squeeze(axis)
dof = cnt - ddof
var = _divide_by_count(var, dof)
isbad = (dof <= 0)
if np.any(isbad):
warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning)
# NaN, inf, or negative numbers are all possible bad
# values, so explicitly replace them with NaN.
var = _copyto(var, np.nan, isbad)
return var
def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""
Compute the standard deviation along the specified axis, while
ignoring NaNs.
Returns the standard deviation, a measure of the spread of a
distribution, of the non-NaN array elements. The standard deviation is
computed for the flattened array by default, otherwise over the
specified axis.
For all-NaN slices or slices with zero degrees of freedom, NaN is
returned and a `RuntimeWarning` is raised.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Calculate the standard deviation of the non-NaN values.
axis : int, optional
Axis along which the standard deviation is computed. The default is
to compute the standard deviation of the flattened array.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it
is the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the
calculated values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of non-NaN
elements. By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard
deviation, otherwise return a reference to the output array. If
ddof is >= the number of non-NaN elements in a slice or the slice
contains only NaNs, then the result for that slice is NaN.
See Also
--------
var, mean, std
nanvar, nanmean
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as
``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is
specified, the divisor ``N - ddof`` is used instead. In standard
statistical practice, ``ddof=1`` provides an unbiased estimator of the
variance of the infinite population. ``ddof=0`` provides a maximum
likelihood estimate of the variance for normally distributed variables.
The standard deviation computed in this function is the square root of
the estimated variance, so even with ``ddof=1``, it will not be an
unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute value before
squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example
below). Specifying a higher-accuracy accumulator using the `dtype`
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
>>> np.nanstd(a)
1.247219128924647
>>> np.nanstd(a, axis=0)
array([ 1., 0.])
>>> np.nanstd(a, axis=1)
array([ 0., 0.5])
"""
var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if isinstance(var, np.ndarray):
std = np.sqrt(var, out=var)
else:
std = var.dtype.type(np.sqrt(var))
return std
|
|
"""HTTP websocket server functional tests"""
import asyncio
import pytest
import aiohttp
from aiohttp import web
from aiohttp.http import WSMsgType
@pytest.fixture
def ceil(mocker):
def ceil(val):
return val
mocker.patch('aiohttp.helpers.ceil').side_effect = ceil
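# Added comment (behaviour inferred, not stated in the original file): aiohttp
# uses ``aiohttp.helpers.ceil`` to round timer deadlines up so nearby timeouts
# can be coalesced; patching it to the identity function lets the sub-second
# ``timeout``, ``receive_timeout`` and ``heartbeat`` values in the tests below
# fire at (approximately) their nominal values.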
async def test_websocket_can_prepare(loop, aiohttp_client):
async def handler(request):
ws = web.WebSocketResponse()
if not ws.can_prepare(request):
raise web.HTTPUpgradeRequired()
return web.Response()
app = web.Application()
app.router.add_route('GET', '/', handler)
client = await aiohttp_client(app)
resp = await client.get('/')
assert resp.status == 426
async def test_websocket_json(loop, aiohttp_client):
async def handler(request):
ws = web.WebSocketResponse()
if not ws.can_prepare(request):
return web.HTTPUpgradeRequired()
await ws.prepare(request)
msg = await ws.receive()
msg_json = msg.json()
answer = msg_json['test']
await ws.send_str(answer)
await ws.close()
return ws
app = web.Application()
app.router.add_route('GET', '/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/')
expected_value = 'value'
payload = '{"test": "%s"}' % expected_value
await ws.send_str(payload)
resp = await ws.receive()
assert resp.data == expected_value
async def test_websocket_json_invalid_message(loop, aiohttp_client):
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
try:
await ws.receive_json()
except ValueError:
await ws.send_str('ValueError was raised')
else:
raise Exception('No Exception')
finally:
await ws.close()
return ws
app = web.Application()
app.router.add_route('GET', '/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/')
payload = 'NOT A VALID JSON STRING'
await ws.send_str(payload)
data = await ws.receive_str()
assert 'ValueError was raised' in data
async def test_websocket_send_json(loop, aiohttp_client):
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
data = await ws.receive_json()
await ws.send_json(data)
await ws.close()
return ws
app = web.Application()
app.router.add_route('GET', '/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/')
expected_value = 'value'
await ws.send_json({'test': expected_value})
data = await ws.receive_json()
assert data['test'] == expected_value
async def test_websocket_receive_json(loop, aiohttp_client):
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
data = await ws.receive_json()
answer = data['test']
await ws.send_str(answer)
await ws.close()
return ws
app = web.Application()
app.router.add_route('GET', '/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/')
expected_value = 'value'
payload = '{"test": "%s"}' % expected_value
await ws.send_str(payload)
resp = await ws.receive()
assert resp.data == expected_value
async def test_send_recv_text(loop, aiohttp_client):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
msg = await ws.receive_str()
await ws.send_str(msg+'/answer')
await ws.close()
closed.set_result(1)
return ws
app = web.Application()
app.router.add_route('GET', '/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/')
await ws.send_str('ask')
msg = await ws.receive()
assert msg.type == aiohttp.WSMsgType.TEXT
assert 'ask/answer' == msg.data
msg = await ws.receive()
assert msg.type == aiohttp.WSMsgType.CLOSE
assert msg.data == 1000
assert msg.extra == ''
assert ws.closed
assert ws.close_code == 1000
await closed
async def test_send_recv_bytes(loop, aiohttp_client):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
msg = await ws.receive_bytes()
await ws.send_bytes(msg+b'/answer')
await ws.close()
closed.set_result(1)
return ws
app = web.Application()
app.router.add_route('GET', '/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/')
await ws.send_bytes(b'ask')
msg = await ws.receive()
assert msg.type == aiohttp.WSMsgType.BINARY
assert b'ask/answer' == msg.data
msg = await ws.receive()
assert msg.type == aiohttp.WSMsgType.CLOSE
assert msg.data == 1000
assert msg.extra == ''
assert ws.closed
assert ws.close_code == 1000
await closed
async def test_send_recv_json(loop, aiohttp_client):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
data = await ws.receive_json()
await ws.send_json({'response': data['request']})
await ws.close()
closed.set_result(1)
return ws
app = web.Application()
app.router.add_route('GET', '/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/')
await ws.send_str('{"request": "test"}')
msg = await ws.receive()
data = msg.json()
assert msg.type == aiohttp.WSMsgType.TEXT
assert data['response'] == 'test'
msg = await ws.receive()
assert msg.type == aiohttp.WSMsgType.CLOSE
assert msg.data == 1000
assert msg.extra == ''
await ws.close()
await closed
async def test_close_timeout(loop, aiohttp_client):
aborted = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse(timeout=0.1)
await ws.prepare(request)
assert 'request' == (await ws.receive_str())
await ws.send_str('reply')
begin = ws._loop.time()
assert (await ws.close())
elapsed = ws._loop.time() - begin
assert elapsed < 0.201, \
    'close() should have returned within ' \
    'at most 2x the timeout.'
assert ws.close_code == 1006
assert isinstance(ws.exception(), asyncio.TimeoutError)
aborted.set_result(1)
return ws
app = web.Application()
app.router.add_route('GET', '/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/')
await ws.send_str('request')
assert 'reply' == (await ws.receive_str())
# The server closes here. Then the client sends bogus messages with an
# interval shorter than the server-side close timeout, to try to make the
# server hang indefinitely.
await asyncio.sleep(0.08, loop=loop)
msg = await ws._reader.read()
assert msg.type == WSMsgType.CLOSE
await ws.send_str('hang')
# it is not entirely clear what this part exercises;
# under uvloop this code raises RuntimeError
try:
await asyncio.sleep(0.08, loop=loop)
await ws.send_str('hang')
await asyncio.sleep(0.08, loop=loop)
await ws.send_str('hang')
await asyncio.sleep(0.08, loop=loop)
await ws.send_str('hang')
except RuntimeError:
pass
await asyncio.sleep(0.08, loop=loop)
assert (await aborted)
await ws.close()
async def test_concurrent_close(loop, aiohttp_client):
srv_ws = None
async def handler(request):
nonlocal srv_ws
ws = srv_ws = web.WebSocketResponse(
autoclose=False, protocols=('foo', 'bar'))
await ws.prepare(request)
msg = await ws.receive()
assert msg.type == WSMsgType.CLOSING
msg = await ws.receive()
assert msg.type == WSMsgType.CLOSING
await asyncio.sleep(0, loop=loop)
msg = await ws.receive()
assert msg.type == WSMsgType.CLOSED
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/', autoclose=False,
protocols=('eggs', 'bar'))
await srv_ws.close(code=1007)
msg = await ws.receive()
assert msg.type == WSMsgType.CLOSE
await asyncio.sleep(0, loop=loop)
msg = await ws.receive()
assert msg.type == WSMsgType.CLOSED
async def test_auto_pong_with_closing_by_peer(loop, aiohttp_client):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
await ws.receive()
msg = await ws.receive()
assert msg.type == WSMsgType.CLOSE
assert msg.data == 1000
assert msg.extra == 'exit message'
closed.set_result(None)
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/', autoclose=False, autoping=False)
await ws.ping()
await ws.send_str('ask')
msg = await ws.receive()
assert msg.type == WSMsgType.PONG
await ws.close(code=1000, message='exit message')
await closed
async def test_ping(loop, aiohttp_client):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
await ws.ping('data')
await ws.receive()
closed.set_result(None)
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/', autoping=False)
msg = await ws.receive()
assert msg.type == WSMsgType.PING
assert msg.data == b'data'
await ws.pong()
await ws.close()
await closed
async def test_client_ping(loop, aiohttp_client):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
await ws.receive()
closed.set_result(None)
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/', autoping=False)
await ws.ping('data')
msg = await ws.receive()
assert msg.type == WSMsgType.PONG
assert msg.data == b'data'
await ws.pong()
await ws.close()
async def test_pong(loop, aiohttp_client):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse(autoping=False)
await ws.prepare(request)
msg = await ws.receive()
assert msg.type == WSMsgType.PING
await ws.pong('data')
msg = await ws.receive()
assert msg.type == WSMsgType.CLOSE
assert msg.data == 1000
assert msg.extra == 'exit message'
closed.set_result(None)
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/', autoping=False)
await ws.ping('data')
msg = await ws.receive()
assert msg.type == WSMsgType.PONG
assert msg.data == b'data'
await ws.close(code=1000, message='exit message')
await closed
async def test_change_status(loop, aiohttp_client):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse()
ws.set_status(200)
assert 200 == ws.status
await ws.prepare(request)
assert 101 == ws.status
await ws.close()
closed.set_result(None)
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/', autoping=False)
await ws.close()
await closed
await ws.close()
async def test_handle_protocol(loop, aiohttp_client):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse(protocols=('foo', 'bar'))
await ws.prepare(request)
await ws.close()
assert 'bar' == ws.ws_protocol
closed.set_result(None)
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/', protocols=('eggs', 'bar'))
await ws.close()
await closed
async def test_server_close_handshake(loop, aiohttp_client):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse(protocols=('foo', 'bar'))
await ws.prepare(request)
await ws.close()
closed.set_result(None)
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/', autoclose=False,
protocols=('eggs', 'bar'))
msg = await ws.receive()
assert msg.type == WSMsgType.CLOSE
await ws.close()
await closed
async def test_client_close_handshake(loop, aiohttp_client, ceil):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse(
autoclose=False, protocols=('foo', 'bar'))
await ws.prepare(request)
msg = await ws.receive()
assert msg.type == WSMsgType.CLOSE
assert not ws.closed
await ws.close()
assert ws.closed
assert ws.close_code == 1007
msg = await ws.receive()
assert msg.type == WSMsgType.CLOSED
closed.set_result(None)
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/', autoclose=False,
protocols=('eggs', 'bar'))
await ws.close(code=1007)
msg = await ws.receive()
assert msg.type == WSMsgType.CLOSED
await closed
async def test_server_close_handshake_server_eats_client_messages(
loop, aiohttp_client
):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse(protocols=('foo', 'bar'))
await ws.prepare(request)
await ws.close()
closed.set_result(None)
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/', autoclose=False, autoping=False,
protocols=('eggs', 'bar'))
msg = await ws.receive()
assert msg.type == WSMsgType.CLOSE
await ws.send_str('text')
await ws.send_bytes(b'bytes')
await ws.ping()
await ws.close()
await closed
async def test_receive_timeout(loop, aiohttp_client):
raised = False
async def handler(request):
ws = web.WebSocketResponse(receive_timeout=0.1)
await ws.prepare(request)
try:
await ws.receive()
except asyncio.TimeoutError:
nonlocal raised
raised = True
await ws.close()
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/')
await ws.receive()
await ws.close()
assert raised
async def test_custom_receive_timeout(loop, aiohttp_client):
raised = False
async def handler(request):
ws = web.WebSocketResponse(receive_timeout=None)
await ws.prepare(request)
try:
await ws.receive(0.1)
except asyncio.TimeoutError:
nonlocal raised
raised = True
await ws.close()
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/')
await ws.receive()
await ws.close()
assert raised
async def test_heartbeat(loop, aiohttp_client, ceil):
async def handler(request):
ws = web.WebSocketResponse(heartbeat=0.05)
await ws.prepare(request)
await ws.receive()
await ws.close()
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/', autoping=False)
msg = await ws.receive()
assert msg.type == aiohttp.WSMsgType.ping
await ws.close()
async def test_heartbeat_no_pong(loop, aiohttp_client, ceil):
cancelled = False
async def handler(request):
nonlocal cancelled
ws = web.WebSocketResponse(heartbeat=0.05)
await ws.prepare(request)
try:
await ws.receive()
except asyncio.CancelledError:
cancelled = True
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/', autoping=False)
msg = await ws.receive()
assert msg.type == aiohttp.WSMsgType.ping
await ws.receive()
assert cancelled
async def test_server_ws_async_for(loop, aiohttp_server):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
async for msg in ws:
assert msg.type == aiohttp.WSMsgType.TEXT
s = msg.data
await ws.send_str(s + '/answer')
await ws.close()
closed.set_result(1)
return ws
app = web.Application()
app.router.add_route('GET', '/', handler)
server = await aiohttp_server(app)
async with aiohttp.ClientSession(loop=loop) as sm:
async with sm.ws_connect(server.make_url('/')) as resp:
items = ['q1', 'q2', 'q3']
for item in items:
await resp.send_str(item)
msg = await resp.receive()
assert msg.type == aiohttp.WSMsgType.TEXT
assert item + '/answer' == msg.data
await resp.close()
await closed
async def test_closed_async_for(loop, aiohttp_client):
closed = loop.create_future()
async def handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
messages = []
async for msg in ws:
messages.append(msg)
if 'stop' == msg.data:
await ws.send_str('stopping')
await ws.close()
assert 1 == len(messages)
assert messages[0].type == WSMsgType.TEXT
assert messages[0].data == 'stop'
closed.set_result(None)
return ws
app = web.Application()
app.router.add_get('/', handler)
client = await aiohttp_client(app)
ws = await client.ws_connect('/')
await ws.send_str('stop')
msg = await ws.receive()
assert msg.type == WSMsgType.TEXT
assert msg.data == 'stopping'
await ws.close()
await closed
async def test_websocket_disable_keepalive(loop, aiohttp_client):
async def handler(request):
ws = web.WebSocketResponse()
if not ws.can_prepare(request):
return web.Response(text='OK')
assert request.protocol._keepalive
await ws.prepare(request)
assert not request.protocol._keepalive
assert not request.protocol._keepalive_handle
await ws.send_str('OK')
await ws.close()
return ws
app = web.Application()
app.router.add_route('GET', '/', handler)
client = await aiohttp_client(app)
resp = await client.get('/')
txt = await resp.text()
assert txt == 'OK'
ws = await client.ws_connect('/')
data = await ws.receive_str()
assert data == 'OK'
|
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import caffe_pb2
import flask
import lmdb
import PIL.Image
from .forms import ImageClassificationDatasetForm
from .job import ImageClassificationDatasetJob
from digits import utils
from digits.dataset import tasks
from digits.utils.forms import fill_form_if_cloned, save_form_to_job
from digits.utils.routing import request_wants_json, job_from_request
from digits.webapp import scheduler
blueprint = flask.Blueprint(__name__, __name__)
def from_folders(job, form):
"""
Add tasks for creating a dataset by parsing folders of images
"""
job.labels_file = utils.constants.LABELS_FILE
### Add ParseFolderTask
percent_val = form.folder_pct_val.data
val_parents = []
if form.has_val_folder.data:
percent_val = 0
percent_test = form.folder_pct_test.data
test_parents = []
if form.has_test_folder.data:
percent_test = 0
min_per_class = form.folder_train_min_per_class.data
max_per_class = form.folder_train_max_per_class.data
parse_train_task = tasks.ParseFolderTask(
job_dir = job.dir(),
folder = form.folder_train.data,
percent_val = percent_val,
percent_test = percent_test,
min_per_category = min_per_class if min_per_class>0 else 1,
max_per_category = max_per_class if max_per_class>0 else None
)
job.tasks.append(parse_train_task)
# set parents
if not form.has_val_folder.data:
val_parents = [parse_train_task]
if not form.has_test_folder.data:
test_parents = [parse_train_task]
if form.has_val_folder.data:
min_per_class = form.folder_val_min_per_class.data
max_per_class = form.folder_val_max_per_class.data
parse_val_task = tasks.ParseFolderTask(
job_dir = job.dir(),
parents = parse_train_task,
folder = form.folder_val.data,
percent_val = 100,
percent_test = 0,
min_per_category = min_per_class if min_per_class>0 else 1,
max_per_category = max_per_class if max_per_class>0 else None
)
job.tasks.append(parse_val_task)
val_parents = [parse_val_task]
if form.has_test_folder.data:
min_per_class = form.folder_test_min_per_class.data
max_per_class = form.folder_test_max_per_class.data
parse_test_task = tasks.ParseFolderTask(
job_dir = job.dir(),
parents = parse_train_task,
folder = form.folder_test.data,
percent_val = 0,
percent_test = 100,
min_per_category = min_per_class if min_per_class>0 else 1,
max_per_category = max_per_class if max_per_class>0 else None
)
job.tasks.append(parse_test_task)
test_parents = [parse_test_task]
### Add CreateDbTasks
backend = form.backend.data
encoding = form.encoding.data
compression = form.compression.data
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
parents = parse_train_task,
input_file = utils.constants.TRAIN_FILE,
db_name = utils.constants.TRAIN_DB,
backend = backend,
image_dims = job.image_dims,
resize_mode = job.resize_mode,
encoding = encoding,
compression = compression,
mean_file = utils.constants.MEAN_FILE_CAFFE,
labels_file = job.labels_file,
)
)
if percent_val > 0 or form.has_val_folder.data:
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
parents = val_parents,
input_file = utils.constants.VAL_FILE,
db_name = utils.constants.VAL_DB,
backend = backend,
image_dims = job.image_dims,
resize_mode = job.resize_mode,
encoding = encoding,
compression = compression,
labels_file = job.labels_file,
)
)
if percent_test > 0 or form.has_test_folder.data:
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
parents = test_parents,
input_file = utils.constants.TEST_FILE,
db_name = utils.constants.TEST_DB,
backend = backend,
image_dims = job.image_dims,
resize_mode = job.resize_mode,
encoding = encoding,
compression = compression,
labels_file = job.labels_file,
)
)
def from_files(job, form):
"""
Add tasks for creating a dataset by reading textfiles
"""
### labels
if form.textfile_use_local_files.data:
job.labels_file = form.textfile_local_labels_file.data.strip()
else:
flask.request.files[form.textfile_labels_file.name].save(
os.path.join(job.dir(), utils.constants.LABELS_FILE)
)
job.labels_file = utils.constants.LABELS_FILE
shuffle = bool(form.textfile_shuffle.data)
backend = form.backend.data
encoding = form.encoding.data
compression = form.compression.data
### train
if form.textfile_use_local_files.data:
train_file = form.textfile_local_train_images.data.strip()
else:
flask.request.files[form.textfile_train_images.name].save(
os.path.join(job.dir(), utils.constants.TRAIN_FILE)
)
train_file = utils.constants.TRAIN_FILE
image_folder = form.textfile_train_folder.data.strip()
if not image_folder:
image_folder = None
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
input_file = train_file,
db_name = utils.constants.TRAIN_DB,
backend = backend,
image_dims = job.image_dims,
image_folder= image_folder,
resize_mode = job.resize_mode,
encoding = encoding,
compression = compression,
mean_file = utils.constants.MEAN_FILE_CAFFE,
labels_file = job.labels_file,
shuffle = shuffle,
)
)
### val
if form.textfile_use_val.data:
if form.textfile_use_local_files.data:
val_file = form.textfile_local_val_images.data.strip()
else:
flask.request.files[form.textfile_val_images.name].save(
os.path.join(job.dir(), utils.constants.VAL_FILE)
)
val_file = utils.constants.VAL_FILE
image_folder = form.textfile_val_folder.data.strip()
if not image_folder:
image_folder = None
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
input_file = val_file,
db_name = utils.constants.VAL_DB,
backend = backend,
image_dims = job.image_dims,
image_folder= image_folder,
resize_mode = job.resize_mode,
encoding = encoding,
compression = compression,
labels_file = job.labels_file,
shuffle = shuffle,
)
)
### test
if form.textfile_use_test.data:
if form.textfile_use_local_files.data:
test_file = form.textfile_local_test_images.data.strip()
else:
flask.request.files[form.textfile_test_images.name].save(
os.path.join(job.dir(), utils.constants.TEST_FILE)
)
test_file = utils.constants.TEST_FILE
image_folder = form.textfile_test_folder.data.strip()
if not image_folder:
image_folder = None
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
input_file = test_file,
db_name = utils.constants.TEST_DB,
backend = backend,
image_dims = job.image_dims,
image_folder= image_folder,
resize_mode = job.resize_mode,
encoding = encoding,
compression = compression,
labels_file = job.labels_file,
shuffle = shuffle,
)
)
@blueprint.route('/new', methods=['GET'])
@utils.auth.requires_login
def new():
"""
Returns a form for a new ImageClassificationDatasetJob
"""
form = ImageClassificationDatasetForm()
## Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
return flask.render_template('datasets/images/classification/new.html', form=form)
@blueprint.route('.json', methods=['POST'])
@blueprint.route('', methods=['POST'], strict_slashes=False)
@utils.auth.requires_login(redirect=False)
def create():
"""
Creates a new ImageClassificationDatasetJob
Returns JSON when requested: {job_id,name,status} or {errors:[]}
"""
form = ImageClassificationDatasetForm()
## Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
if not form.validate_on_submit():
if request_wants_json():
return flask.jsonify({'errors': form.errors}), 400
else:
return flask.render_template('datasets/images/classification/new.html', form=form), 400
job = None
try:
job = ImageClassificationDatasetJob(
username = utils.auth.get_username(),
name = form.dataset_name.data,
image_dims = (
int(form.resize_height.data),
int(form.resize_width.data),
int(form.resize_channels.data),
),
resize_mode = form.resize_mode.data
)
if form.method.data == 'folder':
from_folders(job, form)
elif form.method.data == 'textfile':
from_files(job, form)
else:
raise ValueError('method not supported')
## Save form data with the job so we can easily clone it later.
save_form_to_job(job, form)
scheduler.add_job(job)
if request_wants_json():
return flask.jsonify(job.json_dict())
else:
return flask.redirect(flask.url_for('digits.dataset.views.show', job_id=job.id()))
except:
if job:
scheduler.delete_job(job)
raise
def show(job):
"""
Called from digits.dataset.views.datasets_show()
"""
return flask.render_template('datasets/images/classification/show.html', job=job)
@blueprint.route('/summary', methods=['GET'])
def summary():
"""
Return a short HTML summary of a DatasetJob
"""
job = job_from_request()
return flask.render_template('datasets/images/classification/summary.html', dataset=job)
class DbReader(object):
"""
Reads a database
"""
def __init__(self, location):
"""
Arguments:
location -- where is the database
"""
self._db = lmdb.open(location,
map_size=1024**3, # 1GB
readonly=True, lock=False)
with self._db.begin() as txn:
self.total_entries = txn.stat()['entries']
def entries(self):
"""
Generator returning all entries in the DB
"""
with self._db.begin() as txn:
cursor = txn.cursor()
for item in cursor:
yield item
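# Illustrative usage (added for clarity; the database path is hypothetical):
#
#   reader = DbReader('/path/to/train_db')
#   for key, value in reader.entries():
#       datum = caffe_pb2.Datum()
#       datum.ParseFromString(value)
#       # decode datum.data / datum.label as done in explore() below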
@blueprint.route('/explore', methods=['GET'])
def explore():
"""
Returns a gallery consisting of the images of one of the dbs
"""
job = job_from_request()
# Get LMDB
db = flask.request.args.get('db', 'train')
if 'train' in db.lower():
task = job.train_db_task()
elif 'val' in db.lower():
task = job.val_db_task()
elif 'test' in db.lower():
    task = job.test_db_task()
else:
    task = None
if task is None:
raise ValueError('No create_db task for {0}'.format(db))
if task.status != 'D':
raise ValueError("This create_db task's status should be 'D' but is '{0}'".format(task.status))
if task.backend != 'lmdb':
raise ValueError("Backend is {0} while expected backend is lmdb".format(task.backend))
db_path = job.path(task.db_name)
labels = task.get_labels()
page = int(flask.request.args.get('page', 0))
size = int(flask.request.args.get('size', 25))
label = flask.request.args.get('label', None)
if label is not None:
try:
label = int(label)
label_str = labels[label]
except ValueError:
label = None
reader = DbReader(db_path)
count = 0
imgs = []
min_page = max(0, page - 5)
if label is None:
total_entries = reader.total_entries
else:
total_entries = task.distribution[str(label)]
max_page = min((total_entries-1) // size, page + 5)
pages = range(min_page, max_page + 1)
for key, value in reader.entries():
if count >= page*size:
datum = caffe_pb2.Datum()
datum.ParseFromString(value)
if label is None or datum.label == label:
if datum.encoded:
s = StringIO()
s.write(datum.data)
s.seek(0)
img = PIL.Image.open(s)
else:
import caffe.io
arr = caffe.io.datum_to_array(datum)
# CHW -> HWC
arr = arr.transpose((1,2,0))
if arr.shape[2] == 1:
# HWC -> HW
arr = arr[:,:,0]
elif arr.shape[2] == 3:
# BGR -> RGB
# XXX see issue #59
arr = arr[:,:,[2,1,0]]
img = PIL.Image.fromarray(arr)
imgs.append({"label":labels[datum.label], "b64": utils.image.embed_image_html(img)})
if label is None:
count += 1
else:
datum = caffe_pb2.Datum()
datum.ParseFromString(value)
if datum.label == int(label):
count += 1
if len(imgs) >= size:
break
return flask.render_template('datasets/images/classification/explore.html', page=page, size=size, job=job, imgs=imgs, labels=labels, pages=pages, label=label, total_entries=total_entries, db=db)
|
|
"""
JSON serializers for Company app
"""
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from sql_util.utils import SubqueryCount
from InvenTree.serializers import InvenTreeDecimalField
from InvenTree.serializers import InvenTreeImageSerializerField
from InvenTree.serializers import InvenTreeModelSerializer
from InvenTree.serializers import InvenTreeMoneySerializer
from part.serializers import PartBriefSerializer
from .models import Company
from .models import ManufacturerPart, ManufacturerPartParameter
from .models import SupplierPart, SupplierPriceBreak
from common.settings import currency_code_default, currency_code_mappings
class CompanyBriefSerializer(InvenTreeModelSerializer):
""" Serializer for Company object (limited detail) """
url = serializers.CharField(source='get_absolute_url', read_only=True)
image = serializers.CharField(source='get_thumbnail_url', read_only=True)
class Meta:
model = Company
fields = [
'pk',
'url',
'name',
'description',
'image',
]
class CompanySerializer(InvenTreeModelSerializer):
""" Serializer for Company object (full detail) """
@staticmethod
def annotate_queryset(queryset):
# Add count of parts manufactured
queryset = queryset.annotate(
parts_manufactured=SubqueryCount('manufactured_parts')
)
queryset = queryset.annotate(
parts_supplied=SubqueryCount('supplied_parts')
)
return queryset
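# Illustrative usage (not part of this module): an API view would typically
# apply the annotation before serializing, e.g.
#
#   queryset = CompanySerializer.annotate_queryset(Company.objects.all())
#   data = CompanySerializer(queryset, many=True).data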
url = serializers.CharField(source='get_absolute_url', read_only=True)
image = InvenTreeImageSerializerField(required=False, allow_null=True)
parts_supplied = serializers.IntegerField(read_only=True)
parts_manufactured = serializers.IntegerField(read_only=True)
currency = serializers.ChoiceField(
choices=currency_code_mappings(),
initial=currency_code_default,
help_text=_('Default currency used for this supplier'),
label=_('Currency Code'),
required=True,
)
class Meta:
model = Company
fields = [
'pk',
'url',
'name',
'description',
'website',
'phone',
'address',
'email',
'currency',
'contact',
'link',
'image',
'is_customer',
'is_manufacturer',
'is_supplier',
'notes',
'parts_supplied',
'parts_manufactured',
]
class ManufacturerPartSerializer(InvenTreeModelSerializer):
"""
Serializer for ManufacturerPart object
"""
part_detail = PartBriefSerializer(source='part', many=False, read_only=True)
manufacturer_detail = CompanyBriefSerializer(source='manufacturer', many=False, read_only=True)
pretty_name = serializers.CharField(read_only=True)
def __init__(self, *args, **kwargs):
part_detail = kwargs.pop('part_detail', True)
manufacturer_detail = kwargs.pop('manufacturer_detail', True)
prettify = kwargs.pop('pretty', False)
super(ManufacturerPartSerializer, self).__init__(*args, **kwargs)
if part_detail is not True:
self.fields.pop('part_detail')
if manufacturer_detail is not True:
self.fields.pop('manufacturer_detail')
if prettify is not True:
self.fields.pop('pretty_name')
manufacturer = serializers.PrimaryKeyRelatedField(queryset=Company.objects.filter(is_manufacturer=True))
class Meta:
model = ManufacturerPart
fields = [
'pk',
'part',
'part_detail',
'pretty_name',
'manufacturer',
'manufacturer_detail',
'description',
'MPN',
'link',
]
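# Illustrative usage (not part of this module): the keyword arguments popped in
# __init__ above let callers trim the nested data, e.g. for a hypothetical
# queryset `parts`:
#
#   ManufacturerPartSerializer(parts, many=True, part_detail=False, pretty=True)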
class ManufacturerPartParameterSerializer(InvenTreeModelSerializer):
"""
Serializer for the ManufacturerPartParameter model
"""
manufacturer_part_detail = ManufacturerPartSerializer(source='manufacturer_part', many=False, read_only=True)
def __init__(self, *args, **kwargs):
man_detail = kwargs.pop('manufacturer_part_detail', False)
super(ManufacturerPartParameterSerializer, self).__init__(*args, **kwargs)
if not man_detail:
self.fields.pop('manufacturer_part_detail')
class Meta:
model = ManufacturerPartParameter
fields = [
'pk',
'manufacturer_part',
'manufacturer_part_detail',
'name',
'value',
'units',
]
class SupplierPartSerializer(InvenTreeModelSerializer):
""" Serializer for SupplierPart object """
part_detail = PartBriefSerializer(source='part', many=False, read_only=True)
supplier_detail = CompanyBriefSerializer(source='supplier', many=False, read_only=True)
manufacturer_detail = CompanyBriefSerializer(source='manufacturer_part.manufacturer', many=False, read_only=True)
pretty_name = serializers.CharField(read_only=True)
def __init__(self, *args, **kwargs):
part_detail = kwargs.pop('part_detail', True)
supplier_detail = kwargs.pop('supplier_detail', True)
manufacturer_detail = kwargs.pop('manufacturer_detail', True)
prettify = kwargs.pop('pretty', False)
super(SupplierPartSerializer, self).__init__(*args, **kwargs)
if part_detail is not True:
self.fields.pop('part_detail')
if supplier_detail is not True:
self.fields.pop('supplier_detail')
if manufacturer_detail is not True:
self.fields.pop('manufacturer_detail')
if prettify is not True:
self.fields.pop('pretty_name')
supplier = serializers.PrimaryKeyRelatedField(queryset=Company.objects.filter(is_supplier=True))
manufacturer = serializers.CharField(read_only=True)
MPN = serializers.CharField(read_only=True)
manufacturer_part_detail = ManufacturerPartSerializer(source='manufacturer_part', read_only=True)
class Meta:
model = SupplierPart
fields = [
'description',
'link',
'manufacturer',
'manufacturer_detail',
'manufacturer_part',
'manufacturer_part_detail',
'MPN',
'note',
'pk',
'packaging',
'part',
'part_detail',
'pretty_name',
'SKU',
'supplier',
'supplier_detail',
]
def create(self, validated_data):
""" Extract manufacturer data and process ManufacturerPart """
# Create SupplierPart
supplier_part = super().create(validated_data)
# Get ManufacturerPart raw data (unvalidated)
manufacturer = self.initial_data.get('manufacturer', None)
MPN = self.initial_data.get('MPN', None)
if manufacturer and MPN:
kwargs = {
'manufacturer': manufacturer,
'MPN': MPN,
}
supplier_part.save(**kwargs)
return supplier_part
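# Illustrative payload sketch (values are made up; the field names come from the
# Meta class and the initial_data handling above): POSTing
#
#   {"part": 1, "supplier": 2, "SKU": "ABC-001", "manufacturer": 3, "MPN": "XYZ-9"}
#
# creates the SupplierPart first and then passes manufacturer/MPN to save() so
# that the matching ManufacturerPart can be linked or created.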
class SupplierPriceBreakSerializer(InvenTreeModelSerializer):
""" Serializer for SupplierPriceBreak object """
quantity = InvenTreeDecimalField()
price = InvenTreeMoneySerializer(
allow_null=True,
required=True,
label=_('Price'),
)
price_currency = serializers.ChoiceField(
choices=currency_code_mappings(),
default=currency_code_default,
label=_('Currency'),
)
class Meta:
model = SupplierPriceBreak
fields = [
'pk',
'part',
'quantity',
'price',
'price_currency',
]
|
|
#!/usr/bin/env python
# Copyright (c) 2014 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
This program reads one or more log files and generates summary information
for the time trace records they contain.
"""
from __future__ import division, print_function
from glob import glob
from optparse import OptionParser
import math
import os
import re
import string
import sys
# This variable collects all the times for all events, individually. It is
# a dictionary that maps from key names to a list containing all of the
# intervals for that event name (each interval is the elapsed time between
# the most recent previous event and this event).
eventIntervals = {}
# This variable collects information for all events relative to a given
# starting event (the --from command line option).
#
# relativeEvents:
# dictionary (event name => OccurrenceList)
#
# The same event may sometimes happen multiple times for a single occurrence
# of the starting event. An OccurrenceList is a list, where the nth entry
# describes all the events that occurred after n prior occurrences of the
# event.
# OccurrenceList:
# list (OccurrenceInfo)
#
# OccurrenceInfo:
# dictionary (
# times: list() One entry for each event: elapsed ns between
# the starting event and this event
# intervals: list() One entry for each event: elapsed ns between
# immediately preceding event and this event
# )
relativeEvents = {}
# This variable contains a count of the number of times each event has
# occurred since the last time the starting event occurred.
eventCount = {}
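# Illustrative sketch (not part of the original script) of what relativeEvents
# might look like after scanning, assuming a hypothetical starting event
# "handleRpc" that is followed twice by "sendReply" within one sequence:
#
#   relativeEvents = {
#       'sendReply': [
#           {'times': [120.0], 'intervals': [35.0]},   # first occurrence
#           {'times': [310.0], 'intervals': [15.0]},   # second occurrence
#       ],
#   }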
def scan(f, startingEvent):
"""
Scan the log file given by 'f' (handle for an open file) and collect
information from time trace records as described by the arguments.
If 'startingEvent' isn't None, it specifies an event indicating the
beginning of a related sequence of events; times are accumulated for all
other events, relative to the most recent occurrence of the starting event.
"""
foundStart = False
startTime = 0.0
lastTime = -1.0
for line in f:
match = re.match('.*TimeTrace\\.cc:.*printInternal.* '
'([0-9.]+) ns \(\+ *([0-9.]+) ns\): (.*)', line)
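# Example of a line this pattern is assumed to match (values are made up):
#   ... TimeTrace.cc:123 in printInternal ... 1234.5 ns (+   12.3 ns): starting RPC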
if not match:
continue
thisEventTime = float(match.group(1))
thisEventInterval = float(match.group(2))
thisEvent = match.group(3)
if (thisEventTime < lastTime):
print('Time went backwards at the following line:\n%s' % (line))
lastTime = thisEventTime
if (thisEventTime != 0.0) :
if not thisEvent in eventIntervals:
eventIntervals[thisEvent] = []
eventIntervals[thisEvent].append(thisEventInterval)
# print('%s %s %s' % (thisEventTime, thisEventInterval, thisEvent))
if startingEvent:
if thisEvent.find(startingEvent) >= 0:
# Reset variables to indicate that we are starting a new
# sequence of events from the starting event.
startTime = thisEventTime
foundStart = True
eventCount = {}
if not foundStart:
continue
# If we get here, it means that we have found an event that
# is not the starting event, and startTime indicates the time of
# the starting event. First, see how many times this event has
# occurred since the last occurrence of the starting event.
relativeTime = thisEventTime - startTime
# print('%.1f %.1f %s' % (relativeTime, thisEventInterval, thisEvent))
if thisEvent in eventCount:
count = eventCount[thisEvent] + 1
else:
count = 1
eventCount[thisEvent] = count
# print("Count for '%s': %d" % (thisEvent, count))
if not thisEvent in relativeEvents:
relativeEvents[thisEvent] = []
occurrences = relativeEvents[thisEvent]
while len(occurrences) < count:
occurrences.append({'times': [], 'intervals': []})
occurrences[count-1]['times'].append(relativeTime)
occurrences[count-1]['intervals'].append(thisEventInterval)
# Parse command line options
parser = OptionParser(description=
'Read one or more log files and summarize the time trace information '
'present in the file(s) as specified by the arguments.',
usage='%prog [options] file file ...',
conflict_handler='resolve')
parser.add_option('-f', '--from', type='string', dest='startEvent',
help='measure times for other events relative to FROM; FROM contains a '
'substring of an event')
(options, files) = parser.parse_args()
if len(files) == 0:
print("No log files given")
sys.exit(1)
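# Example invocation (illustrative; the event and file names are made up):
#   python timetrace.py --from 'handleRpc' client1.log client2.log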
for name in files:
scan(open(name), options.startEvent)
# Print information about all events, unless --from was specified.
if not options.startEvent:
# Do this in 2 passes. First, generate a string describing each
# event; then sort the list of messages and print.
# Each entry in the following variable will contain a list with
# 2 elements: time to use for sorting, and string to print.
outputInfo = []
# Compute the length of the longest event name.
nameLength = 0
for event in eventIntervals.keys():
nameLength = max(nameLength, len(event))
# Each iteration through the following loop processes one event name
for event in eventIntervals.keys():
intervals = eventIntervals[event]
intervals.sort()
medianTime = intervals[len(intervals)//2]
message = '%-*s %8.1f %8.1f %8.1f %8.1f %7d' % (nameLength,
event, medianTime, intervals[0], intervals[-1],
sum(intervals)/len(intervals), len(intervals))
outputInfo.append([medianTime, message])
# Pass 2: sort in order of median interval length, then print.
outputInfo.sort(key=lambda item: item[0], reverse=True)
print('%-*s Median Min Max Average Count' % (nameLength,
"Event"))
print('%s---------------------------------------------' %
('-' * nameLength))
for message in outputInfo:
print(message[1])
# Print output for the --from option. First, process each event occurrence,
# then sort them by elapsed time from the starting event.
if options.startEvent:
# Each entry in the following variable will contain a list with
# 2 elements: time to use for sorting, and string to print.
outputInfo = []
# Compute the length of the longest event name.
nameLength = 0
for event in relativeEvents.keys():
occurrences = relativeEvents[event]
thisLength = len(event)
if len(occurrences) > 1:
thisLength += len(' (#%d)' % (len(occurrences)))
nameLength = max(nameLength, thisLength)
# Each iteration through the following loop processes one event name
for event in relativeEvents.keys():
occurrences = relativeEvents[event]
# Each iteration through the following loop processes the nth
# occurrences of this event.
for i in range(len(occurrences)):
eventName = event
if i != 0:
eventName = '%s (#%d)' % (event, i+1)
times = occurrences[i]['times']
intervals = occurrences[i]['intervals']
times.sort()
medianTime = times[len(times)//2]
intervals.sort()
message = '%-*s %8.1f %8.1f %8.1f %8.1f %8.1f %7d' % (nameLength,
eventName, medianTime, times[0], times[-1],
sum(times)/len(times), intervals[len(intervals)//2],
len(times))
outputInfo.append([medianTime, message])
outputInfo.sort(key=lambda item: item[0])
print('%-*s Median Min Max Average Delta Count' % (nameLength,
"Event"))
print('%s------------------------------------------------------' %
('-' * nameLength))
for message in outputInfo:
print(message[1])
|
|
from __future__ import division, print_function
from functools import partial
from itertools import product
from itertools import chain
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _num_samples
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import zero_one_loss
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance to several input layouts.
#
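# Illustrative sketch (not part of the test suite): registering an extra metric
# so that the generic tests below exercise it automatically. The metric chosen
# here is only an example.
#
#   from sklearn.metrics import mean_squared_log_error
#   REGRESSION_METRICS["mean_squared_log_error"] = mean_squared_log_error
#
# Anything added to these dictionaries is aggregated into ALL_METRICS further
# down and picked up by tests such as test_sample_order_invariance and
# test_format_invariance_with_1d_vectors.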
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"balanced_accuracy_score": balanced_accuracy_score,
"adjusted_balanced_accuracy_score": partial(balanced_accuracy_score,
adjusted=True),
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
# `confusion_matrix` returns absolute values and hence behaves unnormalized.
# Naming it with an unnormalized_ prefix is necessary for this module to
# skip sample_weight scaling checks which will fail for unnormalized
# metrics.
"unnormalized_confusion_matrix": confusion_matrix,
"normalized_confusion_matrix": lambda *args, **kwargs: (
confusion_matrix(*args, **kwargs).astype('float') / confusion_matrix(
*args, **kwargs).sum(axis=1)[:, np.newaxis]
),
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
def precision_recall_curve_padded_thresholds(*args, **kwargs):
"""
The dimensions of precision-recall pairs and the threshold array as
returned by precision_recall_curve do not match. See
:func:`sklearn.metrics.precision_recall_curve`.
This prevents implicit conversion of the return value triple to a higher
dimensional np.array of dtype('float64') (it will be of dtype('object')
instead). This again is needed for assert_array_equal to work correctly.
As a workaround we pad the threshold array with NaN values to match
the dimension of precision and recall arrays respectively.
"""
precision, recall, thresholds = precision_recall_curve(*args, **kwargs)
pad_thresholds = len(precision) - len(thresholds)
return np.array([
precision,
recall,
np.pad(thresholds,
pad_width=(0, pad_thresholds),
mode='constant',
constant_values=[np.nan])
])
CURVE_METRICS = {
"roc_curve": roc_curve,
"precision_recall_curve": precision_recall_curve_padded_thresholds,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score, # default: average="macro"
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"partial_roc_auc": partial(roc_auc_score, max_fpr=0.5),
"average_precision_score":
average_precision_score, # default: average="macro"
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
ALL_METRICS.update(CURVE_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input arguments y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = {
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"coverage_error",
"label_ranking_loss",
"label_ranking_average_precision_score",
}
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = {
"brier_score_loss",
"roc_auc_score",
"micro_roc_auc",
"weighted_roc_auc",
"samples_roc_auc",
"partial_roc_auc",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"samples_average_precision_score",
# with default average='binary', multiclass is prohibited
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
# curves
"roc_curve",
"precision_recall_curve",
}
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = METRIC_UNDEFINED_BINARY.union(
METRIC_UNDEFINED_MULTICLASS)
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = {
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
}
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = {
"roc_auc_score", "average_precision_score", "partial_roc_auc",
}
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = {
"roc_curve",
"precision_recall_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"samples_average_precision_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
}
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = {
"unnormalized_confusion_matrix",
"normalized_confusion_matrix",
"roc_curve",
"precision_recall_curve",
"hamming_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
}
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = {
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
}
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = {
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "partial_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"coverage_error", "label_ranking_loss",
"label_ranking_average_precision_score",
}
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = {
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
}
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = {
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
}
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = {
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "micro_f1_score", "macro_f1_score",
"weighted_recall_score",
# P = R = F = accuracy in multiclass case
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
}
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = {
"balanced_accuracy_score",
"adjusted_balanced_accuracy_score",
"explained_variance_score",
"r2_score",
"unnormalized_confusion_matrix",
"normalized_confusion_matrix",
"roc_curve",
"precision_recall_curve",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
}
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = {
"median_absolute_error",
}
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(SYMMETRIC_METRICS.union(
NOT_SYMMETRIC_METRICS, set(THRESHOLDED_METRICS),
METRIC_UNDEFINED_BINARY_MULTICLASS),
set(ALL_METRICS))
assert_equal(
SYMMETRIC_METRICS.intersection(NOT_SYMMETRIC_METRICS),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_pred), metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
# use context manager to supply custom error message
with assert_raises(AssertionError) as cm:
assert_array_equal(metric(y_true, y_pred), metric(y_pred, y_true))
cm.msg = ("%s seems to be symmetric" % name)
@pytest.mark.parametrize(
'name',
set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)
def test_sample_order_invariance(name):
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
with ignore_warnings():
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant" % name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant" % name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant" % name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant" % name)
assert_allclose(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant" % name)
@pytest.mark.parametrize(
'name',
set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)
def test_format_invariance_with_1d_vectors(name):
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_array_equal(y1_1d.ndim, 1)
assert_array_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
with ignore_warnings():
metric = ALL_METRICS[name]
measure = metric(y1, y2)
assert_allclose(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant with list"
"" % name)
assert_allclose(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant with "
"np-array-1d" % name)
assert_allclose(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant with "
"np-array-column" % name)
# Mix format support
assert_allclose(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant with mix "
"np-array-1d and list" % name)
assert_allclose(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant with mix "
"np-array-1d and list" % name)
assert_allclose(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant with mix "
"np-array-1d and np-array-column" % name)
assert_allclose(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant with mix "
"np-array-1d and np-array-column" % name)
assert_allclose(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant with mix "
"list and np-array-column" % name)
assert_allclose(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant with mix "
"list and np-array-column" % name)
# These mixed representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS | THRESHOLDED_MULTILABEL_METRICS |
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@pytest.mark.parametrize(
'name',
set(CLASSIFICATION_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)
def test_classification_invariance_string_vs_numbers_labels(name):
# Ensure that classification metrics with string labels are invariant
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
with ignore_warnings():
metric = CLASSIFICATION_METRICS[name]
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
@pytest.mark.parametrize('name', THRESHOLDED_METRICS)
def test_thresholded_invariance_string_vs_numbers_labels(name):
# Ensure that thresholded metrics with string labels are invariant
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
pos_label_str = "spam"
with ignore_warnings():
metric = THRESHOLDED_METRICS[name]
if name not in METRIC_UNDEFINED_BINARY:
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
# TODO: these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
invalids = [([0, 1], [np.inf, np.inf]),
([0, 1], [np.nan, np.nan]),
([0, 1], [np.nan, np.inf])]
@pytest.mark.parametrize(
'metric',
chain(THRESHOLDED_METRICS.values(), REGRESSION_METRICS.values()))
def test_regression_thresholded_inf_nan_input(metric):
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"contains NaN, infinity",
metric, y_true, y_score)
@pytest.mark.parametrize('metric', CLASSIFICATION_METRICS.values())
def test_classification_inf_nan_input(metric):
# Classification metrics all raise a mixed input exception
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"Classification metrics can't handle a mix "
"of binary and continuous targets",
metric, y_true, y_score)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
@pytest.mark.parametrize(
'name',
(set(ALL_METRICS)
# Those metrics are not always defined with one sample
# or in multiclass classification
- METRIC_UNDEFINED_BINARY_MULTICLASS
- set(THRESHOLDED_METRICS)))
def test_single_sample(name):
check_single_sample(name)
@pytest.mark.parametrize('name', MULTIOUTPUT_METRICS | MULTILABELS_METRICS)
def test_single_sample_multioutput(name):
check_single_sample_multioutput(name)
@pytest.mark.parametrize('name', MULTIOUTPUT_METRICS)
def test_multioutput_number_of_output_differ(name):
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
@pytest.mark.parametrize('name', MULTIOUTPUT_METRICS)
def test_multioutput_regression_invariance_to_dimension_shuffling(name):
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_allclose(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling invariant" % (
name))
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 = np.vstack([y1, [[0] * n_classes]])
y2 = np.vstack([y2, [[0] * n_classes]])
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_allclose(metric(y1_sparse_indicator, y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance between "
"dense and sparse indicator formats." % name)
@pytest.mark.parametrize('name', MULTILABELS_METRICS)
def test_raise_value_error_multilabel_sequences(name):
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
@pytest.mark.parametrize('name', METRICS_WITH_NORMALIZE_OPTION)
def test_normalize_option_binary_classification(name):
# Test in the binary case
n_samples = 20
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_array_less(-1.0 * measure, 0,
err_msg="We failed to test correctly the normalize "
"option")
assert_allclose(metrics(y_true, y_pred, normalize=False) / n_samples,
measure)
@pytest.mark.parametrize('name', METRICS_WITH_NORMALIZE_OPTION)
def test_normalize_option_multiclass_classification(name):
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_array_less(-1.0 * measure, 0,
err_msg="We failed to test correctly the normalize "
"option")
assert_allclose(metrics(y_true, y_pred, normalize=False) / n_samples,
measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
# for both random_state 0 and 1, y_true and y_pred have at least one
# unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_array_less(-1.0 * measure, 0,
err_msg="We failed to test correctly the normalize "
"option")
assert_allclose(metrics(y_true, y_pred, normalize=False) / n_samples,
measure, err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_allclose(label_measure,
[metric(y_true_binarize[:, i], y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_allclose(micro_measure,
metric(y_true_binarize.ravel(), y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_allclose(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_allclose(weighted_measure,
np.average(label_measure, weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_allclose(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_allclose(sample_measure,
np.mean([metric(y_true_binarize[i], y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
@pytest.mark.parametrize('name', METRICS_WITH_AVERAGING)
def test_averaging_multiclass(name):
n_samples, n_classes = 50, 3
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
check_averaging(name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
@pytest.mark.parametrize(
'name', METRICS_WITH_AVERAGING | THRESHOLDED_METRICS_WITH_AVERAGING)
def test_averaging_multilabel(name):
n_samples, n_classes = 40, 5
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
check_averaging(name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
@pytest.mark.parametrize('name', METRICS_WITH_AVERAGING)
def test_averaging_multilabel_all_zeroes(name):
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
check_averaging(name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
def test_averaging_binary_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
@pytest.mark.parametrize('name', METRICS_WITH_AVERAGING)
def test_averaging_multilabel_all_ones(name):
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
check_averaging(name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights give the same score as no weights
unweighted_score = metric(y1, y2, sample_weight=None)
assert_allclose(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
# use context manager to supply custom error message
with assert_raises(AssertionError) as cm:
assert_allclose(unweighted_score, weighted_score)
cm.msg = ("Unweighted and weighted scores are unexpectedly almost "
"equal (%s) and (%s) for %s" % (unweighted_score,
weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_allclose(
weighted_score, weighted_score_list,
err_msg=("Weighted scores for array and list "
"sample_weight input are not equal (%s != %s) for %s") % (
weighted_score, weighted_score_list, name))
# check that integer weights are the same as repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_allclose(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_allclose(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%s != %s) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_allclose(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
# Check that a meaningful error is raised if the number of samples in
# y_true and sample_weight differ.
error_message = ("Found input variables with inconsistent numbers of "
"samples: [{}, {}, {}]".format(
_num_samples(y1), _num_samples(y2),
_num_samples(sample_weight) * 2))
assert_raise_message(ValueError, error_message, metric, y1, y2,
sample_weight=np.hstack([sample_weight,
sample_weight]))
@pytest.mark.parametrize(
'name',
(set(ALL_METRICS).intersection(set(REGRESSION_METRICS))
- METRICS_WITHOUT_SAMPLE_WEIGHT))
def test_regression_sample_weight_invariance(name):
n_samples = 50
random_state = check_random_state(0)
# regression
y_true = random_state.random_sample(size=(n_samples,))
y_pred = random_state.random_sample(size=(n_samples,))
metric = ALL_METRICS[name]
check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
'name',
(set(ALL_METRICS) - set(REGRESSION_METRICS)
- METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY))
def test_binary_sample_weight_invariance(name):
# binary
n_samples = 50
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
check_sample_weight_invariance(name, metric, y_true, y_score)
else:
check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
'name',
(set(ALL_METRICS) - set(REGRESSION_METRICS)
- METRICS_WITHOUT_SAMPLE_WEIGHT
- METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_multiclass_sample_weight_invariance(name):
# multiclass
n_samples = 50
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
check_sample_weight_invariance(name, metric, y_true, y_score)
else:
check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
'name',
(MULTILABELS_METRICS | THRESHOLDED_MULTILABEL_METRICS |
MULTIOUTPUT_METRICS) - METRICS_WITHOUT_SAMPLE_WEIGHT)
def test_multilabel_sample_weight_invariance(name):
# multilabel indicator
random_state = check_random_state(0)
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
check_sample_weight_invariance(name, metric, y_true, y_score)
else:
check_sample_weight_invariance(name, metric, y_true, y_pred)
@ignore_warnings
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
|
|
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2008 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
"""
API for OGC Web Services Common (OWS) constructs and metadata.
OWS Common: http://www.opengeospatial.org/standards/common
Currently supports version 1.1.0 (06-121r3).
"""
import logging
from owslib.etree import etree
from owslib import crs, util
from owslib.namespaces import Namespaces
LOGGER = logging.getLogger(__name__)
n = Namespaces()
OWS_NAMESPACE_1_0_0 = n.get_namespace("ows")
OWS_NAMESPACE_1_1_0 = n.get_namespace("ows110")
OWS_NAMESPACE_2_0_0 = n.get_namespace("ows200")
XSI_NAMESPACE = n.get_namespace("xsi")
XLINK_NAMESPACE = n.get_namespace("xlink")
DEFAULT_OWS_NAMESPACE = OWS_NAMESPACE_1_1_0 # Use this as default for OWSCommon objects
class OwsCommon(object):
"""Initialize OWS Common object"""
def __init__(self, version):
self.version = version
if version == '1.0.0':
self.namespace = OWS_NAMESPACE_1_0_0
elif version == '1.1.0':
self.namespace = OWS_NAMESPACE_1_1_0
else:
self.namespace = OWS_NAMESPACE_2_0_0
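# Illustrative usage (not from the original module): selecting the namespace
# for a given OWS Common version before parsing a capabilities document.
#
#   ows = OwsCommon('1.1.0')
#   assert ows.namespace == OWS_NAMESPACE_1_1_0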
class ServiceIdentification(object):
"""Initialize an OWS Common ServiceIdentification construct"""
def __init__(self, infoset, namespace=DEFAULT_OWS_NAMESPACE):
self._root = infoset
val = self._root.find(util.nspath('Title', namespace))
self.title = util.testXMLValue(val)
val = self._root.find(util.nspath('Abstract', namespace))
self.abstract = util.testXMLValue(val)
self.keywords = []
for f in self._root.findall(util.nspath('Keywords/Keyword', namespace)):
if f.text is not None:
self.keywords.append(f.text)
val = self._root.find(util.nspath('Keywords/Type', namespace))
self.keywords_type = util.testXMLValue(val)
val = self._root.find(util.nspath('AccessConstraints', namespace))
self.accessconstraints = util.testXMLValue(val)
val = self._root.find(util.nspath('Fees', namespace))
self.fees = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceType', namespace))
self.type = util.testXMLValue(val)
self.service = self.type  # alternative? keep both? discuss
val = self._root.find(util.nspath('ServiceTypeVersion', namespace))
self.version = util.testXMLValue(val)
self.versions = []
for v in self._root.findall(util.nspath('ServiceTypeVersion', namespace)):
self.versions.append(util.testXMLValue(v))
self.profiles = []
for p in self._root.findall(util.nspath('Profile', namespace)):
self.profiles.append(util.testXMLValue(p))
def __str__(self):
return 'Service: {}, title={}'.format(self.service, self.title or '')
def __repr__(self):
return '<owslib.ows.ServiceIdentification {} at {}>'.format(self.service, hex(id(self)))
class ServiceProvider(object):
"""Initialize an OWS Common ServiceProvider construct"""
def __init__(self, infoset, namespace=DEFAULT_OWS_NAMESPACE):
self._root = infoset
val = self._root.find(util.nspath('ProviderName', namespace))
self.name = util.testXMLValue(val)
self.contact = ServiceContact(infoset, namespace)
val = self._root.find(util.nspath('ProviderSite', namespace))
if val is not None:
try:
urlattrib = val.attrib[util.nspath('href', XLINK_NAMESPACE)]
self.url = util.testXMLValue(urlattrib, True)
except KeyError:
self.url = None
else:
self.url = None
class ServiceContact(object):
"""Initialize an OWS Common ServiceContact construct"""
def __init__(self, infoset, namespace=DEFAULT_OWS_NAMESPACE):
self._root = infoset
val = self._root.find(util.nspath('ProviderName', namespace))
self.name = util.testXMLValue(val)
self.organization = util.testXMLValue(
self._root.find(util.nspath('ContactPersonPrimary/ContactOrganization', namespace)))
val = self._root.find(util.nspath('ProviderSite', namespace))
if val is not None:
self.site = util.testXMLValue(val.attrib.get(util.nspath('href', XLINK_NAMESPACE)), True)
else:
self.site = None
val = self._root.find(util.nspath('ServiceContact/Role', namespace))
self.role = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/IndividualName', namespace))
self.name = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/PositionName', namespace))
self.position = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Phone/Voice', namespace))
self.phone = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Phone/Facsimile', namespace))
self.fax = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/DeliveryPoint', namespace))
self.address = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/City', namespace))
self.city = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/AdministrativeArea', namespace))
self.region = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/PostalCode', namespace))
self.postcode = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/Country', namespace))
self.country = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/ElectronicMailAddress', namespace))
self.email = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/OnlineResource', namespace))
if val is not None:
self.url = util.testXMLValue(val.attrib.get(util.nspath('href', XLINK_NAMESPACE)), True)
else:
self.url = None
val = self._root.find(util.nspath('ServiceContact/ContactInfo/HoursOfService', namespace))
self.hours = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/ContactInstructions', namespace))
self.instructions = util.testXMLValue(val)
class Constraint(object):
def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
self.name = elem.attrib.get('name')
self.values = [i.text for i in elem.findall(util.nspath('Value', namespace))]
self.values += [i.text for i in elem.findall(util.nspath('AllowedValues/Value', namespace))]
def __repr__(self):
if self.values:
return "Constraint: %s - %s" % (self.name, self.values)
else:
return "Constraint: %s" % self.name
class Parameter(object):
def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
self.name = elem.attrib.get('name')
self.values = [i.text for i in elem.findall(util.nspath('Value', namespace))]
self.values += [i.text for i in elem.findall(util.nspath('AllowedValues/Value', namespace))]
def __repr__(self):
if self.values:
return "Parameter: %s - %s" % (self.name, self.values)
else:
return "Parameter: %s" % self.name
class OperationsMetadata(object):
"""Initialize an OWS OperationMetadata construct"""
def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
if 'name' not in elem.attrib: # This is not a valid element
return
self.name = elem.attrib['name']
self.formatOptions = ['text/xml']
parameters = []
self.methods = []
self.constraints = []
for verb in elem.findall(util.nspath('DCP/HTTP/*', namespace)):
url = util.testXMLAttribute(verb, util.nspath('href', XLINK_NAMESPACE))
if url is not None:
verb_constraints = [Constraint(conts, namespace) for conts in verb.findall(
util.nspath('Constraint', namespace))]
self.methods.append({'constraints': verb_constraints, 'type': util.xmltag_split(verb.tag), 'url': url})
for parameter in elem.findall(util.nspath('Parameter', namespace)):
if namespace == OWS_NAMESPACE_1_1_0:
parameters.append((parameter.attrib['name'], {'values': [i.text for i in parameter.findall(
util.nspath('AllowedValues/Value', namespace))]}))
else:
parameters.append((parameter.attrib['name'], {'values': [i.text for i in parameter.findall(
util.nspath('Value', namespace))]}))
self.parameters = dict(parameters)
for constraint in elem.findall(util.nspath('Constraint', namespace)):
self.constraints.append(Constraint(constraint, namespace))
def __str__(self):
return "Operation: {}, format={}".format(self.name, self.formatOptions)
def __repr__(self):
return '<owslib.ows.OperationsMetadata {} at {}>'.format(self.name, hex(id(self)))
class BoundingBox(object):
"""Initialize an OWS BoundingBox construct"""
def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
self.minx = None
self.miny = None
self.maxx = None
self.maxy = None
self.crs = None
self.dimensions = 2
if elem is None:
return
val = elem.attrib.get('crs') or elem.attrib.get('{{{}}}crs'.format(namespace))
if val:
try:
self.crs = crs.Crs(val)
except (AttributeError, ValueError):
                LOGGER.warning('Invalid CRS %r. Expected integer', val)
else:
self.crs = None
val = elem.attrib.get('dimensions') or elem.attrib.get('{{{}}}dimensions'.format(namespace))
if val is not None:
self.dimensions = int(util.testXMLValue(val, True))
else: # assume 2
self.dimensions = 2
val = elem.find(util.nspath('LowerCorner', namespace))
tmp = util.testXMLValue(val)
if tmp is not None:
xy = tmp.split()
if len(xy) > 1:
if self.crs is not None and self.crs.axisorder == 'yx':
self.minx, self.miny = xy[1], xy[0]
else:
self.minx, self.miny = xy[0], xy[1]
val = elem.find(util.nspath('UpperCorner', namespace))
tmp = util.testXMLValue(val)
if tmp is not None:
xy = tmp.split()
if len(xy) > 1:
if self.crs is not None and self.crs.axisorder == 'yx':
self.maxx, self.maxy = xy[1], xy[0]
else:
self.maxx, self.maxy = xy[0], xy[1]
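# Added note (hedged): the corner parsing above normalises axis order. When the
# declared CRS reports a "yx" axis order in owslib.crs (geographic CRSs such as
# urn:ogc:def:crs:EPSG::4326), a LowerCorner of "52.0 4.3" is stored as
# minx=4.3, miny=52.0, so BoundingBox consistently exposes x/y order to callers.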
class WGS84BoundingBox(BoundingBox):
"""WGS84 bbox, axis order xy"""
def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
BoundingBox.__init__(self, elem, namespace)
self.dimensions = 2
self.crs = crs.Crs('urn:ogc:def:crs:OGC:2:84')
class ExceptionReport(Exception):
"""OWS ExceptionReport"""
def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
self.exceptions = []
if hasattr(elem, 'getroot'):
elem = elem.getroot()
for i in elem.findall(util.nspath('Exception', namespace)):
tmp = {}
val = i.attrib.get('exceptionCode')
tmp['exceptionCode'] = util.testXMLValue(val, True)
val = i.attrib.get('locator')
tmp['locator'] = util.testXMLValue(val, True)
val = i.find(util.nspath('ExceptionText', namespace))
tmp['ExceptionText'] = util.testXMLValue(val)
self.exceptions.append(tmp)
        # expose the first exception entry as the report's code/locator/message
self.code = self.exceptions[0]['exceptionCode']
self.locator = self.exceptions[0]['locator']
self.msg = self.exceptions[0]['ExceptionText']
self.xml = etree.tostring(elem)
def __str__(self):
return repr(self.msg)
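# Added example (hedged sketch): a self-contained demonstration of
# ExceptionReport parsing using only names defined in this module. The sample
# XML is illustrative, not taken from a real service, and the block only runs
# when this file is executed directly.
if __name__ == '__main__':
    _SAMPLE_EXCEPTION = (
        '<ows:ExceptionReport xmlns:ows="http://www.opengis.net/ows/1.1">'
        '<ows:Exception exceptionCode="InvalidParameterValue" locator="service">'
        '<ows:ExceptionText>No such service</ows:ExceptionText>'
        '</ows:Exception></ows:ExceptionReport>'
    )
    _report = ExceptionReport(etree.fromstring(_SAMPLE_EXCEPTION))
    # expected output: InvalidParameterValue / service / No such service
    print('%s / %s / %s' % (_report.code, _report.locator, _report.msg))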
|
|
# Copyright 2014 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the VMware Datastore backend store"""
import hashlib
import uuid
import mock
from oslo_utils import units
from oslo_vmware import api
from oslo_vmware.exceptions import FileNotFoundException
from oslo_vmware.objects import datacenter as oslo_datacenter
from oslo_vmware.objects import datastore as oslo_datastore
import six
import glance_store._drivers.vmware_datastore as vm_store
from glance_store import backend
from glance_store import exceptions
from glance_store import location
from glance_store.tests import base
from glance_store.tests.unit import test_store_capabilities
from glance_store.tests import utils
FAKE_UUID = str(uuid.uuid4())
FIVE_KB = 5 * units.Ki
VMWARE_DS = {
'verbose': True,
'debug': True,
'known_stores': ['vmware_datastore'],
'default_store': 'vsphere',
'vmware_server_host': '127.0.0.1',
'vmware_server_username': 'username',
'vmware_server_password': 'password',
'vmware_datacenter_path': 'dc1',
'vmware_datastore_name': 'ds1',
'vmware_store_image_dir': '/openstack_glance',
'vmware_api_insecure': 'True',
}
def format_location(host_ip, folder_name,
image_id, datacenter_path, datastore_name):
"""
    Helper function that returns a VMware Datastore store URI given
the component pieces.
"""
scheme = 'vsphere'
return ("%s://%s/folder%s/%s?dcPath=%s&dsName=%s"
% (scheme, host_ip, folder_name,
image_id, datacenter_path, datastore_name))
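# Added note (hedged): with the VMWARE_DS settings above this yields URIs of
# the form
#     vsphere://127.0.0.1/folder/openstack_glance/<image_id>?dcPath=dc1&dsName=ds1
# which is the same shape the tests below pass to
# location.get_location_from_uri(), query-string key order aside.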
class FakeHTTPConnection(object):
def __init__(self, status=200, *args, **kwargs):
self.status = status
def getresponse(self):
return utils.FakeHTTPResponse(status=self.status)
def request(self, *_args, **_kwargs):
pass
def close(self):
pass
def fake_datastore_obj(*args, **kwargs):
dc_obj = oslo_datacenter.Datacenter(ref='fake-ref',
name='fake-name')
dc_obj.path = args[0]
return oslo_datastore.Datastore(ref='fake-ref',
datacenter=dc_obj,
name=args[1])
class TestStore(base.StoreBaseTest,
test_store_capabilities.TestStoreCapabilitiesChecking):
@mock.patch.object(vm_store.Store, '_get_datastore')
@mock.patch('oslo_vmware.api.VMwareAPISession')
def setUp(self, mock_api_session, mock_get_datastore):
"""Establish a clean test environment."""
super(TestStore, self).setUp()
vm_store.Store.CHUNKSIZE = 2
self.config(default_store='vmware', stores=['vmware'])
backend.register_opts(self.conf)
self.config(group='glance_store',
vmware_server_username='admin',
vmware_server_password='admin',
vmware_server_host=VMWARE_DS['vmware_server_host'],
vmware_api_insecure=VMWARE_DS['vmware_api_insecure'],
vmware_datastore_name=VMWARE_DS['vmware_datastore_name'],
vmware_datacenter_path=VMWARE_DS['vmware_datacenter_path'])
mock_get_datastore.side_effect = fake_datastore_obj
backend.create_stores(self.conf)
self.store = backend.get_store_from_scheme('vsphere')
self.store.store_image_dir = (
VMWARE_DS['vmware_store_image_dir'])
def _mock_http_connection(self):
return mock.patch('six.moves.http_client.HTTPConnection')
@mock.patch('oslo_vmware.api.VMwareAPISession')
def test_get(self, mock_api_session):
"""Test a "normal" retrieval of an image in chunks."""
expected_image_size = 31
expected_returns = ['I am a teapot, short and stout\n']
loc = location.get_location_from_uri(
"vsphere://127.0.0.1/folder/openstack_glance/%s"
"?dsName=ds1&dcPath=dc1" % FAKE_UUID, conf=self.conf)
with self._mock_http_connection() as HttpConn:
HttpConn.return_value = FakeHTTPConnection()
(image_file, image_size) = self.store.get(loc)
self.assertEqual(image_size, expected_image_size)
chunks = [c for c in image_file]
self.assertEqual(expected_returns, chunks)
@mock.patch('oslo_vmware.api.VMwareAPISession')
def test_get_non_existing(self, mock_api_session):
"""
Test that trying to retrieve an image that doesn't exist
raises an error
"""
loc = location.get_location_from_uri(
"vsphere://127.0.0.1/folder/openstack_glan"
"ce/%s?dsName=ds1&dcPath=dc1" % FAKE_UUID, conf=self.conf)
with self._mock_http_connection() as HttpConn:
HttpConn.return_value = FakeHTTPConnection(status=404)
self.assertRaises(exceptions.NotFound, self.store.get, loc)
@mock.patch.object(vm_store.Store, 'select_datastore')
@mock.patch.object(vm_store._Reader, 'size')
@mock.patch.object(api, 'VMwareAPISession')
def test_add(self, fake_api_session, fake_size, fake_select_datastore):
"""Test that we can add an image via the VMware backend."""
fake_select_datastore.return_value = self.store.datastores[0][0]
expected_image_id = str(uuid.uuid4())
expected_size = FIVE_KB
expected_contents = b"*" * expected_size
hash_code = hashlib.md5(expected_contents)
expected_checksum = hash_code.hexdigest()
fake_size.__get__ = mock.Mock(return_value=expected_size)
with mock.patch('hashlib.md5') as md5:
md5.return_value = hash_code
expected_location = format_location(
VMWARE_DS['vmware_server_host'],
VMWARE_DS['vmware_store_image_dir'],
expected_image_id,
VMWARE_DS['vmware_datacenter_path'],
VMWARE_DS['vmware_datastore_name'])
image = six.BytesIO(expected_contents)
with self._mock_http_connection() as HttpConn:
HttpConn.return_value = FakeHTTPConnection()
location, size, checksum, _ = self.store.add(expected_image_id,
image,
expected_size)
self.assertEqual(utils.sort_url_by_qs_keys(expected_location),
utils.sort_url_by_qs_keys(location))
self.assertEqual(expected_size, size)
self.assertEqual(expected_checksum, checksum)
@mock.patch.object(vm_store.Store, 'select_datastore')
@mock.patch.object(vm_store._Reader, 'size')
@mock.patch('oslo_vmware.api.VMwareAPISession')
def test_add_size_zero(self, mock_api_session, fake_size,
fake_select_datastore):
"""
Test that when specifying size zero for the image to add,
the actual size of the image is returned.
"""
fake_select_datastore.return_value = self.store.datastores[0][0]
expected_image_id = str(uuid.uuid4())
expected_size = FIVE_KB
expected_contents = b"*" * expected_size
hash_code = hashlib.md5(expected_contents)
expected_checksum = hash_code.hexdigest()
fake_size.__get__ = mock.Mock(return_value=expected_size)
with mock.patch('hashlib.md5') as md5:
md5.return_value = hash_code
expected_location = format_location(
VMWARE_DS['vmware_server_host'],
VMWARE_DS['vmware_store_image_dir'],
expected_image_id,
VMWARE_DS['vmware_datacenter_path'],
VMWARE_DS['vmware_datastore_name'])
image = six.BytesIO(expected_contents)
with self._mock_http_connection() as HttpConn:
HttpConn.return_value = FakeHTTPConnection()
location, size, checksum, _ = self.store.add(expected_image_id,
image, 0)
self.assertEqual(utils.sort_url_by_qs_keys(expected_location),
utils.sort_url_by_qs_keys(location))
self.assertEqual(expected_size, size)
self.assertEqual(expected_checksum, checksum)
@mock.patch('oslo_vmware.api.VMwareAPISession')
def test_delete(self, mock_api_session):
"""Test we can delete an existing image in the VMware store."""
loc = location.get_location_from_uri(
"vsphere://127.0.0.1/folder/openstack_glance/%s?"
"dsName=ds1&dcPath=dc1" % FAKE_UUID, conf=self.conf)
with self._mock_http_connection() as HttpConn:
HttpConn.return_value = FakeHTTPConnection()
vm_store.Store._service_content = mock.Mock()
self.store.delete(loc)
with self._mock_http_connection() as HttpConn:
HttpConn.return_value = FakeHTTPConnection(status=404)
self.assertRaises(exceptions.NotFound, self.store.get, loc)
@mock.patch('oslo_vmware.api.VMwareAPISession')
def test_delete_non_existing(self, mock_api_session):
"""
Test that trying to delete an image that doesn't exist raises an error
"""
loc = location.get_location_from_uri(
"vsphere://127.0.0.1/folder/openstack_glance/%s?"
"dsName=ds1&dcPath=dc1" % FAKE_UUID, conf=self.conf)
with mock.patch.object(self.store.session,
'wait_for_task') as mock_task:
mock_task.side_effect = FileNotFoundException
self.assertRaises(exceptions.NotFound, self.store.delete, loc)
@mock.patch('oslo_vmware.api.VMwareAPISession')
def test_get_size(self, mock_api_session):
"""
Test we can get the size of an existing image in the VMware store
"""
loc = location.get_location_from_uri(
"vsphere://127.0.0.1/folder/openstack_glance/%s"
"?dsName=ds1&dcPath=dc1" % FAKE_UUID, conf=self.conf)
with self._mock_http_connection() as HttpConn:
HttpConn.return_value = FakeHTTPConnection()
image_size = self.store.get_size(loc)
self.assertEqual(image_size, 31)
@mock.patch('oslo_vmware.api.VMwareAPISession')
def test_get_size_non_existing(self, mock_api_session):
"""
Test that trying to retrieve an image size that doesn't exist
raises an error
"""
loc = location.get_location_from_uri(
"vsphere://127.0.0.1/folder/openstack_glan"
"ce/%s?dsName=ds1&dcPath=dc1" % FAKE_UUID, conf=self.conf)
with self._mock_http_connection() as HttpConn:
HttpConn.return_value = FakeHTTPConnection(status=404)
self.assertRaises(exceptions.NotFound, self.store.get_size, loc)
def test_reader_full(self):
content = b'XXX'
image = six.BytesIO(content)
expected_checksum = hashlib.md5(content).hexdigest()
reader = vm_store._Reader(image)
ret = reader.read()
self.assertEqual(content, ret)
self.assertEqual(expected_checksum, reader.checksum.hexdigest())
self.assertEqual(len(content), reader.size)
def test_reader_partial(self):
content = b'XXX'
image = six.BytesIO(content)
expected_checksum = hashlib.md5(b'X').hexdigest()
reader = vm_store._Reader(image)
ret = reader.read(1)
self.assertEqual(b'X', ret)
self.assertEqual(expected_checksum, reader.checksum.hexdigest())
self.assertEqual(1, reader.size)
def test_chunkreader_image_fits_in_blocksize(self):
"""
Test that the image file reader returns the expected chunk of data
when the block size is larger than the image.
"""
content = b'XXX'
image = six.BytesIO(content)
expected_checksum = hashlib.md5(content).hexdigest()
reader = vm_store._ChunkReader(image)
ret = reader.read()
if six.PY3:
expected_chunk = ('%x\r\n%s\r\n'
% (len(content), content.decode('ascii')))
expected_chunk = expected_chunk.encode('ascii')
else:
expected_chunk = b'%x\r\n%s\r\n' % (len(content), content)
last_chunk = b'0\r\n\r\n'
self.assertEqual(expected_chunk + last_chunk, ret)
self.assertEqual(len(content), reader.size)
self.assertEqual(expected_checksum, reader.checksum.hexdigest())
self.assertTrue(reader.closed)
ret = reader.read()
self.assertEqual(len(content), reader.size)
self.assertEqual(expected_checksum, reader.checksum.hexdigest())
self.assertTrue(reader.closed)
self.assertEqual(b'', ret)
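    # Added note (hedged): _ChunkReader appears to emit HTTP chunked transfer
    # encoding, one "<hex length>\r\n<data>\r\n" block per read plus a
    # terminating "0\r\n\r\n", which is exactly what the chunk assertions in
    # the tests above and below are built from.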
def test_chunkreader_image_larger_blocksize(self):
"""
Test that the image file reader returns the expected chunks when
the block size specified is smaller than the image.
"""
content = b'XXX'
image = six.BytesIO(content)
expected_checksum = hashlib.md5(content).hexdigest()
last_chunk = b'0\r\n\r\n'
reader = vm_store._ChunkReader(image, blocksize=1)
ret = reader.read()
expected_chunk = b'1\r\nX\r\n'
expected = (expected_chunk + expected_chunk + expected_chunk
+ last_chunk)
self.assertEqual(expected,
ret)
self.assertEqual(expected_checksum, reader.checksum.hexdigest())
self.assertEqual(len(content), reader.size)
self.assertTrue(reader.closed)
def test_chunkreader_size(self):
"""Test that the image reader takes into account the specified size."""
content = b'XXX'
image = six.BytesIO(content)
expected_checksum = hashlib.md5(content).hexdigest()
reader = vm_store._ChunkReader(image, blocksize=1)
ret = reader.read(size=3)
self.assertEqual(b'1\r\n', ret)
ret = reader.read(size=1)
self.assertEqual(b'X', ret)
ret = reader.read()
self.assertEqual(expected_checksum, reader.checksum.hexdigest())
self.assertEqual(len(content), reader.size)
self.assertTrue(reader.closed)
def test_sanity_check_api_retry_count(self):
"""Test that sanity check raises if api_retry_count is <= 0."""
self.store.conf.glance_store.vmware_api_retry_count = -1
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._sanity_check)
self.store.conf.glance_store.vmware_api_retry_count = 0
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._sanity_check)
self.store.conf.glance_store.vmware_api_retry_count = 1
try:
self.store._sanity_check()
except exceptions.BadStoreConfiguration:
self.fail()
def test_sanity_check_task_poll_interval(self):
"""Test that sanity check raises if task_poll_interval is <= 0."""
self.store.conf.glance_store.vmware_task_poll_interval = -1
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._sanity_check)
self.store.conf.glance_store.vmware_task_poll_interval = 0
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._sanity_check)
self.store.conf.glance_store.vmware_task_poll_interval = 1
try:
self.store._sanity_check()
except exceptions.BadStoreConfiguration:
self.fail()
def test_sanity_check_multiple_datastores(self):
self.store.conf.glance_store.vmware_api_retry_count = 1
self.store.conf.glance_store.vmware_task_poll_interval = 1
# Check both vmware_datastore_name and vmware_datastores defined.
self.store.conf.glance_store.vmware_datastores = ['a:b:0']
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._sanity_check)
# Both vmware_datastore_name and vmware_datastores are not defined.
self.store.conf.glance_store.vmware_datastore_name = None
self.store.conf.glance_store.vmware_datastores = None
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._sanity_check)
self.store.conf.glance_store.vmware_datastore_name = None
self.store.conf.glance_store.vmware_datastores = ['a:b:0', 'a:d:0']
try:
self.store._sanity_check()
except exceptions.BadStoreConfiguration:
self.fail()
def test_parse_datastore_info_and_weight_less_opts(self):
datastore = 'a'
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._parse_datastore_info_and_weight,
datastore)
def test_parse_datastore_info_and_weight_invalid_weight(self):
datastore = 'a:b:c'
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._parse_datastore_info_and_weight,
datastore)
def test_parse_datastore_info_and_weight_empty_opts(self):
datastore = 'a: :0'
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._parse_datastore_info_and_weight,
datastore)
datastore = ':b:0'
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._parse_datastore_info_and_weight,
datastore)
def test_parse_datastore_info_and_weight(self):
datastore = 'a:b:100'
parts = self.store._parse_datastore_info_and_weight(datastore)
self.assertEqual('a', parts[0])
self.assertEqual('b', parts[1])
self.assertEqual('100', parts[2])
def test_parse_datastore_info_and_weight_default_weight(self):
datastore = 'a:b'
parts = self.store._parse_datastore_info_and_weight(datastore)
self.assertEqual('a', parts[0])
self.assertEqual('b', parts[1])
self.assertEqual(0, parts[2])
@mock.patch.object(vm_store.Store, 'select_datastore')
@mock.patch.object(api, 'VMwareAPISession')
def test_unexpected_status(self, mock_api_session, mock_select_datastore):
expected_image_id = str(uuid.uuid4())
expected_size = FIVE_KB
expected_contents = b"*" * expected_size
image = six.BytesIO(expected_contents)
self.session = mock.Mock()
with self._mock_http_connection() as HttpConn:
HttpConn.return_value = FakeHTTPConnection(status=401)
self.assertRaises(exceptions.BackendException,
self.store.add,
expected_image_id, image, expected_size)
@mock.patch.object(api, 'VMwareAPISession')
def test_reset_session(self, mock_api_session):
self.store.reset_session()
self.assertTrue(mock_api_session.called)
@mock.patch.object(api, 'VMwareAPISession')
def test_build_vim_cookie_header_active(self, mock_api_session):
self.store.session.is_current_session_active = mock.Mock()
self.store.session.is_current_session_active.return_value = True
self.store._build_vim_cookie_header(True)
self.assertFalse(mock_api_session.called)
@mock.patch.object(api, 'VMwareAPISession')
def test_build_vim_cookie_header_expired(self, mock_api_session):
self.store.session.is_current_session_active = mock.Mock()
self.store.session.is_current_session_active.return_value = False
self.store._build_vim_cookie_header(True)
self.assertTrue(mock_api_session.called)
@mock.patch.object(api, 'VMwareAPISession')
def test_build_vim_cookie_header_expired_noverify(self, mock_api_session):
self.store.session.is_current_session_active = mock.Mock()
self.store.session.is_current_session_active.return_value = False
self.store._build_vim_cookie_header()
self.assertFalse(mock_api_session.called)
@mock.patch.object(vm_store.Store, 'select_datastore')
@mock.patch.object(api, 'VMwareAPISession')
def test_add_ioerror(self, mock_api_session, mock_select_datastore):
mock_select_datastore.return_value = self.store.datastores[0][0]
expected_image_id = str(uuid.uuid4())
expected_size = FIVE_KB
expected_contents = b"*" * expected_size
image = six.BytesIO(expected_contents)
self.session = mock.Mock()
with self._mock_http_connection() as HttpConn:
HttpConn.request.side_effect = IOError
self.assertRaises(exceptions.BackendException,
self.store.add,
expected_image_id, image, expected_size)
def test_qs_sort_with_literal_question_mark(self):
url = 'scheme://example.com/path?key2=val2&key1=val1?sort=true'
exp_url = 'scheme://example.com/path?key1=val1%3Fsort%3Dtrue&key2=val2'
self.assertEqual(exp_url,
utils.sort_url_by_qs_keys(url))
@mock.patch.object(vm_store.Store, '_get_datastore')
@mock.patch.object(api, 'VMwareAPISession')
def test_build_datastore_weighted_map(self, mock_api_session, mock_ds_obj):
datastores = ['a:b:100', 'c:d:100', 'e:f:200']
mock_ds_obj.side_effect = fake_datastore_obj
ret = self.store._build_datastore_weighted_map(datastores)
ds = ret[200]
self.assertEqual('e', ds[0].datacenter.path)
self.assertEqual('f', ds[0].name)
ds = ret[100]
self.assertEqual(2, len(ds))
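    # Added note (hedged): datastore entries use the "datacenter:datastore:weight"
    # form parsed by _parse_datastore_info_and_weight (tested above); the weighted
    # map groups datastores by weight so that select_datastore can try the highest
    # weight first and fall back when free space is insufficient, as the selection
    # tests further down exercise.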
@mock.patch.object(vm_store.Store, '_get_datastore')
@mock.patch.object(api, 'VMwareAPISession')
def test_build_datastore_weighted_map_equal_weight(self, mock_api_session,
mock_ds_obj):
datastores = ['a:b:200', 'a:b:200']
mock_ds_obj.side_effect = fake_datastore_obj
ret = self.store._build_datastore_weighted_map(datastores)
ds = ret[200]
self.assertEqual(2, len(ds))
@mock.patch.object(vm_store.Store, '_get_datastore')
@mock.patch.object(api, 'VMwareAPISession')
def test_build_datastore_weighted_map_empty_list(self, mock_api_session,
mock_ds_ref):
datastores = []
ret = self.store._build_datastore_weighted_map(datastores)
self.assertEqual({}, ret)
@mock.patch.object(vm_store.Store, '_get_datastore')
@mock.patch.object(vm_store.Store, '_get_freespace')
def test_select_datastore_insufficient_freespace(self, mock_get_freespace,
mock_ds_ref):
datastores = ['a:b:100', 'c:d:100', 'e:f:200']
image_size = 10
self.store.datastores = (
self.store._build_datastore_weighted_map(datastores))
freespaces = [5, 5, 5]
def fake_get_fp(*args, **kwargs):
return freespaces.pop(0)
mock_get_freespace.side_effect = fake_get_fp
self.assertRaises(exceptions.StorageFull,
self.store.select_datastore, image_size)
@mock.patch.object(vm_store.Store, '_get_datastore')
@mock.patch.object(vm_store.Store, '_get_freespace')
def test_select_datastore_insufficient_fs_one_ds(self, mock_get_freespace,
mock_ds_ref):
        # Tests the insufficient-freespace path when only one datastore is configured.
datastores = ['a:b:100']
image_size = 10
self.store.datastores = (
self.store._build_datastore_weighted_map(datastores))
freespaces = [5]
def fake_get_fp(*args, **kwargs):
return freespaces.pop(0)
mock_get_freespace.side_effect = fake_get_fp
self.assertRaises(exceptions.StorageFull,
self.store.select_datastore, image_size)
@mock.patch.object(vm_store.Store, '_get_datastore')
@mock.patch.object(vm_store.Store, '_get_freespace')
def test_select_datastore_equal_freespace(self, mock_get_freespace,
mock_ds_obj):
datastores = ['a:b:100', 'c:d:100', 'e:f:200']
image_size = 10
mock_ds_obj.side_effect = fake_datastore_obj
self.store.datastores = (
self.store._build_datastore_weighted_map(datastores))
freespaces = [11, 11, 11]
def fake_get_fp(*args, **kwargs):
return freespaces.pop(0)
mock_get_freespace.side_effect = fake_get_fp
ds = self.store.select_datastore(image_size)
self.assertEqual('e', ds.datacenter.path)
self.assertEqual('f', ds.name)
@mock.patch.object(vm_store.Store, '_get_datastore')
@mock.patch.object(vm_store.Store, '_get_freespace')
def test_select_datastore_contention(self, mock_get_freespace,
mock_ds_obj):
datastores = ['a:b:100', 'c:d:100', 'e:f:200']
image_size = 10
mock_ds_obj.side_effect = fake_datastore_obj
self.store.datastores = (
self.store._build_datastore_weighted_map(datastores))
freespaces = [5, 11, 12]
def fake_get_fp(*args, **kwargs):
return freespaces.pop(0)
mock_get_freespace.side_effect = fake_get_fp
ds = self.store.select_datastore(image_size)
self.assertEqual('c', ds.datacenter.path)
self.assertEqual('d', ds.name)
def test_select_datastore_empty_list(self):
datastores = []
self.store.datastores = (
self.store._build_datastore_weighted_map(datastores))
self.assertRaises(exceptions.StorageFull,
self.store.select_datastore, 10)
@mock.patch('oslo_vmware.api.VMwareAPISession')
def test_get_datacenter_ref(self, mock_api_session):
datacenter_path = 'Datacenter1'
self.store._get_datacenter(datacenter_path)
self.store.session.invoke_api.assert_called_with(
self.store.session.vim,
'FindByInventoryPath',
self.store.session.vim.service_content.searchIndex,
inventoryPath=datacenter_path)
|
|
#*************************************************************************
#***
#*** GizmoDaemon Config Script
#*** LIRCMceUSB2 MythTV config
#***
#*************************************************************************
"""
Copyright (c) 2007, Gizmo Daemon Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
############################
# Imports
##########################
from GizmoDaemon import *
from GizmoScriptActiveApplication import *
from GizmoScriptAltTabber import *
import subprocess
ENABLED = True
VERSION_NEEDED = 3.2
INTERESTED_CLASSES = [GizmoEventClass.LIRC]
INTERESTED_WINDOWS = ["mythfrontend"]
USES_LIRC_REMOTES = ["mceusb", "mceusb2"]
POWER_APPLICATION = "mythfrontend"
############################
# LIRCMceUSB2MythTV Class definition
##########################
class LIRCMceUSB2MythTV(GizmoScriptActiveApplication):
"""
MythTV LIRC Event Mapping for the MceUSB2 remote
"""
############################
# Public Functions
##########################
def onDeviceEvent(self, Event, Gizmo = None):
"""
Called from Base Class' onEvent method.
        See GizmodDispatcher.onEvent documentation for an explanation of this function
"""
        # if the event isn't from the remote we're interested in, don't handle it
if Event.Remote not in USES_LIRC_REMOTES:
return False
# process the key
if Event.Button == "Power":
# if mythfrontend is open, kill it
subprocess.Popen(["killall", "mythfrontend"])
return True
elif Event.Button == "TV":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_A)
return True
elif Event.Button == "Music":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_B)
return True
elif Event.Button == "Pictures":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_SLASH)
return True
elif Event.Button == "Videos":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_SLASH, [GizmoKey.KEY_RIGHTSHIFT])
return True
elif Event.Button == "Stop":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_S)
return True
elif Event.Button == "Record":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_R)
return True
elif Event.Button == "Pause":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_P)
return True
elif Event.Button == "Rewind":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_COMMA, [GizmoKey.KEY_RIGHTSHIFT])
return True
elif Event.Button == "Play":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_P)
return True
elif Event.Button == "Forward":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_DOT, [GizmoKey.KEY_RIGHTSHIFT])
return True
elif Event.Button == "Replay":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_PAGEUP)
return True
elif Event.Button == "Back":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_ESC)
return True
elif Event.Button == "Up":
return False
elif Event.Button == "Skip":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_PAGEDOWN)
return True
elif Event.Button == "More":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_M)
return True
elif Event.Button == "Left":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_LEFT)
return True
elif Event.Button == "OK":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_ENTER)
return True
elif Event.Button == "Right":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_RIGHT)
return True
elif Event.Button == "Down":
return False
elif Event.Button == "VolUp":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_RIGHTBRACE)
return True
elif Event.Button == "VolDown":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_LEFTBRACE)
return True
elif Event.Button == "Home":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_END)
return True
elif Event.Button == "ChanUp":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_UP)
return True
elif Event.Button == "ChanDown":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_DOWN)
return True
elif Event.Button == "RecTV":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_HOME)
return True
elif Event.Button == "Mute":
return False
elif Event.Button == "DVD":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_H)
return True
elif Event.Button == "Guide":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_S)
return True
elif Event.Button == "LiveTV":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_N)
return True
elif Event.Button == "One":
return False
elif Event.Button == "Two":
return False
elif Event.Button == "Three":
return False
elif Event.Button == "Four":
return False
elif Event.Button == "Five":
return False
elif Event.Button == "Six":
return False
elif Event.Button == "Seven":
return False
elif Event.Button == "Eight":
return False
elif Event.Button == "Nine":
return False
elif Event.Button == "Star":
return False
elif Event.Button == "Zero":
return False
elif Event.Button == "Hash":
return False
elif Event.Button == "Clear":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_C)
return True
elif Event.Button == "Enter":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_I)
return True
else:
# unmatched event, keep processing
return False
def onEvent(self, Event, Gizmo = None):
"""
Overloading Base Class' onEvent method!
Make sure to call it!
"""
# check for power button
# if pressed and mythfrontend isn't running, then launch it
# also return False so that other scripts may make use of the power
# button as well
if Event.Class in self.InterestedClasses \
and Event.Remote in USES_LIRC_REMOTES \
and Event.Button == "Power" \
and Gizmod.isProcessRunning(POWER_APPLICATION) < 0:
subprocess.Popen([POWER_APPLICATION])
Gizmod.updateProcessTree() # force an instantaneous process tree update
return False
        # call the base class's onEvent method
return GizmoScriptActiveApplication.onEvent(self, Event, Gizmo)
############################
# Private Functions
##########################
def __init__(self):
"""
Default Constructor
"""
GizmoScriptActiveApplication.__init__(self, ENABLED, VERSION_NEEDED, INTERESTED_CLASSES, INTERESTED_WINDOWS)
############################
# LIRCMceUSB2MythTV class end
##########################
# register the user script
LIRCMceUSB2MythTV()
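# Added note (hedged): extending the remote mapping follows the pattern already
# used in onDeviceEvent above, for example (illustrative only; "Teletext" and
# KEY_T are not buttons or keys taken from this script):
#
#     elif Event.Button == "Teletext":
#         Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_T)
#         return True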
|
|
from test.lib.testing import eq_, assert_raises, assert_raises_message
import datetime
from sqlalchemy.schema import CreateSequence, DropSequence
from sqlalchemy.sql import select, text, literal_column
import sqlalchemy as sa
from test.lib import testing, engines
from sqlalchemy import MetaData, Integer, String, ForeignKey, Boolean, exc,\
Sequence, func, literal, Unicode
from sqlalchemy.types import TypeDecorator, TypeEngine
from test.lib.schema import Table, Column
from test.lib.testing import eq_
from sqlalchemy.dialects import sqlite
from test.lib import fixtures
class DefaultTest(fixtures.TestBase):
@classmethod
def setup_class(cls):
global t, f, f2, ts, currenttime, metadata, default_generator
db = testing.db
metadata = MetaData(db)
default_generator = {'x':50}
def mydefault():
default_generator['x'] += 1
return default_generator['x']
def myupdate_with_ctx(ctx):
conn = ctx.connection
return conn.execute(sa.select([sa.text('13')])).scalar()
def mydefault_using_connection(ctx):
conn = ctx.connection
try:
return conn.execute(sa.select([sa.text('12')])).scalar()
finally:
# ensure a "close()" on this connection does nothing,
                # since it's a "branched" connection
conn.close()
use_function_defaults = testing.against('postgresql', 'mssql', 'maxdb')
is_oracle = testing.against('oracle')
# select "count(1)" returns different results on different DBs also
# correct for "current_date" compatible as column default, value
# differences
currenttime = func.current_date(type_=sa.Date, bind=db)
if is_oracle:
ts = db.scalar(sa.select([func.trunc(func.sysdate(), sa.literal_column("'DAY'"), type_=sa.Date).label('today')]))
assert isinstance(ts, datetime.date) and not isinstance(ts, datetime.datetime)
f = sa.select([func.length('abcdef')], bind=db).scalar()
f2 = sa.select([func.length('abcdefghijk')], bind=db).scalar()
            # TODO: engine propagation across nested functions not working
currenttime = func.trunc(currenttime, sa.literal_column("'DAY'"), bind=db, type_=sa.Date)
def1 = currenttime
def2 = func.trunc(sa.text("sysdate"), sa.literal_column("'DAY'"), type_=sa.Date)
deftype = sa.Date
elif use_function_defaults:
f = sa.select([func.length('abcdef')], bind=db).scalar()
f2 = sa.select([func.length('abcdefghijk')], bind=db).scalar()
def1 = currenttime
deftype = sa.Date
if testing.against('maxdb'):
def2 = sa.text("curdate")
elif testing.against('mssql'):
def2 = sa.text("getdate()")
else:
def2 = sa.text("current_date")
ts = db.scalar(func.current_date())
else:
f = len('abcdef')
f2 = len('abcdefghijk')
def1 = def2 = "3"
ts = 3
deftype = Integer
t = Table('default_test1', metadata,
# python function
Column('col1', Integer, primary_key=True,
default=mydefault),
# python literal
Column('col2', String(20),
default="imthedefault",
onupdate="im the update"),
# preexecute expression
Column('col3', Integer,
default=func.length('abcdef'),
onupdate=func.length('abcdefghijk')),
# SQL-side default from sql expression
Column('col4', deftype,
server_default=def1),
# SQL-side default from literal expression
Column('col5', deftype,
server_default=def2),
# preexecute + update timestamp
Column('col6', sa.Date,
default=currenttime,
onupdate=currenttime),
Column('boolcol1', sa.Boolean, default=True),
Column('boolcol2', sa.Boolean, default=False),
# python function which uses ExecutionContext
Column('col7', Integer,
default=mydefault_using_connection,
onupdate=myupdate_with_ctx),
# python builtin
Column('col8', sa.Date,
default=datetime.date.today,
onupdate=datetime.date.today),
# combo
Column('col9', String(20),
default='py',
server_default='ddl'))
t.create()
@classmethod
def teardown_class(cls):
t.drop()
def teardown(self):
default_generator['x'] = 50
t.delete().execute()
def test_bad_arg_signature(self):
ex_msg = \
"ColumnDefault Python function takes zero or one positional arguments"
def fn1(x, y): pass
def fn2(x, y, z=3): pass
class fn3(object):
def __init__(self, x, y):
pass
class FN4(object):
def __call__(self, x, y):
pass
fn4 = FN4()
for fn in fn1, fn2, fn3, fn4:
assert_raises_message(sa.exc.ArgumentError,
ex_msg,
sa.ColumnDefault, fn)
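    # Added note (hedged): ColumnDefault accepts callables taking either no
    # positional arguments or a single ExecutionContext argument (compare
    # mydefault and mydefault_using_connection in setup_class above); every
    # signature in this test requires two positionals and is rejected.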
def test_arg_signature(self):
def fn1(): pass
def fn2(): pass
def fn3(x=1): pass
def fn4(x=1, y=2, z=3): pass
fn5 = list
class fn6(object):
def __init__(self, x):
pass
class fn6(object):
def __init__(self, x, y=3):
pass
class FN7(object):
def __call__(self, x):
pass
fn7 = FN7()
class FN8(object):
def __call__(self, x, y=3):
pass
fn8 = FN8()
for fn in fn1, fn2, fn3, fn4, fn5, fn6, fn7, fn8:
c = sa.ColumnDefault(fn)
@testing.fails_on('firebird', 'Data type unknown')
def test_standalone(self):
c = testing.db.engine.contextual_connect()
x = c.execute(t.c.col1.default)
y = t.c.col2.default.execute()
z = c.execute(t.c.col3.default)
assert 50 <= x <= 57
eq_(y, 'imthedefault')
eq_(z, f)
eq_(f2, 11)
def test_py_vs_server_default_detection(self):
def has_(name, *wanted):
slots = ['default', 'onupdate', 'server_default', 'server_onupdate']
col = tbl.c[name]
for slot in wanted:
slots.remove(slot)
assert getattr(col, slot) is not None, getattr(col, slot)
for slot in slots:
assert getattr(col, slot) is None, getattr(col, slot)
tbl = t
has_('col1', 'default')
has_('col2', 'default', 'onupdate')
has_('col3', 'default', 'onupdate')
has_('col4', 'server_default')
has_('col5', 'server_default')
has_('col6', 'default', 'onupdate')
has_('boolcol1', 'default')
has_('boolcol2', 'default')
has_('col7', 'default', 'onupdate')
has_('col8', 'default', 'onupdate')
has_('col9', 'default', 'server_default')
ColumnDefault, DefaultClause = sa.ColumnDefault, sa.DefaultClause
t2 = Table('t2', MetaData(),
Column('col1', Integer, Sequence('foo')),
Column('col2', Integer,
default=Sequence('foo'),
server_default='y'),
Column('col3', Integer,
Sequence('foo'),
server_default='x'),
Column('col4', Integer,
ColumnDefault('x'),
DefaultClause('y')),
Column('col4', Integer,
ColumnDefault('x'),
DefaultClause('y'),
DefaultClause('y', for_update=True)),
Column('col5', Integer,
ColumnDefault('x'),
DefaultClause('y'),
onupdate='z'),
Column('col6', Integer,
ColumnDefault('x'),
server_default='y',
onupdate='z'),
Column('col7', Integer,
default='x',
server_default='y',
onupdate='z'),
Column('col8', Integer,
server_onupdate='u',
default='x',
server_default='y',
onupdate='z'))
tbl = t2
has_('col1', 'default')
has_('col2', 'default', 'server_default')
has_('col3', 'default', 'server_default')
has_('col4', 'default', 'server_default', 'server_onupdate')
has_('col5', 'default', 'server_default', 'onupdate')
has_('col6', 'default', 'server_default', 'onupdate')
has_('col7', 'default', 'server_default', 'onupdate')
has_('col8', 'default', 'server_default', 'onupdate', 'server_onupdate')
@testing.fails_on('firebird', 'Data type unknown')
def test_insert(self):
r = t.insert().execute()
assert r.lastrow_has_defaults()
eq_(set(r.context.postfetch_cols),
set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]))
r = t.insert(inline=True).execute()
assert r.lastrow_has_defaults()
eq_(set(r.context.postfetch_cols),
set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]))
t.insert().execute()
ctexec = sa.select([currenttime.label('now')], bind=testing.db).scalar()
l = t.select().order_by(t.c.col1).execute()
today = datetime.date.today()
eq_(l.fetchall(), [
(x, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, 'py')
for x in range(51, 54)])
t.insert().execute(col9=None)
assert r.lastrow_has_defaults()
eq_(set(r.context.postfetch_cols),
set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]))
eq_(t.select(t.c.col1==54).execute().fetchall(),
[(54, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, None)])
@testing.fails_on('firebird', 'Data type unknown')
def test_insertmany(self):
# MySQL-Python 1.2.2 breaks functions in execute_many :(
if (testing.against('mysql+mysqldb') and
testing.db.dialect.dbapi.version_info[:3] == (1, 2, 2)):
return
r = t.insert().execute({}, {}, {})
ctexec = currenttime.scalar()
l = t.select().execute()
today = datetime.date.today()
eq_(l.fetchall(),
[(51, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, 'py'),
(52, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, 'py'),
(53, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, 'py')])
def test_no_embed_in_sql(self):
"""Using a DefaultGenerator, Sequence, DefaultClause
in the columns, where clause of a select, or in the values
clause of insert, update, raises an informative error"""
for const in (
sa.Sequence('y'),
sa.ColumnDefault('y'),
sa.DefaultClause('y')
):
assert_raises_message(
sa.exc.ArgumentError,
"SQL expression object or string expected.",
t.select, [const]
)
assert_raises_message(
sa.exc.InvalidRequestError,
"cannot be used directly as a column expression.",
str, t.insert().values(col4=const)
)
assert_raises_message(
sa.exc.InvalidRequestError,
"cannot be used directly as a column expression.",
str, t.update().values(col4=const)
)
def test_missing_many_param(self):
assert_raises_message(exc.StatementError,
"A value is required for bind parameter 'col7', in parameter group 1",
t.insert().execute,
{'col4':7, 'col7':12, 'col8':19},
{'col4':7, 'col8':19},
{'col4':7, 'col7':12, 'col8':19},
)
def test_insert_values(self):
t.insert(values={'col3':50}).execute()
l = t.select().execute()
eq_(50, l.first()['col3'])
@testing.fails_on('firebird', 'Data type unknown')
def test_updatemany(self):
# MySQL-Python 1.2.2 breaks functions in execute_many :(
if (testing.against('mysql+mysqldb') and
testing.db.dialect.dbapi.version_info[:3] == (1, 2, 2)):
return
t.insert().execute({}, {}, {})
t.update(t.c.col1==sa.bindparam('pkval')).execute(
{'pkval':51,'col7':None, 'col8':None, 'boolcol1':False})
t.update(t.c.col1==sa.bindparam('pkval')).execute(
{'pkval':51,},
{'pkval':52,},
{'pkval':53,})
l = t.select().execute()
ctexec = currenttime.scalar()
today = datetime.date.today()
eq_(l.fetchall(),
[(51, 'im the update', f2, ts, ts, ctexec, False, False,
13, today, 'py'),
(52, 'im the update', f2, ts, ts, ctexec, True, False,
13, today, 'py'),
(53, 'im the update', f2, ts, ts, ctexec, True, False,
13, today, 'py')])
@testing.fails_on('firebird', 'Data type unknown')
def test_update(self):
r = t.insert().execute()
pk = r.inserted_primary_key[0]
t.update(t.c.col1==pk).execute(col4=None, col5=None)
ctexec = currenttime.scalar()
l = t.select(t.c.col1==pk).execute()
l = l.first()
eq_(l,
(pk, 'im the update', f2, None, None, ctexec, True, False,
13, datetime.date.today(), 'py'))
eq_(11, f2)
@testing.fails_on('firebird', 'Data type unknown')
def test_update_values(self):
r = t.insert().execute()
pk = r.inserted_primary_key[0]
t.update(t.c.col1==pk, values={'col3': 55}).execute()
l = t.select(t.c.col1==pk).execute()
l = l.first()
eq_(55, l['col3'])
class PKDefaultTest(fixtures.TablesTest):
__requires__ = ('subqueries',)
@classmethod
def define_tables(cls, metadata):
t2 = Table('t2', metadata,
Column('nextid', Integer))
Table('t1', metadata,
Column('id', Integer, primary_key=True,
default=sa.select([func.max(t2.c.nextid)]).as_scalar()),
Column('data', String(30)))
@testing.requires.returning
def test_with_implicit_returning(self):
self._test(True)
def test_regular(self):
self._test(False)
def _test(self, returning):
t2, t1 = self.tables.t2, self.tables.t1
if not returning and not testing.db.dialect.implicit_returning:
engine = testing.db
else:
engine = engines.testing_engine(options={'implicit_returning':returning})
engine.execute(t2.insert(), nextid=1)
r = engine.execute(t1.insert(), data='hi')
eq_([1], r.inserted_primary_key)
engine.execute(t2.insert(), nextid=2)
r = engine.execute(t1.insert(), data='there')
eq_([2], r.inserted_primary_key)
class PKIncrementTest(fixtures.TablesTest):
run_define_tables = 'each'
@classmethod
def define_tables(cls, metadata):
Table("aitable", metadata,
Column('id', Integer, Sequence('ai_id_seq', optional=True),
primary_key=True),
Column('int1', Integer),
Column('str1', String(20)))
# TODO: add coverage for increment on a secondary column in a key
@testing.fails_on('firebird', 'Data type unknown')
def _test_autoincrement(self, bind):
aitable = self.tables.aitable
ids = set()
rs = bind.execute(aitable.insert(), int1=1)
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
rs = bind.execute(aitable.insert(), str1='row 2')
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
rs = bind.execute(aitable.insert(), int1=3, str1='row 3')
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
rs = bind.execute(aitable.insert(values={'int1':func.length('four')}))
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
eq_(ids, set([1,2,3,4]))
eq_(list(bind.execute(aitable.select().order_by(aitable.c.id))),
[(1, 1, None), (2, None, 'row 2'), (3, 3, 'row 3'), (4, 4, None)])
def test_autoincrement_autocommit(self):
self._test_autoincrement(testing.db)
def test_autoincrement_transaction(self):
con = testing.db.connect()
tx = con.begin()
try:
try:
self._test_autoincrement(con)
except:
try:
tx.rollback()
except:
pass
raise
else:
tx.commit()
finally:
con.close()
class EmptyInsertTest(fixtures.TestBase):
@testing.exclude('sqlite', '<', (3, 3, 8), 'no empty insert support')
@testing.fails_on('oracle', 'FIXME: unknown')
def test_empty_insert(self):
metadata = MetaData(testing.db)
t1 = Table('t1', metadata,
Column('is_true', Boolean, server_default=('1')))
metadata.create_all()
try:
result = t1.insert().execute()
eq_(1, select([func.count(text('*'))], from_obj=t1).scalar())
eq_(True, t1.select().scalar())
finally:
metadata.drop_all()
class AutoIncrementTest(fixtures.TablesTest):
__requires__ = ('identity',)
run_define_tables = 'each'
@classmethod
def define_tables(cls, metadata):
"""Each test manipulates self.metadata individually."""
@testing.exclude('sqlite', '<', (3, 4), 'no database support')
def test_autoincrement_single_col(self):
single = Table('single', self.metadata,
Column('id', Integer, primary_key=True))
single.create()
r = single.insert().execute()
id_ = r.inserted_primary_key[0]
eq_(id_, 1)
eq_(1, sa.select([func.count(sa.text('*'))], from_obj=single).scalar())
def test_autoincrement_fk(self):
nodes = Table('nodes', self.metadata,
Column('id', Integer, primary_key=True),
Column('parent_id', Integer, ForeignKey('nodes.id')),
Column('data', String(30)))
nodes.create()
r = nodes.insert().execute(data='foo')
id_ = r.inserted_primary_key[0]
nodes.insert().execute(data='bar', parent_id=id_)
def test_autoinc_detection_no_affinity(self):
class MyType(TypeDecorator):
impl = TypeEngine
assert MyType()._type_affinity is None
t = Table('x', MetaData(),
Column('id', MyType(), primary_key=True)
)
assert t._autoincrement_column is None
def test_autoincrement_ignore_fk(self):
m = MetaData()
y = Table('y', m,
Column('id', Integer(), primary_key=True)
)
x = Table('x', m,
Column('id', Integer(),
ForeignKey('y.id'),
autoincrement="ignore_fk", primary_key=True)
)
assert x._autoincrement_column is x.c.id
def test_autoincrement_fk_disqualifies(self):
m = MetaData()
y = Table('y', m,
Column('id', Integer(), primary_key=True)
)
x = Table('x', m,
Column('id', Integer(),
ForeignKey('y.id'),
primary_key=True)
)
assert x._autoincrement_column is None
@testing.fails_on('sqlite', 'FIXME: unknown')
def test_non_autoincrement(self):
# sqlite INT primary keys can be non-unique! (only for ints)
nonai = Table("nonaitest", self.metadata,
Column('id', Integer, autoincrement=False, primary_key=True),
Column('data', String(20)))
nonai.create()
try:
# postgresql + mysql strict will fail on first row,
# mysql in legacy mode fails on second row
nonai.insert().execute(data='row 1')
nonai.insert().execute(data='row 2')
assert False
except sa.exc.DBAPIError, e:
assert True
nonai.insert().execute(id=1, data='row 1')
class SequenceDDLTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def test_create_drop_ddl(self):
self.assert_compile(
CreateSequence(Sequence('foo_seq')),
"CREATE SEQUENCE foo_seq",
)
self.assert_compile(
CreateSequence(Sequence('foo_seq', start=5)),
"CREATE SEQUENCE foo_seq START WITH 5",
)
self.assert_compile(
CreateSequence(Sequence('foo_seq', increment=2)),
"CREATE SEQUENCE foo_seq INCREMENT BY 2",
)
self.assert_compile(
CreateSequence(Sequence('foo_seq', increment=2, start=5)),
"CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 5",
)
self.assert_compile(
DropSequence(Sequence('foo_seq')),
"DROP SEQUENCE foo_seq",
)
class SequenceExecTest(fixtures.TestBase):
__requires__ = ('sequences',)
@classmethod
def setup_class(cls):
        cls.seq = Sequence("my_sequence")
cls.seq.create(testing.db)
@classmethod
def teardown_class(cls):
cls.seq.drop(testing.db)
def _assert_seq_result(self, ret):
"""asserts return of next_value is an int"""
assert isinstance(ret, (int, long))
assert ret > 0
def test_implicit_connectionless(self):
s = Sequence("my_sequence", metadata=MetaData(testing.db))
self._assert_seq_result(s.execute())
def test_explicit(self):
s = Sequence("my_sequence")
self._assert_seq_result(s.execute(testing.db))
def test_explicit_optional(self):
"""test dialect executes a Sequence, returns nextval, whether
or not "optional" is set """
s = Sequence("my_sequence", optional=True)
self._assert_seq_result(s.execute(testing.db))
def test_func_implicit_connectionless_execute(self):
"""test func.next_value().execute()/.scalar() works
with connectionless execution. """
s = Sequence("my_sequence", metadata=MetaData(testing.db))
self._assert_seq_result(s.next_value().execute().scalar())
def test_func_explicit(self):
s = Sequence("my_sequence")
self._assert_seq_result(testing.db.scalar(s.next_value()))
def test_func_implicit_connectionless_scalar(self):
"""test func.next_value().execute()/.scalar() works. """
s = Sequence("my_sequence", metadata=MetaData(testing.db))
self._assert_seq_result(s.next_value().scalar())
def test_func_embedded_select(self):
"""test can use next_value() in select column expr"""
s = Sequence("my_sequence")
self._assert_seq_result(
testing.db.scalar(select([s.next_value()]))
)
@testing.fails_on('oracle', "ORA-02287: sequence number not allowed here")
@testing.provide_metadata
def test_func_embedded_whereclause(self):
"""test can use next_value() in whereclause"""
metadata = self.metadata
t1 = Table('t', metadata,
Column('x', Integer)
)
t1.create(testing.db)
testing.db.execute(t1.insert(), [{'x':1}, {'x':300}, {'x':301}])
s = Sequence("my_sequence")
eq_(
testing.db.execute(
t1.select().where(t1.c.x > s.next_value())
).fetchall(),
[(300, ), (301, )]
)
@testing.provide_metadata
def test_func_embedded_valuesbase(self):
"""test can use next_value() in values() of _ValuesBase"""
metadata = self.metadata
t1 = Table('t', metadata,
Column('x', Integer)
)
t1.create(testing.db)
s = Sequence("my_sequence")
testing.db.execute(
t1.insert().values(x=s.next_value())
)
self._assert_seq_result(
testing.db.scalar(t1.select())
)
@testing.provide_metadata
def test_inserted_pk_no_returning(self):
"""test inserted_primary_key contains [None] when
pk_col=next_value(), implicit returning is not used."""
metadata = self.metadata
e = engines.testing_engine(options={'implicit_returning':False})
s = Sequence("my_sequence")
metadata.bind = e
t1 = Table('t', metadata,
Column('x', Integer, primary_key=True)
)
t1.create()
r = e.execute(
t1.insert().values(x=s.next_value())
)
eq_(r.inserted_primary_key, [None])
@testing.requires.returning
@testing.provide_metadata
def test_inserted_pk_implicit_returning(self):
"""test inserted_primary_key contains the result when
pk_col=next_value(), when implicit returning is used."""
metadata = self.metadata
e = engines.testing_engine(options={'implicit_returning':True})
s = Sequence("my_sequence")
metadata.bind = e
t1 = Table('t', metadata,
Column('x', Integer, primary_key=True)
)
t1.create()
r = e.execute(
t1.insert().values(x=s.next_value())
)
self._assert_seq_result(r.inserted_primary_key[0])
class SequenceTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__requires__ = ('sequences',)
@testing.fails_on('firebird', 'no FB support for start/increment')
def test_start_increment(self):
for seq in (
Sequence('foo_seq'),
Sequence('foo_seq', start=8),
Sequence('foo_seq', increment=5)):
seq.create(testing.db)
try:
values = [
testing.db.execute(seq) for i in range(3)
]
start = seq.start or 1
inc = seq.increment or 1
assert values == list(xrange(start, start + inc * 3, inc))
finally:
seq.drop(testing.db)
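    # Added worked example (hedged): with Sequence('foo_seq', increment=5) and
    # the default start of 1, three executions are expected to yield 1, 6, 11,
    # i.e. list(xrange(1, 1 + 5 * 3, 5)) == [1, 6, 11], matching the assert above.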
def _has_sequence(self, name):
return testing.db.dialect.has_sequence(testing.db, name)
def test_nextval_render(self):
"""test dialect renders the "nextval" construct,
whether or not "optional" is set """
for s in (
Sequence("my_seq"),
Sequence("my_seq", optional=True)):
assert str(s.next_value().
compile(dialect=testing.db.dialect)) in (
"nextval('my_seq')",
"gen_id(my_seq, 1)",
"my_seq.nextval",
)
def test_nextval_unsupported(self):
"""test next_value() used on non-sequence platform
raises NotImplementedError."""
s = Sequence("my_seq")
d = sqlite.dialect()
assert_raises_message(
NotImplementedError,
"Dialect 'sqlite' does not support sequence increments.",
s.next_value().compile,
dialect=d
)
def test_checkfirst_sequence(self):
s = Sequence("my_sequence")
s.create(testing.db, checkfirst=False)
assert self._has_sequence('my_sequence')
s.create(testing.db, checkfirst=True)
s.drop(testing.db, checkfirst=False)
assert not self._has_sequence('my_sequence')
s.drop(testing.db, checkfirst=True)
def test_checkfirst_metadata(self):
m = MetaData()
s = Sequence("my_sequence", metadata=m)
m.create_all(testing.db, checkfirst=False)
assert self._has_sequence('my_sequence')
m.create_all(testing.db, checkfirst=True)
m.drop_all(testing.db, checkfirst=False)
assert not self._has_sequence('my_sequence')
m.drop_all(testing.db, checkfirst=True)
def test_checkfirst_table(self):
m = MetaData()
s = Sequence("my_sequence")
t = Table('t', m, Column('c', Integer, s, primary_key=True))
t.create(testing.db, checkfirst=False)
assert self._has_sequence('my_sequence')
t.create(testing.db, checkfirst=True)
t.drop(testing.db, checkfirst=False)
assert not self._has_sequence('my_sequence')
t.drop(testing.db, checkfirst=True)
@testing.provide_metadata
def test_table_overrides_metadata_create(self):
metadata = self.metadata
s1 = Sequence("s1", metadata=metadata)
s2 = Sequence("s2", metadata=metadata)
s3 = Sequence("s3")
t = Table('t', metadata,
Column('c', Integer, s3, primary_key=True))
assert s3.metadata is metadata
t.create(testing.db, checkfirst=True)
s3.drop(testing.db)
# 't' is created, and 's3' won't be
# re-created since it's linked to 't'.
# 's1' and 's2' are, however.
metadata.create_all(testing.db)
assert self._has_sequence('s1')
assert self._has_sequence('s2')
assert not self._has_sequence('s3')
s2.drop(testing.db)
assert self._has_sequence('s1')
assert not self._has_sequence('s2')
metadata.drop_all(testing.db)
assert not self._has_sequence('s1')
assert not self._has_sequence('s2')
class TableBoundSequenceTest(fixtures.TestBase):
__requires__ = ('sequences',)
@classmethod
def setup_class(cls):
global cartitems, sometable, metadata
metadata = MetaData(testing.db)
cartitems = Table("cartitems", metadata,
Column("cart_id", Integer, Sequence('cart_id_seq'), primary_key=True),
Column("description", String(40)),
Column("createdate", sa.DateTime())
)
sometable = Table( 'Manager', metadata,
Column('obj_id', Integer, Sequence('obj_id_seq'), ),
Column('name', String(128)),
Column('id', Integer, Sequence('Manager_id_seq', optional=True),
primary_key=True),
)
metadata.create_all()
@classmethod
def teardown_class(cls):
metadata.drop_all()
def test_insert_via_seq(self):
cartitems.insert().execute(description='hi')
cartitems.insert().execute(description='there')
r = cartitems.insert().execute(description='lala')
assert r.inserted_primary_key and r.inserted_primary_key[0] is not None
id_ = r.inserted_primary_key[0]
eq_(1,
sa.select([func.count(cartitems.c.cart_id)],
sa.and_(cartitems.c.description == 'lala',
cartitems.c.cart_id == id_)).scalar())
cartitems.select().execute().fetchall()
def test_seq_nonpk(self):
"""test sequences fire off as defaults on non-pk columns"""
engine = engines.testing_engine(options={'implicit_returning':False})
result = engine.execute(sometable.insert(), name="somename")
assert set(result.postfetch_cols()) == set([sometable.c.obj_id])
result = engine.execute(sometable.insert(), name="someother")
assert set(result.postfetch_cols()) == set([sometable.c.obj_id])
sometable.insert().execute(
{'name':'name3'},
{'name':'name4'})
eq_(sometable.select().order_by(sometable.c.id).execute().fetchall(),
[(1, "somename", 1),
(2, "someother", 2),
(3, "name3", 3),
(4, "name4", 4)])
class SpecialTypePKTest(fixtures.TestBase):
"""test process_result_value in conjunction with primary key columns.
Also tests that "autoincrement" checks are against column.type._type_affinity,
rather than the class of "type" itself.
"""
@classmethod
def setup_class(cls):
class MyInteger(TypeDecorator):
impl = Integer
def process_bind_param(self, value, dialect):
if value is None:
return None
return int(value[4:])
def process_result_value(self, value, dialect):
if value is None:
return None
return "INT_%d" % value
cls.MyInteger = MyInteger
@testing.provide_metadata
def _run_test(self, *arg, **kw):
metadata = self.metadata
implicit_returning = kw.pop('implicit_returning', True)
kw['primary_key'] = True
if kw.get('autoincrement', True):
kw['test_needs_autoincrement'] = True
t = Table('x', metadata,
Column('y', self.MyInteger, *arg, **kw),
Column('data', Integer),
implicit_returning=implicit_returning
)
t.create()
r = t.insert().values(data=5).execute()
# we don't pre-fetch 'server_default'.
if 'server_default' in kw and (not testing.db.dialect.implicit_returning or not implicit_returning):
eq_(r.inserted_primary_key, [None])
else:
eq_(r.inserted_primary_key, ['INT_1'])
r.close()
eq_(
t.select().execute().first(),
('INT_1', 5)
)
def test_plain(self):
# among other things, tests that autoincrement
# is enabled.
self._run_test()
def test_literal_default_label(self):
self._run_test(default=literal("INT_1", type_=self.MyInteger).label('foo'))
def test_literal_default_no_label(self):
self._run_test(default=literal("INT_1", type_=self.MyInteger))
def test_sequence(self):
self._run_test(Sequence('foo_seq'))
def test_server_default(self):
self._run_test(server_default='1',)
def test_server_default_no_autoincrement(self):
self._run_test(server_default='1', autoincrement=False)
def test_clause(self):
stmt = select([literal("INT_1", type_=self.MyInteger)]).as_scalar()
self._run_test(default=stmt)
@testing.requires.returning
def test_no_implicit_returning(self):
self._run_test(implicit_returning=False)
@testing.requires.returning
def test_server_default_no_implicit_returning(self):
self._run_test(server_default='1', autoincrement=False)
class ServerDefaultsOnPKTest(fixtures.TestBase):
@testing.provide_metadata
def test_string_default_none_on_insert(self):
"""Test that without implicit returning, we return None for
a string server default.
That is, we don't want to attempt to pre-execute "server_default"
generically - the user should use a Python side-default for a case
like this. Testing that all backends do the same thing here.
"""
metadata = self.metadata
t = Table('x', metadata,
Column('y', String(10), server_default='key_one', primary_key=True),
Column('data', String(10)),
implicit_returning=False
)
metadata.create_all()
r = t.insert().execute(data='data')
eq_(r.inserted_primary_key, [None])
eq_(
t.select().execute().fetchall(),
[('key_one', 'data')]
)
@testing.requires.returning
@testing.provide_metadata
def test_string_default_on_insert_with_returning(self):
"""With implicit_returning, we get a string PK default back no problem."""
metadata = self.metadata
t = Table('x', metadata,
Column('y', String(10), server_default='key_one', primary_key=True),
Column('data', String(10))
)
metadata.create_all()
r = t.insert().execute(data='data')
eq_(r.inserted_primary_key, ['key_one'])
eq_(
t.select().execute().fetchall(),
[('key_one', 'data')]
)
@testing.provide_metadata
def test_int_default_none_on_insert(self):
metadata = self.metadata
t = Table('x', metadata,
Column('y', Integer,
server_default='5', primary_key=True),
Column('data', String(10)),
implicit_returning=False
)
assert t._autoincrement_column is None
metadata.create_all()
r = t.insert().execute(data='data')
eq_(r.inserted_primary_key, [None])
if testing.against('sqlite'):
eq_(
t.select().execute().fetchall(),
[(1, 'data')]
)
else:
eq_(
t.select().execute().fetchall(),
[(5, 'data')]
)
@testing.provide_metadata
def test_autoincrement_reflected_from_server_default(self):
metadata = self.metadata
t = Table('x', metadata,
Column('y', Integer,
server_default='5', primary_key=True),
Column('data', String(10)),
implicit_returning=False
)
assert t._autoincrement_column is None
metadata.create_all()
m2 = MetaData(metadata.bind)
t2 = Table('x', m2, autoload=True, implicit_returning=False)
assert t2._autoincrement_column is None
@testing.provide_metadata
def test_int_default_none_on_insert_reflected(self):
metadata = self.metadata
t = Table('x', metadata,
Column('y', Integer,
server_default='5', primary_key=True),
Column('data', String(10)),
implicit_returning=False
)
metadata.create_all()
m2 = MetaData(metadata.bind)
t2 = Table('x', m2, autoload=True, implicit_returning=False)
r = t2.insert().execute(data='data')
eq_(r.inserted_primary_key, [None])
if testing.against('sqlite'):
eq_(
t2.select().execute().fetchall(),
[(1, 'data')]
)
else:
eq_(
t2.select().execute().fetchall(),
[(5, 'data')]
)
@testing.requires.returning
@testing.provide_metadata
def test_int_default_on_insert_with_returning(self):
metadata = self.metadata
t = Table('x', metadata,
Column('y', Integer,
server_default='5', primary_key=True),
Column('data', String(10))
)
metadata.create_all()
r = t.insert().execute(data='data')
eq_(r.inserted_primary_key, [5])
eq_(
t.select().execute().fetchall(),
[(5, 'data')]
)
class UnicodeDefaultsTest(fixtures.TestBase):
def test_no_default(self):
c = Column(Unicode(32))
def test_unicode_default(self):
# Py3K
#default = 'foo'
# Py2K
default = u'foo'
# end Py2K
c = Column(Unicode(32), default=default)
def test_nonunicode_default(self):
# Py3K
#default = b'foo'
# Py2K
default = 'foo'
# end Py2K
assert_raises_message(
sa.exc.SAWarning,
"Unicode column received non-unicode default value.",
Column,
Unicode(32),
default=default
)
|
|
# -*- coding: utf-8 -*-
"""
@file
@brief Defines blogpost directives.
See `Tutorial: Writing a simple extension
<https://www.sphinx-doc.org/en/master/development/tutorials/helloworld.html>`_,
`Creating reStructuredText Directives
<https://docutils.sourceforge.io/docs/howto/rst-directives.html>`_
"""
import os
import sphinx
from docutils import nodes
from docutils.parsers.rst import Directive
from sphinx.locale import _ as _locale
from docutils.parsers.rst import directives
from docutils.statemachine import StringList
from sphinx import addnodes
from sphinx.util.nodes import set_source_info, process_index_entry
from sphinx.util.nodes import nested_parse_with_titles
from .blog_post import BlogPost
from ..texthelper.texts_language import TITLES
class blogpost_node(nodes.Element):
"""
Defines *blogpost* node.
"""
pass
class blogpostagg_node(nodes.Element):
"""
Defines *blogpostagg* node.
"""
pass
class BlogPostDirective(Directive):
"""
Extracts information about a blog post described by a directive ``.. blogpost::``
and modifies the documentation if *env* is not null. The directive handles the following
options:
* *date*: date of the blog (mandatory)
* *title*: title (mandatory)
* *keywords*: keywords, comma separated (mandatory)
* *categories*: categories, comma separated (mandatory)
* *author*: author (optional)
* *blog_background*: can change the blog background (boolean, default is True)
* *lid* or *label*: an id to refer to (optional)
"""
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'date': directives.unchanged,
'title': directives.unchanged,
'keywords': directives.unchanged,
'categories': directives.unchanged,
'author': directives.unchanged,
'blog_background': directives.unchanged,
'lid': directives.unchanged,
'label': directives.unchanged,
}
has_content = True
add_index = True
add_share = True
blogpost_class = blogpost_node
default_config_bg = "blog_background_page"
def suffix_label(self):
"""
        returns a suffix to add to a label;
        it should not be empty for aggregated pages
@return str
"""
return ""
def run(self):
"""
extracts the information in a dictionary and displays it
if the environment is not null
@return a list of nodes
"""
# settings
sett = self.state.document.settings
language_code = sett.language_code
if hasattr(sett, "out_blogpostlist"):
sett.out_blogpostlist.append(self)
# env
if hasattr(self.state.document.settings, "env"):
env = self.state.document.settings.env
else:
env = None
if env is None:
docname = "___unknown_docname___"
config = None
blog_background = False
sharepost = None
else:
# otherwise, it means sphinx is running
docname = env.docname
# settings and configuration
config = env.config
try:
blog_background = getattr(
config, self.__class__.default_config_bg)
except AttributeError as e:
raise AttributeError("Unable to find '{1}' in \n{0}".format(
"\n".join(sorted(config.values)), self.__class__.default_config_bg)) from e
sharepost = config.sharepost if self.__class__.add_share else None
# post
p = {
'docname': docname,
'lineno': self.lineno,
'date': self.options["date"],
'title': self.options["title"],
'keywords': [a.strip() for a in self.options["keywords"].split(",")],
'categories': [a.strip() for a in self.options["categories"].split(",")],
'blog_background': self.options.get("blog_background", str(blog_background)).strip() in ("True", "true", "1"),
'lid': self.options.get("lid", self.options.get("label", None)),
}
tag = BlogPost.build_tag(p["date"], p["title"]) if p[
'lid'] is None else p['lid']
targetnode = nodes.target(p['title'], '', ids=[tag])
p["target"] = targetnode
idbp = tag + "-container"
if env is not None:
if not hasattr(env, 'blogpost_all'):
env.blogpost_all = []
env.blogpost_all.append(p)
# build node
node = self.__class__.blogpost_class(ids=[idbp], year=p["date"][:4],
rawfile=self.options.get(
"rawfile", None),
linktitle=p["title"], lg=language_code,
blog_background=p["blog_background"])
return self.fill_node(node, env, tag, p, language_code, targetnode, sharepost)
def fill_node(self, node, env, tag, p, language_code, targetnode, sharepost):
"""
Fills the content of the node.
"""
# add a label
suffix_label = self.suffix_label() if not p['lid'] else ""
tag = "{0}{1}".format(tag, suffix_label)
tnl = [".. _{0}:".format(tag), ""]
title = "{0} {1}".format(p["date"], p["title"])
tnl.append(title)
tnl.append("=" * len(title))
tnl.append("")
if sharepost is not None:
tnl.append("")
tnl.append(":sharenet:`{0}`".format(sharepost))
tnl.append('')
tnl.append('')
content = StringList(tnl)
content = content + self.content
try:
nested_parse_with_titles(self.state, content, node)
except Exception as e: # pragma: no cover
from sphinx.util import logging
logger = logging.getLogger("blogpost")
logger.warning(
"[blogpost] unable to parse '{0}' - {1}".format(title, e))
raise e
# final
p['blogpost'] = node
self.exe_class = p.copy()
p["content"] = content
node['classes'] += ["blogpost"]
# for the instruction tocdelay.
node['toctitle'] = title
node['tocid'] = tag
node['tocdoc'] = env.docname
# end.
ns = [node]
return ns
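# Minimal usage sketch for the ``.. blogpost::`` directive handled above
# (values are illustrative only; see option_spec for the accepted options):
#
#     .. blogpost::
#         :date: 2020-01-01
#         :title: A first post
#         :keywords: example, sphinx
#         :categories: documentation
#
#         Body of the post, parsed as regular reStructuredText.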
class BlogPostDirectiveAgg(BlogPostDirective):
"""
    Same as :class:`BlogPostDirective` but for posts included in aggregated pages.
"""
add_index = False
add_share = False
blogpost_class = blogpostagg_node
default_config_bg = "blog_background"
option_spec = {'date': directives.unchanged,
'title': directives.unchanged,
'keywords': directives.unchanged,
'categories': directives.unchanged,
'author': directives.unchanged,
'rawfile': directives.unchanged,
'blog_background': directives.unchanged,
}
def suffix_label(self):
"""
        returns a suffix to add to a label;
        it should not be empty for aggregated pages
@return str
"""
if hasattr(self.state.document.settings, "env"):
env = self.state.document.settings.env
docname = os.path.split(env.docname)[-1]
docname = os.path.splitext(docname)[0]
else:
env = None
docname = ""
return "-agg" + docname
def fill_node(self, node, env, tag, p, language_code, targetnode, sharepost):
"""
Fill the node of an aggregated page.
"""
# add a label
suffix_label = self.suffix_label()
container = nodes.container()
tnl = [".. _{0}{1}:".format(tag, suffix_label), ""]
content = StringList(tnl)
self.state.nested_parse(content, self.content_offset, container)
node += container
# id section
if env is not None:
mid = int(env.new_serialno('indexblog-u-%s' % p["date"][:4])) + 1
else:
mid = -1
# add title
sids = "y{0}-{1}".format(p["date"][:4], mid)
section = nodes.section(ids=[sids])
section['year'] = p["date"][:4]
section['blogmid'] = mid
node += section
textnodes, messages = self.state.inline_text(p["title"], self.lineno)
section += nodes.title(p["title"], '', *textnodes)
section += messages
# add date and share buttons
tnl = [":bigger:`::5:{0}`".format(p["date"])]
if sharepost is not None:
tnl.append(":sharenet:`{0}`".format(sharepost))
tnl.append('')
content = StringList(tnl)
content = content + self.content
# parse the content into sphinx directive,
# it adds it to section
container = nodes.container()
# nested_parse_with_titles(self.state, content, paragraph)
self.state.nested_parse(content, self.content_offset, container)
section += container
# final
p['blogpost'] = node
self.exe_class = p.copy()
p["content"] = content
node['classes'] += ["blogpost"]
# target
# self.state.add_target(p['title'], '', targetnode, lineno)
# index (see site-packages/sphinx/directives/code.py, class Index)
if self.__class__.add_index:
# it adds an index
# self.state.document.note_explicit_target(targetnode)
indexnode = addnodes.index()
indexnode['entries'] = ne = []
indexnode['inline'] = False
set_source_info(self, indexnode)
for entry in set(p["keywords"] + p["categories"] + [p["date"]]):
ne.extend(process_index_entry(entry, tag)) # targetid))
ns = [indexnode, targetnode, node]
else:
ns = [targetnode, node]
return ns
def visit_blogpost_node(self, node):
"""
what to do when visiting a node blogpost
the function should have different behaviour,
depending on the format, or the setup should
specify a different function for each.
"""
if node["blog_background"]:
# the node will be in a box
self.visit_admonition(node)
def depart_blogpost_node(self, node):
"""
what to do when leaving a node blogpost
the function should have different behaviour,
depending on the format, or the setup should
specify a different function for each.
"""
if node["blog_background"]:
# the node will be in a box
self.depart_admonition(node)
def visit_blogpostagg_node(self, node):
"""
what to do when visiting a node blogpost
the function should have different behaviour,
depending on the format, or the setup should
specify a different function for each.
"""
pass
def depart_blogpostagg_node(self, node):
"""
what to do when leaving a node blogpost,
the function should have different behaviour,
depending on the format, or the setup should
specify a different function for each.
"""
pass
def depart_blogpostagg_node_html(self, node):
"""
what to do when leaving a node blogpost,
the function should have different behaviour,
depending on the format, or the setup should
specify a different function for each.
"""
if node.hasattr("year"):
rawfile = node["rawfile"]
if rawfile is not None:
            # there is probably a better way to do this;
            # the module name is something like docutils.../[xx].py
lg = node["lg"]
name = os.path.splitext(os.path.split(rawfile)[-1])[0]
name += ".html"
year = node["year"]
linktitle = node["linktitle"]
link = """<p><a class="reference internal" href="{0}/{2}" title="{1}">{3}</a></p>""" \
.format(year, linktitle, name, TITLES[lg]["more"])
self.body.append(link)
else:
self.body.append(
"%blogpostagg: link to source only available for HTML: '{}'\n".format(type(self)))
######################
# unused, kept as example
######################
class blogpostlist_node(nodes.General, nodes.Element):
"""
defines *blogpostlist* node,
unused, kept as example
"""
pass
class BlogPostListDirective(Directive):
"""
unused, kept as example
"""
def run(self):
return [BlogPostListDirective.blogpostlist('')]
def purge_blogpost(app, env, docname):
"""
unused, kept as example
"""
if not hasattr(env, 'blogpost_all'):
return
env.blogpost_all = [post for post in env.blogpost_all
if post['docname'] != docname]
def process_blogpost_nodes(app, doctree, fromdocname): # pragma: no cover
"""
unused, kept as example
"""
if not app.config.blogpost_include_s:
for node in doctree.traverse(blogpost_node):
node.parent.remove(node)
# Replace all blogpostlist nodes with a list of the collected blogposts.
# Augment each blogpost with a backlink to the original location.
env = app.builder.env
if hasattr(env, "settings") and hasattr(env.settings, "language_code"):
lang = env.settings.language_code
else:
lang = "en"
blogmes = TITLES[lang]["blog_entry"]
for node in doctree.traverse(blogpostlist_node):
if not app.config.blogpost_include_s:
node.replace_self([])
continue
content = []
for post_info in env.blogpost_all:
para = nodes.paragraph()
filename = env.doc2path(post_info['docname'], base=None)
description = (_locale(blogmes) % (filename, post_info['lineno']))
para += nodes.Text(description, description)
# Create a reference
newnode = nodes.reference('', '')
innernode = nodes.emphasis(_locale('here'), _locale('here'))
newnode['refdocname'] = post_info['docname']
newnode['refuri'] = app.builder.get_relative_uri(
fromdocname, post_info['docname'])
try:
newnode['refuri'] += '#' + post_info['target']['refid']
except Exception as e:
                raise KeyError("refid is not present in '{0}'".format(
post_info['target'])) from e
newnode.append(innernode)
para += newnode
para += nodes.Text('.)', '.)')
# Insert into the blogpostlist
content.append(post_info['blogpost'])
content.append(para)
node.replace_self(content)
def setup(app):
"""
setup for ``blogpost`` (sphinx)
"""
# this command enables the parameter blog_background to be part of the
# configuration
app.add_config_value('sharepost', None, 'env')
app.add_config_value('blog_background', True, 'env')
app.add_config_value('blog_background_page', False, 'env')
app.add_config_value('out_blogpostlist', [], 'env')
if hasattr(app, "add_mapping"):
app.add_mapping('blogpost', blogpost_node)
app.add_mapping('blogpostagg', blogpostagg_node)
# app.add_node(blogpostlist)
app.add_node(blogpost_node,
html=(visit_blogpost_node, depart_blogpost_node),
epub=(visit_blogpost_node, depart_blogpost_node),
elatex=(visit_blogpost_node, depart_blogpost_node),
latex=(visit_blogpost_node, depart_blogpost_node),
rst=(visit_blogpost_node, depart_blogpost_node),
md=(visit_blogpost_node, depart_blogpost_node),
text=(visit_blogpost_node, depart_blogpost_node))
app.add_node(blogpostagg_node,
html=(visit_blogpostagg_node, depart_blogpostagg_node_html),
epub=(visit_blogpostagg_node, depart_blogpostagg_node_html),
elatex=(visit_blogpostagg_node, depart_blogpostagg_node),
latex=(visit_blogpostagg_node, depart_blogpostagg_node),
rst=(visit_blogpostagg_node, depart_blogpostagg_node),
md=(visit_blogpostagg_node, depart_blogpostagg_node),
text=(visit_blogpostagg_node, depart_blogpostagg_node))
app.add_directive('blogpost', BlogPostDirective)
app.add_directive('blogpostagg', BlogPostDirectiveAgg)
#app.add_directive('blogpostlist', BlogPostListDirective)
#app.connect('doctree-resolved', process_blogpost_nodes)
#app.connect('env-purge-doc', purge_blogpost)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
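# Sketch of enabling this extension in a Sphinx ``conf.py`` (the module path
# ``mypackage.sphinxext.blog_directive`` is a placeholder for wherever this
# file actually lives; the config values below are the ones registered in
# setup() above):
#
#     extensions = ['mypackage.sphinxext.blog_directive']
#     sharepost = None
#     blog_background = True
#     blog_background_page = False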
|
|
"""jsonstreamer provides a SAX-like push parser via the JSONStreamer class and a 'object' parser via the
class which emits top level entities in any JSON object.
Useful for parsing partial JSON coming over the wire or via disk
Uses 'again' python module's 'events.EventSource' framework for event boilerplate
again -> https://github.com/kashifrazzaqui/again#eventing-boilerplate
"""
from enum import Enum
from sys import stdin, stdout
from again import events
from .yajl.parse import YajlParser, YajlListener, YajlError
from .tape import Tape
JSONLiteralType = Enum('JSONValueType', 'STRING NUMBER BOOLEAN NULL')
JSONCompositeType = Enum('JSONCompositeType', 'OBJECT ARRAY')
class JSONStreamerException(Exception):
def __init__(self, msg):
self._msg = msg
def __str__(self):
if isinstance(self._msg, str):
return self._msg
else:
return self._msg.decode("utf-8")
class JSONStreamer(events.EventSource, YajlListener):
""" Provides a SAX-like push parser which emits events on parsing JSON tokens
Apart from the public API of this class - an API for attaching events is inherited from again.events.EventSource
which provides the following functionality
self.add_listener(event, listener)
self.remove_listener(event, listener)
self.add_catch_all_listener(listener) - this listener receives ALL events
self.remove_catch_all_listener(listener)
self.auto_listen(self, observer, prefix="_on_") - this automatically finds and attaches methods in the `observer`
object which are named as `_on_event` as listeners to the jsonstreamer object. This reduces the need to attach
each listener manually
Events:
Events are of the form (event, *args)
JSONStreamer.DOC_START_EVENT (str): Fired when the `consume` method is called for the first time
JSONStreamer.DOC_END_EVENT (str): Fired when the `close` method is called
JSONStreamer.OBJECT_START_EVENT (str): Fired when a JSON object starts with a `{`
JSONStreamer.OBJECT_END_EVENT (str): Fired when a JSON object ends with a `}`
JSONStreamer.ARRAY_START_EVENT (str): Fired when a JSON array starts with a `[`
JSONStreamer.ARRAY_END_EVENT (str): Fired when a JSON array ends with a `]`
JSONStreamer.KEY_EVENT (str): Fired when a key is encountered within a JSON Object, event also delivers a
string payload with the name of the key as the only parameter in *args
JSONStreamer.VALUE_EVENT (str): Fired when a value for a key is encountered; event also delivers a payload
with the value as the only parameter of *args. The type of the value can be a `string|int|float|boolean|None`
        JSONStreamer.ELEMENT_EVENT (str): Fired when an array element is encountered; event also delivers a payload
with the value as the only parameter of *args. The type of the value can be a `string|int|float|boolean|None`
"""
DOC_START_EVENT = 'doc_start'
DOC_END_EVENT = 'doc_end'
OBJECT_START_EVENT = 'object_start'
OBJECT_END_EVENT = 'object_end'
ARRAY_START_EVENT = 'array_start'
ARRAY_END_EVENT = 'array_end'
KEY_EVENT = 'key'
VALUE_EVENT = 'value'
ELEMENT_EVENT = 'element'
def __init__(self):
super(JSONStreamer, self).__init__()
self._file_like = Tape()
self._stack = []
self._pending_value = False
self._started = False
self._parser = YajlParser(self)
def on_start_map(self, ctx):
self._stack.append(JSONCompositeType.OBJECT)
self._pending_value = False
self.fire(JSONStreamer.OBJECT_START_EVENT)
def on_end_map(self, ctx):
self._stack.pop()
self._pending_value = False
self.fire(JSONStreamer.OBJECT_END_EVENT)
def on_start_array(self, ctx):
self._stack.append(JSONCompositeType.ARRAY)
self.fire(JSONStreamer.ARRAY_START_EVENT)
def on_end_array(self, ctx):
self._pending_value = False
self._stack.pop()
self.fire(JSONStreamer.ARRAY_END_EVENT)
def on_map_key(self, ctx, value):
self.fire(JSONStreamer.KEY_EVENT, value)
def on_string(self, ctx, value):
top = self._stack[-1]
if top is JSONCompositeType.OBJECT:
self.fire(JSONStreamer.VALUE_EVENT, value)
elif top is JSONCompositeType.ARRAY:
self.fire(JSONStreamer.ELEMENT_EVENT, value)
else:
raise RuntimeError('Invalid json-streamer state')
def on_boolean(self, ctx, value):
top = self._stack[-1]
if top is JSONCompositeType.OBJECT:
self.fire(JSONStreamer.VALUE_EVENT, bool(value))
elif top is JSONCompositeType.ARRAY:
self.fire(JSONStreamer.ELEMENT_EVENT, bool(value))
else:
raise RuntimeError('Invalid json-streamer state')
def on_null(self, ctx):
top = self._stack[-1]
if top is JSONCompositeType.OBJECT:
self.fire(JSONStreamer.VALUE_EVENT, None)
elif top is JSONCompositeType.ARRAY:
self.fire(JSONStreamer.ELEMENT_EVENT, None)
else:
raise RuntimeError('Invalid json-streamer state')
def on_integer(self, ctx, value):
top = self._stack[-1]
if top is JSONCompositeType.OBJECT:
self.fire(JSONStreamer.VALUE_EVENT, int(value))
elif top is JSONCompositeType.ARRAY:
self.fire(JSONStreamer.ELEMENT_EVENT, int(value))
else:
raise RuntimeError('Invalid json-streamer state')
def on_double(self, ctx, value):
top = self._stack[-1]
if top is JSONCompositeType.OBJECT:
self.fire(JSONStreamer.VALUE_EVENT, float(value))
elif top is JSONCompositeType.ARRAY:
self.fire(JSONStreamer.ELEMENT_EVENT, float(value))
else:
raise RuntimeError('Invalid json-streamer state')
def on_number(self, ctx, value):
        ''' Since this callback is defined, the integer and double callbacks are not used '''
value = int(value) if value.isdigit() else float(value)
top = self._stack[-1]
if top is JSONCompositeType.OBJECT:
self.fire(JSONStreamer.VALUE_EVENT, value)
elif top is JSONCompositeType.ARRAY:
self.fire(JSONStreamer.ELEMENT_EVENT, value)
else:
raise RuntimeError('Invalid json-streamer state')
def _on_literal(self, json_value_type, value):
top = self._stack[-1]
if top is JSONCompositeType.OBJECT:
if self._pending_value:
self._pending_value = False
self.fire(JSONStreamer.VALUE_EVENT, value)
else:
# must be a key
assert (json_value_type is JSONLiteralType.STRING)
self._pending_value = True
self.fire(JSONStreamer.KEY_EVENT, value)
elif top is JSONCompositeType.ARRAY:
self.fire(JSONStreamer.ELEMENT_EVENT, value)
def consume(self, data):
"""Takes input that must be parsed
Note:
Attach all your listeners before calling this method
Args:
data (str): input json string
"""
if not self._started:
self.fire(JSONStreamer.DOC_START_EVENT)
self._started = True
self._file_like.write(data)
try:
self._parser.parse(self._file_like)
except YajlError as ye:
raise JSONStreamerException(ye.value)
def close(self):
"""Closes the streamer which causes a `DOC_END_EVENT` to be fired and frees up memory used by yajl"""
self.fire(JSONStreamer.DOC_END_EVENT)
self._stack = None
self._parser.close()
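# Usage sketch for JSONStreamer (illustrative only): attach listeners first,
# then feed partial JSON through consume() and finish with close(). The
# add_listener() API comes from again.events.EventSource, as noted in the
# class docstring.
#
#     streamer = JSONStreamer()
#     streamer.add_listener(JSONStreamer.KEY_EVENT, lambda k: print('key:', k))
#     streamer.add_listener(JSONStreamer.VALUE_EVENT, lambda v: print('value:', v))
#     streamer.consume('{"a": 1, "b"')   # partial input is fine
#     streamer.consume(': [true, null]}')
#     streamer.close()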
class ObjectStreamer(events.EventSource):
""" Emits key-value pairs or array elements at the top level of a json object/array
Apart from the public API of this class - an API for attaching events is inherited from again.events.EventSource
which provides the following functionality
self.add_listener(event, listener)
self.remove_listener(event, listener)
self.add_catch_all_listener(listener) - this listener receives ALL events
self.remove_catch_all_listener(listener)
self.auto_listen(self, observer, prefix="_on_") - this automatically finds and attaches methods in the `observer`
object which are named as `_on_event` as listeners to the jsonstreamer object. This reduces the need to attach
each listener manually
Events:
Events are of the form (event, *args)
ObjectStreamer.OBJECT_STREAM_START_EVENT (str): Fired at the start of the `root` JSON object, this is mutually
exclusive from the ARRAY_STREAM_*_EVENTs
ObjectStreamer.OBJECT_STREAM_END_EVENT (str): Fired at the end of the `root` JSON object, this is mutually
exclusive from the ARRAY_STREAM_*_EVENTs
ObjectStreamer.ARRAY_STREAM_START_EVENT (str): Fired at the start of the `root` JSON array, this is mutually
exclusive from the OBJECT_STREAM_*_EVENTs
ObjectStreamer.ARRAY_STREAM_END_EVENT (str): Fired at the end of the `root` JSON array, this is mutually
exclusive from the OBJECT_STREAM_*_EVENTs
ObjectStreamer.PAIR_EVENT (str): Fired when a top level key-value pair of the `root` object is complete. This
event also carries a tuple payload which contains the key (str) and value (str|int|float|boolean|None)
ObjectStreamer.ELEMENT_EVENT (str): Fired when an array element of the `root` array is complete. This event
also carries a payload which contains the value (str|int|float|boolean|None) of the element
"""
OBJECT_STREAM_START_EVENT = 'object_stream_start'
OBJECT_STREAM_END_EVENT = 'object_stream_end'
ARRAY_STREAM_START_EVENT = 'array_stream_start'
ARRAY_STREAM_END_EVENT = 'array_stream_end'
PAIR_EVENT = 'pair'
ELEMENT_EVENT = 'element'
def __init__(self):
super(ObjectStreamer, self).__init__()
self._streamer = JSONStreamer()
self._streamer.auto_listen(self)
def _on_doc_start(self):
self._root = None
self._obj_stack = []
self._key_stack = []
def _on_doc_end(self):
pass
def _on_object_start(self):
if self._root is None:
self._root = JSONCompositeType.OBJECT
self.fire(ObjectStreamer.OBJECT_STREAM_START_EVENT)
else:
d = {}
self._obj_stack.append(d)
def _process_deep_entities(self):
o = self._obj_stack.pop()
key_depth = len(self._key_stack)
        if key_depth == 0:
            if len(self._obj_stack) == 0:
self.fire(ObjectStreamer.ELEMENT_EVENT, o)
else:
self._obj_stack[-1].append(o)
        elif key_depth == 1:
            if len(self._obj_stack) == 0:
k = self._key_stack.pop()
self.fire(ObjectStreamer.PAIR_EVENT, (k, o))
else:
top = self._obj_stack[-1]
if isinstance(top, list):
top.append(o)
else:
k = self._key_stack.pop()
top[k] = o
elif key_depth > 1:
current_obj = self._obj_stack[-1]
if type(current_obj) is list:
current_obj.append(o)
else:
k = self._key_stack.pop()
current_obj[k] = o
def _on_object_end(self):
if len(self._obj_stack) > 0:
self._process_deep_entities()
else:
self.fire(ObjectStreamer.OBJECT_STREAM_END_EVENT)
def _on_array_start(self):
if self._root is None:
self._root = JSONCompositeType.ARRAY
            self.fire(ObjectStreamer.ARRAY_STREAM_START_EVENT)
else:
self._obj_stack.append(list())
def _on_array_end(self):
if len(self._obj_stack) > 0:
self._process_deep_entities()
else:
self.fire(ObjectStreamer.ARRAY_STREAM_END_EVENT)
def _on_key(self, key):
self._key_stack.append(key)
def _on_value(self, value):
k = self._key_stack.pop()
        if len(self._obj_stack) == 0:
self.fire(ObjectStreamer.PAIR_EVENT, (k, value))
else:
self._obj_stack[-1][k] = value
def _on_element(self, item):
        if len(self._obj_stack) == 0:
            self.fire(ObjectStreamer.ELEMENT_EVENT, item)
else:
self._obj_stack[-1].append(item)
def consume(self, data):
"""Takes input that must be parsed
Note:
Attach all your listeners before calling this method
Args:
data (str): input json string
"""
try:
self._streamer.consume(data)
except YajlError as ye:
print(ye.value)
raise JSONStreamerException(ye.value)
def close(self):
"""Closes the object streamer"""
self._streamer.close()
self._streamer = None
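# Usage sketch for ObjectStreamer (illustrative only): top level key/value
# pairs of the root object arrive fully built as PAIR_EVENT payloads, with
# any nested structures already assembled.
#
#     obj_streamer = ObjectStreamer()
#     obj_streamer.add_listener(ObjectStreamer.PAIR_EVENT,
#                               lambda pair: print('pair:', pair))
#     obj_streamer.consume('{"a": {"b": [1, 2]}}')   # fires ('a', {'b': [1, 2]})
#     obj_streamer.close()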
def run(data=stdin):
json_input = data.read()
def _catch_all(event_name, *args):
stdout.write('\nevent: ' + event_name)
for each in args:
stdout.write('\t->' + ' values: ' + str(each))
streamer = JSONStreamer()
streamer.add_catch_all_listener(_catch_all)
streamer.consume(json_input)
streamer.close()
stdout.write('\n')
if __name__ == '__main__':
run()
|
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import logging
import re
import time
from botocore.compat import ensure_unicode, ensure_bytes, urlparse
from botocore.retryhandler import EXCEPTION_MAP as RETRYABLE_EXCEPTIONS
logger = logging.getLogger(__name__)
class Monitor(object):
_EVENTS_TO_REGISTER = [
'before-parameter-build',
'request-created',
'response-received',
'after-call',
'after-call-error',
]
def __init__(self, adapter, publisher):
"""Abstraction for monitoring clients API calls
:param adapter: An adapter that takes event emitter events
and produces monitor events
:param publisher: A publisher for generated monitor events
"""
self._adapter = adapter
self._publisher = publisher
def register(self, event_emitter):
"""Register an event emitter to the monitor"""
for event_to_register in self._EVENTS_TO_REGISTER:
event_emitter.register_last(event_to_register, self.capture)
def capture(self, event_name, **payload):
"""Captures an incoming event from the event emitter
        It will feed an event emitter event to the monitor's adapter to create
a monitor event and then publish that event to the monitor's publisher.
"""
try:
monitor_event = self._adapter.feed(event_name, payload)
if monitor_event:
self._publisher.publish(monitor_event)
except Exception as e:
logger.debug(
'Exception %s raised by client monitor in handling event %s',
e, event_name, exc_info=True)
class MonitorEventAdapter(object):
def __init__(self, time=time.time):
"""Adapts event emitter events to produce monitor events
:type time: callable
:param time: A callable that produces the current time
"""
self._time = time
def feed(self, emitter_event_name, emitter_payload):
"""Feed an event emitter event to generate a monitor event
:type emitter_event_name: str
:param emitter_event_name: The name of the event emitted
:type emitter_payload: dict
        :param emitter_payload: The payload associated with the event
emitted
:rtype: BaseMonitorEvent
:returns: A monitor event based on the event emitter events
fired
"""
return self._get_handler(emitter_event_name)(**emitter_payload)
def _get_handler(self, event_name):
return getattr(
self, '_handle_' + event_name.split('.')[0].replace('-', '_')
)
def _handle_before_parameter_build(self, model, context, **kwargs):
context['current_api_call_event'] = APICallEvent(
service=model.service_model.service_id,
operation=model.wire_name,
timestamp=self._get_current_time(),
)
def _handle_request_created(self, request, **kwargs):
context = request.context
new_attempt_event = context[
'current_api_call_event'].new_api_call_attempt(
timestamp=self._get_current_time())
new_attempt_event.request_headers = request.headers
new_attempt_event.url = request.url
context['current_api_call_attempt_event'] = new_attempt_event
def _handle_response_received(self, parsed_response, context, exception,
**kwargs):
attempt_event = context.pop('current_api_call_attempt_event')
attempt_event.latency = self._get_latency(attempt_event)
if parsed_response is not None:
attempt_event.http_status_code = parsed_response[
'ResponseMetadata']['HTTPStatusCode']
attempt_event.response_headers = parsed_response[
'ResponseMetadata']['HTTPHeaders']
attempt_event.parsed_error = parsed_response.get('Error')
else:
attempt_event.wire_exception = exception
return attempt_event
def _handle_after_call(self, context, parsed, **kwargs):
context['current_api_call_event'].retries_exceeded = parsed[
'ResponseMetadata'].get('MaxAttemptsReached', False)
return self._complete_api_call(context)
def _handle_after_call_error(self, context, exception, **kwargs):
# If the after-call-error was emitted and the error being raised
        # was a retryable connection error, then the retries must have been
        # exceeded for that exception, as this event gets emitted **after**
        # retries happen.
context['current_api_call_event'].retries_exceeded = \
self._is_retryable_exception(exception)
return self._complete_api_call(context)
def _is_retryable_exception(self, exception):
return isinstance(
exception, tuple(RETRYABLE_EXCEPTIONS['GENERAL_CONNECTION_ERROR']))
def _complete_api_call(self, context):
call_event = context.pop('current_api_call_event')
call_event.latency = self._get_latency(call_event)
return call_event
def _get_latency(self, event):
return self._get_current_time() - event.timestamp
def _get_current_time(self):
return int(self._time() * 1000)
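# Note on the dispatch above: _get_handler() takes the part of the emitter
# event name before the first '.' and replaces dashes with underscores, so an
# event such as 'after-call.s3.PutObject' is routed to _handle_after_call().
# For example (illustrative only):
#
#     adapter = MonitorEventAdapter()
#     adapter._get_handler('after-call.s3.PutObject')  # -> adapter._handle_after_call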
class BaseMonitorEvent(object):
def __init__(self, service, operation, timestamp):
"""Base monitor event
:type service: str
:param service: A string identifying the service associated to
the event
:type operation: str
:param operation: A string identifying the operation of service
associated to the event
:type timestamp: int
:param timestamp: Epoch time in milliseconds from when the event began
"""
self.service = service
self.operation = operation
self.timestamp = timestamp
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.__dict__)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
class APICallEvent(BaseMonitorEvent):
def __init__(self, service, operation, timestamp, latency=None,
attempts=None, retries_exceeded=False):
"""Monitor event for a single API call
This event corresponds to a single client method call, which includes
        every HTTP request attempt made in order to complete the client call
:type service: str
:param service: A string identifying the service associated to
the event
:type operation: str
:param operation: A string identifying the operation of service
associated to the event
:type timestamp: int
:param timestamp: Epoch time in milliseconds from when the event began
:type latency: int
:param latency: The time in milliseconds to complete the client call
:type attempts: list
:param attempts: The list of APICallAttempts associated to the
APICall
:type retries_exceeded: bool
:param retries_exceeded: True if API call exceeded retries. False
otherwise
"""
super(APICallEvent, self).__init__(
service=service, operation=operation, timestamp=timestamp)
self.latency = latency
self.attempts = attempts
if attempts is None:
self.attempts = []
self.retries_exceeded = retries_exceeded
def new_api_call_attempt(self, timestamp):
"""Instantiates APICallAttemptEvent associated to the APICallEvent
:type timestamp: int
:param timestamp: Epoch time in milliseconds to associate to the
APICallAttemptEvent
"""
attempt_event = APICallAttemptEvent(
service=self.service,
operation=self.operation,
timestamp=timestamp
)
self.attempts.append(attempt_event)
return attempt_event
class APICallAttemptEvent(BaseMonitorEvent):
def __init__(self, service, operation, timestamp,
latency=None, url=None, http_status_code=None,
request_headers=None, response_headers=None,
parsed_error=None, wire_exception=None):
"""Monitor event for a single API call attempt
This event corresponds to a single HTTP request attempt in completing
the entire client method call.
:type service: str
:param service: A string identifying the service associated to
the event
:type operation: str
:param operation: A string identifying the operation of service
associated to the event
:type timestamp: int
:param timestamp: Epoch time in milliseconds from when the HTTP request
started
:type latency: int
:param latency: The time in milliseconds to complete the HTTP request
whether it succeeded or failed
:type url: str
:param url: The URL the attempt was sent to
:type http_status_code: int
:param http_status_code: The HTTP status code of the HTTP response
if there was a response
:type request_headers: dict
:param request_headers: The HTTP headers sent in making the HTTP
request
:type response_headers: dict
:param response_headers: The HTTP headers returned in the HTTP response
if there was a response
:type parsed_error: dict
:param parsed_error: The error parsed if the service returned an
error back
:type wire_exception: Exception
:param wire_exception: The exception raised in sending the HTTP
request (i.e. ConnectionError)
"""
super(APICallAttemptEvent, self).__init__(
service=service, operation=operation, timestamp=timestamp
)
self.latency = latency
self.url = url
self.http_status_code = http_status_code
self.request_headers = request_headers
self.response_headers = response_headers
self.parsed_error = parsed_error
self.wire_exception = wire_exception
class CSMSerializer(object):
_MAX_CLIENT_ID_LENGTH = 255
_MAX_EXCEPTION_CLASS_LENGTH = 128
_MAX_ERROR_CODE_LENGTH = 128
_MAX_USER_AGENT_LENGTH = 256
_MAX_MESSAGE_LENGTH = 512
_RESPONSE_HEADERS_TO_EVENT_ENTRIES = {
'x-amzn-requestid': 'XAmznRequestId',
'x-amz-request-id': 'XAmzRequestId',
'x-amz-id-2': 'XAmzId2',
}
_AUTH_REGEXS = {
'v4': re.compile(
r'AWS4-HMAC-SHA256 '
r'Credential=(?P<access_key>\w+)/\d+/'
r'(?P<signing_region>[a-z0-9-]+)/'
),
's3': re.compile(
r'AWS (?P<access_key>\w+):'
)
}
_SERIALIZEABLE_EVENT_PROPERTIES = [
'service',
'operation',
'timestamp',
'attempts',
'latency',
'retries_exceeded',
'url',
'request_headers',
'http_status_code',
'response_headers',
'parsed_error',
'wire_exception',
]
def __init__(self, csm_client_id):
"""Serializes monitor events to CSM (Client Side Monitoring) format
:type csm_client_id: str
:param csm_client_id: The application identifier to associate
to the serialized events
"""
self._validate_client_id(csm_client_id)
self.csm_client_id = csm_client_id
def _validate_client_id(self, csm_client_id):
if len(csm_client_id) > self._MAX_CLIENT_ID_LENGTH:
raise ValueError(
'The value provided for csm_client_id: %s exceeds the '
'maximum length of %s characters' % (
csm_client_id, self._MAX_CLIENT_ID_LENGTH)
)
def serialize(self, event):
"""Serializes a monitor event to the CSM format
:type event: BaseMonitorEvent
:param event: The event to serialize to bytes
:rtype: bytes
:returns: The CSM serialized form of the event
"""
event_dict = self._get_base_event_dict(event)
event_type = self._get_event_type(event)
event_dict['Type'] = event_type
for attr in self._SERIALIZEABLE_EVENT_PROPERTIES:
value = getattr(event, attr, None)
if value is not None:
getattr(self, '_serialize_' + attr)(
value, event_dict, event_type=event_type)
return ensure_bytes(
json.dumps(event_dict, separators=(',', ':')))
def _get_base_event_dict(self, event):
return {
'Version': 1,
'ClientId': self.csm_client_id,
}
def _serialize_service(self, service, event_dict, **kwargs):
event_dict['Service'] = service
def _serialize_operation(self, operation, event_dict, **kwargs):
event_dict['Api'] = operation
def _serialize_timestamp(self, timestamp, event_dict, **kwargs):
event_dict['Timestamp'] = timestamp
def _serialize_attempts(self, attempts, event_dict, **kwargs):
event_dict['AttemptCount'] = len(attempts)
if attempts:
self._add_fields_from_last_attempt(event_dict, attempts[-1])
def _add_fields_from_last_attempt(self, event_dict, last_attempt):
if last_attempt.request_headers:
# It does not matter which attempt to use to grab the region
# for the ApiCall event, but SDKs typically do the last one.
region = self._get_region(last_attempt.request_headers)
if region is not None:
event_dict['Region'] = region
event_dict['UserAgent'] = self._get_user_agent(
last_attempt.request_headers)
if last_attempt.http_status_code is not None:
event_dict['FinalHttpStatusCode'] = last_attempt.http_status_code
if last_attempt.parsed_error is not None:
self._serialize_parsed_error(
last_attempt.parsed_error, event_dict, 'ApiCall')
if last_attempt.wire_exception is not None:
self._serialize_wire_exception(
last_attempt.wire_exception, event_dict, 'ApiCall')
def _serialize_latency(self, latency, event_dict, event_type):
if event_type == 'ApiCall':
event_dict['Latency'] = latency
elif event_type == 'ApiCallAttempt':
event_dict['AttemptLatency'] = latency
def _serialize_retries_exceeded(self, retries_exceeded, event_dict,
**kwargs):
event_dict['MaxRetriesExceeded'] = (1 if retries_exceeded else 0)
def _serialize_url(self, url, event_dict, **kwargs):
event_dict['Fqdn'] = urlparse(url).netloc
def _serialize_request_headers(self, request_headers, event_dict,
**kwargs):
event_dict['UserAgent'] = self._get_user_agent(request_headers)
if self._is_signed(request_headers):
event_dict['AccessKey'] = self._get_access_key(request_headers)
region = self._get_region(request_headers)
if region is not None:
event_dict['Region'] = region
if 'X-Amz-Security-Token' in request_headers:
event_dict['SessionToken'] = request_headers[
'X-Amz-Security-Token']
def _serialize_http_status_code(self, http_status_code, event_dict,
**kwargs):
event_dict['HttpStatusCode'] = http_status_code
def _serialize_response_headers(self, response_headers, event_dict,
**kwargs):
for header, entry in self._RESPONSE_HEADERS_TO_EVENT_ENTRIES.items():
if header in response_headers:
event_dict[entry] = response_headers[header]
def _serialize_parsed_error(self, parsed_error, event_dict, event_type,
**kwargs):
field_prefix = 'Final' if event_type == 'ApiCall' else ''
event_dict[field_prefix + 'AwsException'] = self._truncate(
parsed_error['Code'], self._MAX_ERROR_CODE_LENGTH)
event_dict[field_prefix + 'AwsExceptionMessage'] = self._truncate(
parsed_error['Message'], self._MAX_MESSAGE_LENGTH)
def _serialize_wire_exception(self, wire_exception, event_dict, event_type,
**kwargs):
field_prefix = 'Final' if event_type == 'ApiCall' else ''
event_dict[field_prefix + 'SdkException'] = self._truncate(
wire_exception.__class__.__name__,
self._MAX_EXCEPTION_CLASS_LENGTH)
event_dict[field_prefix + 'SdkExceptionMessage'] = self._truncate(
str(wire_exception), self._MAX_MESSAGE_LENGTH)
def _get_event_type(self, event):
if isinstance(event, APICallEvent):
return 'ApiCall'
elif isinstance(event, APICallAttemptEvent):
return 'ApiCallAttempt'
def _get_access_key(self, request_headers):
auth_val = self._get_auth_value(request_headers)
_, auth_match = self._get_auth_match(auth_val)
return auth_match.group('access_key')
def _get_region(self, request_headers):
if not self._is_signed(request_headers):
return None
auth_val = self._get_auth_value(request_headers)
signature_version, auth_match = self._get_auth_match(auth_val)
if signature_version != 'v4':
return None
return auth_match.group('signing_region')
def _get_user_agent(self, request_headers):
return self._truncate(
ensure_unicode(request_headers.get('User-Agent', '')),
self._MAX_USER_AGENT_LENGTH
)
def _is_signed(self, request_headers):
return 'Authorization' in request_headers
def _get_auth_value(self, request_headers):
return ensure_unicode(request_headers['Authorization'])
def _get_auth_match(self, auth_val):
for signature_version, regex in self._AUTH_REGEXS.items():
match = regex.match(auth_val)
if match:
return signature_version, match
return None, None
def _truncate(self, text, max_length):
if len(text) > max_length:
logger.debug(
'Truncating following value to maximum length of '
                '%s: %s', max_length, text)
return text[:max_length]
return text
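# Serialization sketch (illustrative only; key order may vary across Python
# versions since it follows dict insertion/iteration order):
#
#     serializer = CSMSerializer(csm_client_id='my-app')
#     serializer.serialize(
#         APICallEvent(service='S3', operation='PutObject', timestamp=1000))
#     # -> b'{"Version":1,"ClientId":"my-app","Type":"ApiCall","Service":"S3",
#     #       "Api":"PutObject","Timestamp":1000,"AttemptCount":0,
#     #       "MaxRetriesExceeded":0}'   (wrapped here for readability)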
class SocketPublisher(object):
_MAX_MONITOR_EVENT_LENGTH = 8 * 1024
def __init__(self, socket, host, port, serializer):
"""Publishes monitor events to a socket
:type socket: socket.socket
:param socket: The socket object to use to publish events
:type host: string
:param host: The host to send events to
:type port: integer
:param port: The port on the host to send events to
:param serializer: The serializer to use to serialize the event
to a form that can be published to the socket. This must
have a `serialize()` method that accepts a monitor event
and return bytes
"""
self._socket = socket
self._address = (host, port)
self._serializer = serializer
def publish(self, event):
"""Publishes a specified monitor event
:type event: BaseMonitorEvent
:param event: The monitor event to be sent
over the publisher's socket to the desired address.
"""
serialized_event = self._serializer.serialize(event)
if len(serialized_event) > self._MAX_MONITOR_EVENT_LENGTH:
logger.debug(
'Serialized event of size %s exceeds the maximum length '
'allowed: %s. Not sending event to socket.',
len(serialized_event), self._MAX_MONITOR_EVENT_LENGTH
)
return
self._socket.sendto(serialized_event, self._address)
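# Wiring sketch (illustrative only; botocore performs this wiring internally
# when client side monitoring is enabled): the classes in this module compose
# roughly as follows, with 'client' being an existing botocore/boto3 client.
#
#     import socket
#     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     publisher = SocketPublisher(sock, '127.0.0.1', 31000,
#                                 CSMSerializer(csm_client_id='my-app'))
#     monitor = Monitor(MonitorEventAdapter(), publisher)
#     monitor.register(client.meta.events)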
|
|
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""MB - the Meta-Build wrapper around GN.
MB is a wrapper script for GN that can be used to generate build files
for sets of canned configurations and analyze them.
"""
from __future__ import print_function
import argparse
import ast
import errno
import json
import os
import pipes
import platform
import pprint
import re
import shutil
import sys
import subprocess
import tempfile
import traceback
import urllib2
from collections import OrderedDict
CHROMIUM_SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
sys.path = [os.path.join(CHROMIUM_SRC_DIR, 'build')] + sys.path
import gn_helpers
def main(args):
mbw = MetaBuildWrapper()
return mbw.Main(args)
class MetaBuildWrapper(object):
def __init__(self):
self.chromium_src_dir = CHROMIUM_SRC_DIR
self.default_config = os.path.join(self.chromium_src_dir, 'infra', 'mb',
'mb_config.pyl')
self.default_isolate_map = os.path.join(self.chromium_src_dir, 'infra',
'mb', 'gn_isolate_map.pyl')
self.executable = sys.executable
self.platform = sys.platform
self.sep = os.sep
self.args = argparse.Namespace()
self.configs = {}
self.luci_tryservers = {}
self.masters = {}
self.mixins = {}
def Main(self, args):
self.ParseArgs(args)
try:
ret = self.args.func()
if ret:
self.DumpInputFiles()
return ret
except KeyboardInterrupt:
self.Print('interrupted, exiting')
return 130
except Exception:
self.DumpInputFiles()
s = traceback.format_exc()
for l in s.splitlines():
self.Print(l)
return 1
def ParseArgs(self, argv):
def AddCommonOptions(subp):
subp.add_argument('-b', '--builder',
help='builder name to look up config from')
subp.add_argument('-m', '--master',
help='master name to look up config from')
subp.add_argument('-c', '--config',
help='configuration to analyze')
subp.add_argument('--phase',
help='optional phase name (used when builders '
'do multiple compiles with different '
'arguments in a single build)')
subp.add_argument('-f', '--config-file', metavar='PATH',
default=self.default_config,
help='path to config file '
'(default is %(default)s)')
subp.add_argument('-i', '--isolate-map-file', metavar='PATH',
help='path to isolate map file '
'(default is %(default)s)',
default=[],
action='append',
dest='isolate_map_files')
subp.add_argument('-g', '--goma-dir',
help='path to goma directory')
subp.add_argument('--android-version-code',
help='Sets GN arg android_default_version_code')
subp.add_argument('--android-version-name',
help='Sets GN arg android_default_version_name')
subp.add_argument('-n', '--dryrun', action='store_true',
help='Do a dry run (i.e., do nothing, just print '
'the commands that will run)')
subp.add_argument('-v', '--verbose', action='store_true',
help='verbose logging')
parser = argparse.ArgumentParser(prog='mb')
subps = parser.add_subparsers()
subp = subps.add_parser('analyze',
help='analyze whether changes to a set of files '
'will cause a set of binaries to be rebuilt.')
AddCommonOptions(subp)
subp.add_argument('path', nargs=1,
help='path build was generated into.')
subp.add_argument('input_path', nargs=1,
help='path to a file containing the input arguments '
'as a JSON object.')
subp.add_argument('output_path', nargs=1,
help='path to a file containing the output arguments '
'as a JSON object.')
subp.set_defaults(func=self.CmdAnalyze)
subp = subps.add_parser('export',
                            help='print out the expanded configuration for '
'each builder as a JSON object')
subp.add_argument('-f', '--config-file', metavar='PATH',
default=self.default_config,
help='path to config file (default is %(default)s)')
subp.add_argument('-g', '--goma-dir',
help='path to goma directory')
subp.set_defaults(func=self.CmdExport)
subp = subps.add_parser('gen',
help='generate a new set of build files')
AddCommonOptions(subp)
subp.add_argument('--swarming-targets-file',
help='save runtime dependencies for targets listed '
'in file.')
subp.add_argument('path', nargs=1,
help='path to generate build into')
subp.set_defaults(func=self.CmdGen)
subp = subps.add_parser('isolate',
                            help='generate the .isolate files for a given '
'binary')
AddCommonOptions(subp)
subp.add_argument('path', nargs=1,
help='path build was generated into')
subp.add_argument('target', nargs=1,
help='ninja target to generate the isolate for')
subp.set_defaults(func=self.CmdIsolate)
subp = subps.add_parser('lookup',
help='look up the command for a given config or '
'builder')
AddCommonOptions(subp)
subp.set_defaults(func=self.CmdLookup)
subp = subps.add_parser(
'run',
help='build and run the isolated version of a '
'binary',
formatter_class=argparse.RawDescriptionHelpFormatter)
subp.description = (
'Build, isolate, and run the given binary with the command line\n'
'listed in the isolate. You may pass extra arguments after the\n'
'target; use "--" if the extra arguments need to include switches.\n'
'\n'
'Examples:\n'
'\n'
' % tools/mb/mb.py run -m chromium.linux -b "Linux Builder" \\\n'
' //out/Default content_browsertests\n'
'\n'
' % tools/mb/mb.py run out/Default content_browsertests\n'
'\n'
' % tools/mb/mb.py run out/Default content_browsertests -- \\\n'
' --test-launcher-retry-limit=0'
'\n'
)
AddCommonOptions(subp)
subp.add_argument('-j', '--jobs', dest='jobs', type=int,
help='Number of jobs to pass to ninja')
subp.add_argument('--no-build', dest='build', default=True,
action='store_false',
help='Do not build, just isolate and run')
subp.add_argument('path', nargs=1,
help=('path to generate build into (or use).'
' This can be either a regular path or a '
'GN-style source-relative path like '
'//out/Default.'))
subp.add_argument('-s', '--swarmed', action='store_true',
help='Run under swarming with the default dimensions')
subp.add_argument('-d', '--dimension', default=[], action='append', nargs=2,
dest='dimensions', metavar='FOO bar',
help='dimension to filter on')
subp.add_argument('--no-default-dimensions', action='store_false',
dest='default_dimensions', default=True,
help='Do not automatically add dimensions to the task')
subp.add_argument('target', nargs=1,
help='ninja target to build and run')
subp.add_argument('extra_args', nargs='*',
help=('extra args to pass to the isolate to run. Use '
'"--" as the first arg if you need to pass '
'switches'))
subp.set_defaults(func=self.CmdRun)
subp = subps.add_parser('validate',
help='validate the config file')
subp.add_argument('-f', '--config-file', metavar='PATH',
default=self.default_config,
help='path to config file (default is %(default)s)')
subp.set_defaults(func=self.CmdValidate)
subp = subps.add_parser('gerrit-buildbucket-config',
help='Print buildbucket.config for gerrit '
'(see MB user guide)')
subp.add_argument('-f', '--config-file', metavar='PATH',
default=self.default_config,
help='path to config file (default is %(default)s)')
subp.set_defaults(func=self.CmdBuildbucket)
subp = subps.add_parser('help',
help='Get help on a subcommand.')
subp.add_argument(nargs='?', action='store', dest='subcommand',
help='The command to get help for.')
subp.set_defaults(func=self.CmdHelp)
self.args = parser.parse_args(argv)
def DumpInputFiles(self):
def DumpContentsOfFilePassedTo(arg_name, path):
if path and self.Exists(path):
self.Print("\n# To recreate the file passed to %s:" % arg_name)
self.Print("%% cat > %s <<EOF" % path)
contents = self.ReadFile(path)
self.Print(contents)
self.Print("EOF\n%\n")
if getattr(self.args, 'input_path', None):
DumpContentsOfFilePassedTo(
'argv[0] (input_path)', self.args.input_path[0])
if getattr(self.args, 'swarming_targets_file', None):
DumpContentsOfFilePassedTo(
'--swarming-targets-file', self.args.swarming_targets_file)
def CmdAnalyze(self):
vals = self.Lookup()
return self.RunGNAnalyze(vals)
def CmdExport(self):
self.ReadConfigFile()
obj = {}
for master, builders in self.masters.items():
obj[master] = {}
for builder in builders:
config = self.masters[master][builder]
if not config:
continue
if isinstance(config, dict):
args = {k: self.FlattenConfig(v)['gn_args']
for k, v in config.items()}
elif config.startswith('//'):
args = config
else:
args = self.FlattenConfig(config)['gn_args']
if 'error' in args:
continue
obj[master][builder] = args
# Dump object and trim trailing whitespace.
s = '\n'.join(l.rstrip() for l in
json.dumps(obj, sort_keys=True, indent=2).splitlines())
self.Print(s)
return 0
def CmdGen(self):
vals = self.Lookup()
return self.RunGNGen(vals)
def CmdHelp(self):
if self.args.subcommand:
self.ParseArgs([self.args.subcommand, '--help'])
else:
self.ParseArgs(['--help'])
def CmdIsolate(self):
vals = self.GetConfig()
if not vals:
return 1
return self.RunGNIsolate()
def CmdLookup(self):
vals = self.Lookup()
cmd = self.GNCmd('gen', '_path_')
gn_args = self.GNArgs(vals)
self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
env = None
self.PrintCmd(cmd, env)
return 0
def CmdRun(self):
vals = self.GetConfig()
if not vals:
return 1
build_dir = self.args.path[0]
target = self.args.target[0]
if self.args.build:
ret = self.Build(target)
if ret:
return ret
ret = self.RunGNIsolate()
if ret:
return ret
if self.args.swarmed:
return self._RunUnderSwarming(build_dir, target)
else:
return self._RunLocallyIsolated(build_dir, target)
def _RunUnderSwarming(self, build_dir, target):
# TODO(dpranke): Look up the information for the target in
# the //testing/buildbot.json file, if possible, so that we
# can determine the isolate target, command line, and additional
# swarming parameters, if possible.
#
# TODO(dpranke): Also, add support for sharding and merging results.
dimensions = []
for k, v in self._DefaultDimensions() + self.args.dimensions:
dimensions += ['-d', k, v]
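# e.g. on Linux with no extra -d flags this yields (illustrative):
# ['-d', 'pool', 'Chrome', '-d', 'cpu', 'x86-64', '-d', 'os', 'Ubuntu-14.04']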
cmd = [
self.executable,
self.PathJoin('tools', 'swarming_client', 'isolate.py'),
'archive',
'-s',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
'-I', 'isolateserver.appspot.com',
]
ret, out, _ = self.Run(cmd, force_verbose=False)
if ret:
return ret
isolated_hash = out.splitlines()[0].split()[0]
cmd = [
self.executable,
self.PathJoin('tools', 'swarming_client', 'swarming.py'),
'run',
'-s', isolated_hash,
'-I', 'isolateserver.appspot.com',
'-S', 'chromium-swarm.appspot.com',
] + dimensions
if self.args.extra_args:
cmd += ['--'] + self.args.extra_args
ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
return ret
def _RunLocallyIsolated(self, build_dir, target):
cmd = [
self.executable,
self.PathJoin('tools', 'swarming_client', 'isolate.py'),
'run',
'-s',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
]
if self.args.extra_args:
cmd += ['--'] + self.args.extra_args
ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
return ret
def _DefaultDimensions(self):
if not self.args.default_dimensions:
return []
# This code is naive and just picks reasonable defaults per platform.
if self.platform == 'darwin':
os_dim = ('os', 'Mac-10.12')
elif self.platform.startswith('linux'):
os_dim = ('os', 'Ubuntu-14.04')
elif self.platform == 'win32':
os_dim = ('os', 'Windows-10')
else:
raise MBErr('unrecognized platform string "%s"' % self.platform)
return [('pool', 'Chrome'),
('cpu', 'x86-64'),
os_dim]
def CmdBuildbucket(self):
self.ReadConfigFile()
self.Print('# This file was generated using '
'"tools/mb/mb.py gerrit-buildbucket-config".')
for luci_tryserver in sorted(self.luci_tryservers):
self.Print('[bucket "luci.%s"]' % luci_tryserver)
for bot in sorted(self.luci_tryservers[luci_tryserver]):
self.Print('\tbuilder = %s' % bot)
for master in sorted(self.masters):
if master.startswith('tryserver.'):
self.Print('[bucket "master.%s"]' % master)
for bot in sorted(self.masters[master]):
self.Print('\tbuilder = %s' % bot)
return 0
def CmdValidate(self, print_ok=True):
errs = []
# Read the file to make sure it parses.
self.ReadConfigFile()
# Build a list of all of the configs referenced by builders.
all_configs = {}
for master in self.masters:
for config in self.masters[master].values():
if isinstance(config, dict):
for c in config.values():
all_configs[c] = master
else:
all_configs[config] = master
# Check that every referenced args file or config actually exists.
for config, loc in all_configs.items():
if config.startswith('//'):
if not self.Exists(self.ToAbsPath(config)):
errs.append('Unknown args file "%s" referenced from "%s".' %
(config, loc))
elif not config in self.configs:
errs.append('Unknown config "%s" referenced from "%s".' %
(config, loc))
# Check that every actual config is actually referenced.
for config in self.configs:
if not config in all_configs:
errs.append('Unused config "%s".' % config)
# Figure out the whole list of mixins, and check that every mixin
# listed by a config or another mixin actually exists.
referenced_mixins = set()
for config, mixins in self.configs.items():
for mixin in mixins:
if not mixin in self.mixins:
errs.append('Unknown mixin "%s" referenced by config "%s".' %
(mixin, config))
referenced_mixins.add(mixin)
for mixin in self.mixins:
for sub_mixin in self.mixins[mixin].get('mixins', []):
if not sub_mixin in self.mixins:
errs.append('Unknown mixin "%s" referenced by mixin "%s".' %
(sub_mixin, mixin))
referenced_mixins.add(sub_mixin)
# Check that every mixin defined is actually referenced somewhere.
for mixin in self.mixins:
if not mixin in referenced_mixins:
errs.append('Unreferenced mixin "%s".' % mixin)
if errs:
raise MBErr(('mb config file %s has problems:' % self.args.config_file) +
'\n ' + '\n '.join(errs))
if print_ok:
self.Print('mb config file %s looks ok.' % self.args.config_file)
return 0
def GetConfig(self):
build_dir = self.args.path[0]
vals = self.DefaultVals()
if self.args.builder or self.args.master or self.args.config:
vals = self.Lookup()
# Re-run gn gen in order to ensure the config is consistent with the
# build dir.
self.RunGNGen(vals)
return vals
toolchain_path = self.PathJoin(self.ToAbsPath(build_dir),
'toolchain.ninja')
if not self.Exists(toolchain_path):
self.Print('Must either specify a path to an existing GN build dir '
'or pass in a -m/-b pair or a -c flag to specify the '
'configuration')
return {}
vals['gn_args'] = self.GNArgsFromDir(build_dir)
return vals
def GNArgsFromDir(self, build_dir):
args_contents = ""
gn_args_path = self.PathJoin(self.ToAbsPath(build_dir), 'args.gn')
if self.Exists(gn_args_path):
args_contents = self.ReadFile(gn_args_path)
gn_args = []
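# Each args.gn line is assumed to look like 'name = value'; e.g.
# 'is_debug = false' is re-joined below as 'is_debug=false' (illustrative).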
for l in args_contents.splitlines():
fields = l.split(' ')
name = fields[0]
val = ' '.join(fields[2:])
gn_args.append('%s=%s' % (name, val))
return ' '.join(gn_args)
def Lookup(self):
vals = self.ReadIOSBotConfig()
if not vals:
self.ReadConfigFile()
config = self.ConfigFromArgs()
if config.startswith('//'):
if not self.Exists(self.ToAbsPath(config)):
raise MBErr('args file "%s" not found' % config)
vals = self.DefaultVals()
vals['args_file'] = config
else:
if not config in self.configs:
raise MBErr('Config "%s" not found in %s' %
(config, self.args.config_file))
vals = self.FlattenConfig(config)
return vals
def ReadIOSBotConfig(self):
if not self.args.master or not self.args.builder:
return {}
path = self.PathJoin(self.chromium_src_dir, 'ios', 'build', 'bots',
self.args.master, self.args.builder + '.json')
if not self.Exists(path):
return {}
contents = json.loads(self.ReadFile(path))
gn_args = ' '.join(contents.get('gn_args', []))
vals = self.DefaultVals()
vals['gn_args'] = gn_args
return vals
def ReadConfigFile(self):
if not self.Exists(self.args.config_file):
raise MBErr('config file not found at %s' % self.args.config_file)
try:
contents = ast.literal_eval(self.ReadFile(self.args.config_file))
except SyntaxError as e:
raise MBErr('Failed to parse config file "%s": %s' %
(self.args.config_file, e))
self.configs = contents['configs']
self.luci_tryservers = contents.get('luci_tryservers', {})
self.masters = contents['masters']
self.mixins = contents['mixins']
def ReadIsolateMap(self):
if not self.args.isolate_map_files:
self.args.isolate_map_files = [self.default_isolate_map]
for f in self.args.isolate_map_files:
if not self.Exists(f):
raise MBErr('isolate map file not found at %s' % f)
isolate_maps = {}
for isolate_map in self.args.isolate_map_files:
try:
isolate_map = ast.literal_eval(self.ReadFile(isolate_map))
duplicates = set(isolate_map).intersection(isolate_maps)
if duplicates:
raise MBErr(
'Duplicate targets in isolate map files: %s.' %
', '.join(duplicates))
isolate_maps.update(isolate_map)
except SyntaxError as e:
raise MBErr(
'Failed to parse isolate map file "%s": %s' % (isolate_map, e))
return isolate_maps
def ConfigFromArgs(self):
if self.args.config:
if self.args.master or self.args.builder:
raise MBErr('Cannot specify both -c/--config and -m/--master or '
'-b/--builder')
return self.args.config
if not self.args.master or not self.args.builder:
raise MBErr('Must specify either -c/--config or '
'(-m/--master and -b/--builder)')
if not self.args.master in self.masters:
raise MBErr('Master name "%s" not found in "%s"' %
(self.args.master, self.args.config_file))
if not self.args.builder in self.masters[self.args.master]:
raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' %
(self.args.builder, self.args.master, self.args.config_file))
config = self.masters[self.args.master][self.args.builder]
if isinstance(config, dict):
if self.args.phase is None:
raise MBErr('Must specify a build --phase for %s on %s' %
(self.args.builder, self.args.master))
phase = str(self.args.phase)
if phase not in config:
raise MBErr('Phase %s doesn\'t exist for %s on %s' %
(phase, self.args.builder, self.args.master))
return config[phase]
if self.args.phase is not None:
raise MBErr('Must not specify a build --phase for %s on %s' %
(self.args.builder, self.args.master))
return config
def FlattenConfig(self, config):
mixins = self.configs[config]
vals = self.DefaultVals()
visited = []
self.FlattenMixins(mixins, vals, visited)
return vals
def DefaultVals(self):
return {
'args_file': '',
'cros_passthrough': False,
'gn_args': '',
}
def FlattenMixins(self, mixins, vals, visited):
for m in mixins:
if m not in self.mixins:
raise MBErr('Unknown mixin "%s"' % m)
visited.append(m)
mixin_vals = self.mixins[m]
if 'cros_passthrough' in mixin_vals:
vals['cros_passthrough'] = mixin_vals['cros_passthrough']
if 'args_file' in mixin_vals:
if vals['args_file']:
raise MBErr('args_file specified multiple times in mixins '
'for %s on %s' % (self.args.builder, self.args.master))
vals['args_file'] = mixin_vals['args_file']
if 'gn_args' in mixin_vals:
if vals['gn_args']:
vals['gn_args'] += ' ' + mixin_vals['gn_args']
else:
vals['gn_args'] = mixin_vals['gn_args']
if 'mixins' in mixin_vals:
self.FlattenMixins(mixin_vals['mixins'], vals, visited)
return vals
def RunGNGen(self, vals, compute_grit_inputs_for_analyze=False):
build_dir = self.args.path[0]
cmd = self.GNCmd('gen', build_dir, '--check')
gn_args = self.GNArgs(vals)
if compute_grit_inputs_for_analyze:
gn_args += ' compute_grit_inputs_for_analyze=true'
# Since GN hasn't run yet, the build directory may not even exist.
self.MaybeMakeDirectory(self.ToAbsPath(build_dir))
gn_args_path = self.ToAbsPath(build_dir, 'args.gn')
self.WriteFile(gn_args_path, gn_args, force_verbose=True)
swarming_targets = []
if getattr(self.args, 'swarming_targets_file', None):
# We need GN to generate the list of runtime dependencies for
# the compile targets listed (one per line) in the file so
# we can run them via swarming. We use gn_isolate_map.pyl to convert
# the compile targets to the matching GN labels.
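# For example, a file listing 'base_unittests' maps (assuming that entry
# exists in the isolate map) to a label such as '//base:base_unittests',
# which is what gets written to the runtime_deps list file below.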
path = self.args.swarming_targets_file
if not self.Exists(path):
self.WriteFailureAndRaise('"%s" does not exist' % path,
output_path=None)
contents = self.ReadFile(path)
swarming_targets = set(contents.splitlines())
isolate_map = self.ReadIsolateMap()
err, labels = self.MapTargetsToLabels(isolate_map, swarming_targets)
if err:
raise MBErr(err)
gn_runtime_deps_path = self.ToAbsPath(build_dir, 'runtime_deps')
self.WriteFile(gn_runtime_deps_path, '\n'.join(labels) + '\n')
cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
ret, _, _ = self.Run(cmd)
if ret:
# If `gn gen` failed, we should exit early rather than trying to
# generate isolates. Run() will have already logged any error output.
self.Print('GN gen failed: %d' % ret)
return ret
android = 'target_os="android"' in vals['gn_args']
fuchsia = 'target_os="fuchsia"' in vals['gn_args']
for target in swarming_targets:
if android:
# Android targets may be either android_apk or executable. The former
# will result in runtime_deps associated with the stamp file, while the
# latter will result in runtime_deps associated with the executable.
label = isolate_map[target]['label']
runtime_deps_targets = [
target + '.runtime_deps',
'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
elif fuchsia:
# Only emit a runtime deps file for the group() target on Fuchsia.
label = isolate_map[target]['label']
runtime_deps_targets = [
'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
elif (isolate_map[target]['type'] == 'script' or
isolate_map[target].get('label_type') == 'group'):
# For script targets, the build target is usually a group,
# for which gn generates the runtime_deps next to the stamp file
# for the label, which lives under the obj/ directory, but it may
# also be an executable.
label = isolate_map[target]['label']
runtime_deps_targets = [
'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
if self.platform == 'win32':
runtime_deps_targets += [ target + '.exe.runtime_deps' ]
else:
runtime_deps_targets += [ target + '.runtime_deps' ]
elif self.platform == 'win32':
runtime_deps_targets = [target + '.exe.runtime_deps']
else:
runtime_deps_targets = [target + '.runtime_deps']
for r in runtime_deps_targets:
runtime_deps_path = self.ToAbsPath(build_dir, r)
if self.Exists(runtime_deps_path):
break
else:
raise MBErr('did not generate any of %s' %
', '.join(runtime_deps_targets))
runtime_deps = self.ReadFile(runtime_deps_path).splitlines()
self.WriteIsolateFiles(build_dir, target, runtime_deps)
return 0
def RunGNIsolate(self):
target = self.args.target[0]
isolate_map = self.ReadIsolateMap()
err, labels = self.MapTargetsToLabels(isolate_map, [target])
if err:
raise MBErr(err)
label = labels[0]
build_dir = self.args.path[0]
cmd = self.GNCmd('desc', build_dir, label, 'runtime_deps')
ret, out, _ = self.Call(cmd)
if ret:
if out:
self.Print(out)
return ret
runtime_deps = out.splitlines()
self.WriteIsolateFiles(build_dir, target, runtime_deps)
ret, _, _ = self.Run([
self.executable,
self.PathJoin('tools', 'swarming_client', 'isolate.py'),
'check',
'-i',
self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
'-s',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target))],
buffer_output=False)
return ret
def WriteIsolateFiles(self, build_dir, target, runtime_deps):
isolate_path = self.ToAbsPath(build_dir, target + '.isolate')
self.WriteFile(isolate_path,
pprint.pformat({
'variables': {
'files': sorted(runtime_deps),
}
}) + '\n')
self.WriteJSON(
{
'args': [
'--isolated',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
'--isolate',
self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
],
'dir': self.chromium_src_dir,
'version': 1,
},
isolate_path + 'd.gen.json',
)
def MapTargetsToLabels(self, isolate_map, targets):
labels = []
err = ''
for target in targets:
if target == 'all':
labels.append(target)
elif target.startswith('//'):
labels.append(target)
else:
if target in isolate_map:
if isolate_map[target]['type'] == 'unknown':
err += ('test target "%s" type is unknown\n' % target)
else:
labels.append(isolate_map[target]['label'])
else:
err += ('target "%s" not found in '
'//infra/mb/gn_isolate_map.pyl\n' % target)
return err, labels
def GNCmd(self, subcommand, path, *args):
if self.platform == 'linux2':
subdir, exe = 'linux64', 'gn'
elif self.platform == 'darwin':
subdir, exe = 'mac', 'gn'
else:
subdir, exe = 'win', 'gn.exe'
arch = platform.machine()
if (arch.startswith('s390') or arch.startswith('ppc') or
self.platform.startswith('aix')):
# use gn in PATH
gn_path = 'gn'
else:
gn_path = self.PathJoin(self.chromium_src_dir, 'buildtools', subdir, exe)
return [gn_path, subcommand, path] + list(args)
def GNArgs(self, vals):
if vals['cros_passthrough']:
if not 'GN_ARGS' in os.environ:
raise MBErr('MB is expecting GN_ARGS to be in the environment')
gn_args = os.environ['GN_ARGS']
if not re.search('target_os.*=.*"chromeos"', gn_args):
raise MBErr('GN_ARGS is missing target_os = "chromeos": (GN_ARGS=%s)' %
gn_args)
else:
gn_args = vals['gn_args']
if self.args.goma_dir:
gn_args += ' goma_dir="%s"' % self.args.goma_dir
android_version_code = self.args.android_version_code
if android_version_code:
gn_args += ' android_default_version_code="%s"' % android_version_code
android_version_name = self.args.android_version_name
if android_version_name:
gn_args += ' android_default_version_name="%s"' % android_version_name
# Canonicalize the arg string into a sorted, newline-separated list
# of key-value pairs, and de-dup the keys if need be so that only
# the last instance of each arg is listed.
gn_args = gn_helpers.ToGNString(gn_helpers.FromGNArgs(gn_args))
args_file = vals.get('args_file', None)
if args_file:
gn_args = ('import("%s")\n' % vals['args_file']) + gn_args
return gn_args
def ToAbsPath(self, build_path, *comps):
return self.PathJoin(self.chromium_src_dir,
self.ToSrcRelPath(build_path),
*comps)
def ToSrcRelPath(self, path):
"""Returns a relative path from the top of the repo."""
if path.startswith('//'):
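# e.g. '//out/Default' -> 'out/Default' (using the platform separator; illustrative).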
return path[2:].replace('/', self.sep)
return self.RelPath(path, self.chromium_src_dir)
def RunGNAnalyze(self, vals):
# Analyze runs before 'gn gen' now, so we need to run gn gen
# in order to ensure that we have a build directory.
ret = self.RunGNGen(vals, compute_grit_inputs_for_analyze=True)
if ret:
return ret
build_path = self.args.path[0]
input_path = self.args.input_path[0]
gn_input_path = input_path + '.gn'
output_path = self.args.output_path[0]
gn_output_path = output_path + '.gn'
inp = self.ReadInputJSON(['files', 'test_targets',
'additional_compile_targets'])
if self.args.verbose:
self.Print()
self.Print('analyze input:')
self.PrintJSON(inp)
self.Print()
# This shouldn't normally happen, but could due to unusual race conditions,
# like a try job that gets scheduled before a patch lands but runs after
# the patch has landed.
if not inp['files']:
self.Print('Warning: No files modified in patch, bailing out early.')
self.WriteJSON({
'status': 'No dependency',
'compile_targets': [],
'test_targets': [],
}, output_path)
return 0
gn_inp = {}
gn_inp['files'] = ['//' + f for f in inp['files'] if not f.startswith('//')]
isolate_map = self.ReadIsolateMap()
err, gn_inp['additional_compile_targets'] = self.MapTargetsToLabels(
isolate_map, inp['additional_compile_targets'])
if err:
raise MBErr(err)
err, gn_inp['test_targets'] = self.MapTargetsToLabels(
isolate_map, inp['test_targets'])
if err:
raise MBErr(err)
labels_to_targets = {}
for i, label in enumerate(gn_inp['test_targets']):
labels_to_targets[label] = inp['test_targets'][i]
try:
self.WriteJSON(gn_inp, gn_input_path)
cmd = self.GNCmd('analyze', build_path, gn_input_path, gn_output_path)
ret, _, _ = self.Run(cmd, force_verbose=True)
if ret:
return ret
gn_outp_str = self.ReadFile(gn_output_path)
try:
gn_outp = json.loads(gn_outp_str)
except Exception as e:
self.Print("Failed to parse the JSON string GN returned: %s\n%s"
% (repr(gn_outp_str), str(e)))
raise
outp = {}
if 'status' in gn_outp:
outp['status'] = gn_outp['status']
if 'error' in gn_outp:
outp['error'] = gn_outp['error']
if 'invalid_targets' in gn_outp:
outp['invalid_targets'] = gn_outp['invalid_targets']
if 'compile_targets' in gn_outp:
all_input_compile_targets = sorted(
set(inp['test_targets'] + inp['additional_compile_targets']))
# If we're building 'all', we can throw away the rest of the targets
# since they're redundant.
if 'all' in gn_outp['compile_targets']:
outp['compile_targets'] = ['all']
else:
outp['compile_targets'] = gn_outp['compile_targets']
# crbug.com/736215: When GN returns targets back, for targets in
# the default toolchain, GN will have generated a phony ninja
# target matching the label, and so we can safely (and easily)
# transform any GN label into the matching ninja target. For
# targets in other toolchains, though, GN doesn't generate the
# phony targets, and we don't know how to turn the labels into
# compile targets. In this case, we also conservatively give up
# and build everything. Probably the right thing to do here is
# to have GN return the compile targets directly.
if any("(" in target for target in outp['compile_targets']):
self.Print('WARNING: targets with non-default toolchains were '
'found, building everything instead.')
outp['compile_targets'] = all_input_compile_targets
else:
outp['compile_targets'] = [
label.replace('//', '') for label in outp['compile_targets']]
# Windows has a maximum command line length of 8k; even Linux
# maxes out at 128k; if analyze returns a *really long* list of
# targets, we just give up and conservatively build everything instead.
# Probably the right thing here is for ninja to support response
# files as input on the command line
# (see https://github.com/ninja-build/ninja/issues/1355).
if len(' '.join(outp['compile_targets'])) > 7*1024:
self.Print('WARNING: Too many compile targets were affected.')
self.Print('WARNING: Building everything instead to avoid '
'command-line length issues.')
outp['compile_targets'] = all_input_compile_targets
if 'test_targets' in gn_outp:
outp['test_targets'] = [
labels_to_targets[label] for label in gn_outp['test_targets']]
if self.args.verbose:
self.Print()
self.Print('analyze output:')
self.PrintJSON(outp)
self.Print()
self.WriteJSON(outp, output_path)
finally:
if self.Exists(gn_input_path):
self.RemoveFile(gn_input_path)
if self.Exists(gn_output_path):
self.RemoveFile(gn_output_path)
return 0
def ReadInputJSON(self, required_keys):
path = self.args.input_path[0]
output_path = self.args.output_path[0]
if not self.Exists(path):
self.WriteFailureAndRaise('"%s" does not exist' % path, output_path)
try:
inp = json.loads(self.ReadFile(path))
except Exception as e:
self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s' %
(path, e), output_path)
for k in required_keys:
if not k in inp:
self.WriteFailureAndRaise('input file is missing a "%s" key' % k,
output_path)
return inp
def WriteFailureAndRaise(self, msg, output_path):
if output_path:
self.WriteJSON({'error': msg}, output_path, force_verbose=True)
raise MBErr(msg)
def WriteJSON(self, obj, path, force_verbose=False):
try:
self.WriteFile(path, json.dumps(obj, indent=2, sort_keys=True) + '\n',
force_verbose=force_verbose)
except Exception as e:
raise MBErr('Error %s writing to the output path "%s"' %
(e, path))
def CheckCompile(self, master, builder):
url_template = self.args.url_template + '/{builder}/builds/_all?as_text=1'
url = urllib2.quote(url_template.format(master=master, builder=builder),
safe=':/()?=')
try:
builds = json.loads(self.Fetch(url))
except Exception as e:
return str(e)
successes = sorted(
[int(x) for x in builds.keys() if "text" in builds[x] and
cmp(builds[x]["text"][:2], ["build", "successful"]) == 0],
reverse=True)
if not successes:
return "no successful builds"
build = builds[str(successes[0])]
step_names = set([step["name"] for step in build["steps"]])
compile_indicators = set(["compile", "compile (with patch)", "analyze"])
if compile_indicators & step_names:
return "compiles"
return "does not compile"
def PrintCmd(self, cmd, env):
if self.platform == 'win32':
env_prefix = 'set '
env_quoter = QuoteForSet
shell_quoter = QuoteForCmd
else:
env_prefix = ''
env_quoter = pipes.quote
shell_quoter = pipes.quote
def print_env(var):
if env and var in env:
self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var])))
print_env('LLVM_FORCE_HEAD_REVISION')
if cmd[0] == self.executable:
cmd = ['python'] + cmd[1:]
self.Print(*[shell_quoter(arg) for arg in cmd])
def PrintJSON(self, obj):
self.Print(json.dumps(obj, indent=2, sort_keys=True))
def Build(self, target):
build_dir = self.ToSrcRelPath(self.args.path[0])
ninja_cmd = ['ninja', '-C', build_dir]
if self.args.jobs:
ninja_cmd.extend(['-j', '%d' % self.args.jobs])
ninja_cmd.append(target)
ret, _, _ = self.Run(ninja_cmd, force_verbose=False, buffer_output=False)
return ret
def Run(self, cmd, env=None, force_verbose=True, buffer_output=True):
# This function largely exists so it can be overridden for testing.
if self.args.dryrun or self.args.verbose or force_verbose:
self.PrintCmd(cmd, env)
if self.args.dryrun:
return 0, '', ''
ret, out, err = self.Call(cmd, env=env, buffer_output=buffer_output)
if self.args.verbose or force_verbose:
if ret:
self.Print(' -> returned %d' % ret)
if out:
self.Print(out, end='')
if err:
self.Print(err, end='', file=sys.stderr)
return ret, out, err
def Call(self, cmd, env=None, buffer_output=True):
if buffer_output:
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
else:
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
env=env)
p.wait()
out = err = ''
return p.returncode, out, err
def ExpandUser(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.expanduser(path)
def Exists(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.exists(path)
def Fetch(self, url):
# This function largely exists so it can be overridden for testing.
f = urllib2.urlopen(url)
contents = f.read()
f.close()
return contents
def MaybeMakeDirectory(self, path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def PathJoin(self, *comps):
# This function largely exists so it can be overridden for testing.
return os.path.join(*comps)
def Print(self, *args, **kwargs):
# This function largely exists so it can be overridden for testing.
print(*args, **kwargs)
# print() takes a 'file' kwarg; only flush when writing to stdout.
if kwargs.get('file', sys.stdout) == sys.stdout:
sys.stdout.flush()
def ReadFile(self, path):
# This function largely exists so it can be overridden for testing.
with open(path) as fp:
return fp.read()
def RelPath(self, path, start='.'):
# This function largely exists so it can be overridden for testing.
return os.path.relpath(path, start)
def RemoveFile(self, path):
# This function largely exists so it can be overridden for testing.
os.remove(path)
def RemoveDirectory(self, abs_path):
if self.platform == 'win32':
# In other places in chromium, we often have to retry this command
# because we're worried about other processes still holding on to
# file handles, but when MB is invoked, it will be early enough in the
build that there should be no other processes to interfere. We
# can change this if need be.
self.Run(['cmd.exe', '/c', 'rmdir', '/q', '/s', abs_path])
else:
shutil.rmtree(abs_path, ignore_errors=True)
def TempFile(self, mode='w'):
# This function largely exists so it can be overridden for testing.
return tempfile.NamedTemporaryFile(mode=mode, delete=False)
def WriteFile(self, path, contents, force_verbose=False):
# This function largely exists so it can be overridden for testing.
if self.args.dryrun or self.args.verbose or force_verbose:
self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
with open(path, 'w') as fp:
return fp.write(contents)
class MBErr(Exception):
pass
# See http://goo.gl/l5NPDW and http://goo.gl/4Diozm for the painful
# details of this next section, which handles escaping command lines
# so that they can be copied and pasted into a cmd window.
UNSAFE_FOR_SET = set('^<>&|')
UNSAFE_FOR_CMD = UNSAFE_FOR_SET.union(set('()%'))
ALL_META_CHARS = UNSAFE_FOR_CMD.union(set('"'))
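# Illustrative examples with the helpers below:
#   QuoteForSet('a&b') -> 'a^&b'
#   QuoteForCmd('a b') -> '"a b"'
#   QuoteForCmd('a&b') -> 'a^&b'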
def QuoteForSet(arg):
if any(a in UNSAFE_FOR_SET for a in arg):
arg = ''.join('^' + a if a in UNSAFE_FOR_SET else a for a in arg)
return arg
def QuoteForCmd(arg):
# First, escape the arg so that CommandLineToArgvW will parse it properly.
if arg == '' or ' ' in arg or '"' in arg:
quote_re = re.compile(r'(\\*)"')
arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg))
# Then check to see if the arg contains any metacharacters other than
# double quotes; if it does, quote everything (including the double
# quotes) for safety.
if any(a in UNSAFE_FOR_CMD for a in arg):
arg = ''.join('^' + a if a in ALL_META_CHARS else a for a in arg)
return arg
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
import unittest
from pyramid import testing
class NewRequestEventTests(unittest.TestCase):
def _getTargetClass(self):
from pyramid.events import NewRequest
return NewRequest
def _makeOne(self, request):
return self._getTargetClass()(request)
def test_class_conforms_to_INewRequest(self):
from pyramid.interfaces import INewRequest
from zope.interface.verify import verifyClass
klass = self._getTargetClass()
verifyClass(INewRequest, klass)
def test_instance_conforms_to_INewRequest(self):
from pyramid.interfaces import INewRequest
from zope.interface.verify import verifyObject
request = DummyRequest()
inst = self._makeOne(request)
verifyObject(INewRequest, inst)
def test_ctor(self):
request = DummyRequest()
inst = self._makeOne(request)
self.assertEqual(inst.request, request)
class NewResponseEventTests(unittest.TestCase):
def _getTargetClass(self):
from pyramid.events import NewResponse
return NewResponse
def _makeOne(self, request, response):
return self._getTargetClass()(request, response)
def test_class_conforms_to_INewResponse(self):
from pyramid.interfaces import INewResponse
from zope.interface.verify import verifyClass
klass = self._getTargetClass()
verifyClass(INewResponse, klass)
def test_instance_conforms_to_INewResponse(self):
from pyramid.interfaces import INewResponse
from zope.interface.verify import verifyObject
request = DummyRequest()
response = DummyResponse()
inst = self._makeOne(request, response)
verifyObject(INewResponse, inst)
def test_ctor(self):
request = DummyRequest()
response = DummyResponse()
inst = self._makeOne(request, response)
self.assertEqual(inst.request, request)
self.assertEqual(inst.response, response)
class ApplicationCreatedEventTests(unittest.TestCase):
def _getTargetClass(self):
from pyramid.events import ApplicationCreated
return ApplicationCreated
def _makeOne(self, context=object()):
return self._getTargetClass()(context)
def test_class_conforms_to_IApplicationCreated(self):
from pyramid.interfaces import IApplicationCreated
from zope.interface.verify import verifyClass
verifyClass(IApplicationCreated, self._getTargetClass())
def test_object_conforms_to_IApplicationCreated(self):
from pyramid.interfaces import IApplicationCreated
from zope.interface.verify import verifyObject
verifyObject(IApplicationCreated, self._makeOne())
class WSGIApplicationCreatedEventTests(ApplicationCreatedEventTests):
def _getTargetClass(self):
from pyramid.events import WSGIApplicationCreatedEvent
return WSGIApplicationCreatedEvent
def test_class_conforms_to_IWSGIApplicationCreatedEvent(self):
from pyramid.interfaces import IWSGIApplicationCreatedEvent
from zope.interface.verify import verifyClass
verifyClass(IWSGIApplicationCreatedEvent, self._getTargetClass())
def test_object_conforms_to_IWSGIApplicationCreatedEvent(self):
from pyramid.interfaces import IWSGIApplicationCreatedEvent
from zope.interface.verify import verifyObject
verifyObject(IWSGIApplicationCreatedEvent, self._makeOne())
class ContextFoundEventTests(unittest.TestCase):
def _getTargetClass(self):
from pyramid.events import ContextFound
return ContextFound
def _makeOne(self, request=None):
if request is None:
request = DummyRequest()
return self._getTargetClass()(request)
def test_class_conforms_to_IContextFound(self):
from zope.interface.verify import verifyClass
from pyramid.interfaces import IContextFound
verifyClass(IContextFound, self._getTargetClass())
def test_instance_conforms_to_IContextFound(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import IContextFound
verifyObject(IContextFound, self._makeOne())
class AfterTraversalEventTests(ContextFoundEventTests):
def _getTargetClass(self):
from pyramid.events import AfterTraversal
return AfterTraversal
def test_class_conforms_to_IAfterTraversal(self):
from zope.interface.verify import verifyClass
from pyramid.interfaces import IAfterTraversal
verifyClass(IAfterTraversal, self._getTargetClass())
def test_instance_conforms_to_IAfterTraversal(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import IAfterTraversal
verifyObject(IAfterTraversal, self._makeOne())
class TestSubscriber(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def _makeOne(self, *ifaces, **predicates):
from pyramid.events import subscriber
return subscriber(*ifaces, **predicates)
def test_register_single(self):
from zope.interface import Interface
class IFoo(Interface): pass
class IBar(Interface): pass
dec = self._makeOne(IFoo)
def foo(): pass
config = DummyConfigurator()
scanner = Dummy()
scanner.config = config
dec.register(scanner, None, foo)
self.assertEqual(config.subscribed, [(foo, IFoo)])
def test_register_multi(self):
from zope.interface import Interface
class IFoo(Interface): pass
class IBar(Interface): pass
dec = self._makeOne(IFoo, IBar)
def foo(): pass
config = DummyConfigurator()
scanner = Dummy()
scanner.config = config
dec.register(scanner, None, foo)
self.assertEqual(config.subscribed, [(foo, IFoo), (foo, IBar)])
def test_register_none_means_all(self):
from zope.interface import Interface
dec = self._makeOne()
def foo(): pass
config = DummyConfigurator()
scanner = Dummy()
scanner.config = config
dec.register(scanner, None, foo)
self.assertEqual(config.subscribed, [(foo, Interface)])
def test_register_objectevent(self):
from zope.interface import Interface
class IFoo(Interface): pass
class IBar(Interface): pass
dec = self._makeOne([IFoo, IBar])
def foo(): pass
config = DummyConfigurator()
scanner = Dummy()
scanner.config = config
dec.register(scanner, None, foo)
self.assertEqual(config.subscribed, [(foo, [IFoo, IBar])])
def test___call__(self):
dec = self._makeOne()
dummy_venusian = DummyVenusian()
dec.venusian = dummy_venusian
def foo(): pass
dec(foo)
self.assertEqual(dummy_venusian.attached,
[(foo, dec.register, 'pyramid')])
def test_register_with_predicates(self):
from zope.interface import Interface
dec = self._makeOne(a=1)
def foo(): pass
config = DummyConfigurator()
scanner = Dummy()
scanner.config = config
dec.register(scanner, None, foo)
self.assertEqual(config.subscribed, [(foo, Interface, {'a':1})])
class TestBeforeRender(unittest.TestCase):
def _makeOne(self, system, val=None):
from pyramid.events import BeforeRender
return BeforeRender(system, val)
def test_instance_conforms(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import IBeforeRender
event = self._makeOne({})
verifyObject(IBeforeRender, event)
def test_setitem_success(self):
event = self._makeOne({})
event['a'] = 1
self.assertEqual(event, {'a':1})
def test_setdefault_fail(self):
event = self._makeOne({})
result = event.setdefault('a', 1)
self.assertEqual(result, 1)
self.assertEqual(event, {'a':1})
def test_setdefault_success(self):
event = self._makeOne({})
event['a'] = 1
result = event.setdefault('a', 2)
self.assertEqual(result, 1)
self.assertEqual(event, {'a':1})
def test_update_success(self):
event = self._makeOne({'a':1})
event.update({'b':2})
self.assertEqual(event, {'a':1, 'b':2})
def test__contains__True(self):
system = {'a':1}
event = self._makeOne(system)
self.assertTrue('a' in event)
def test__contains__False(self):
system = {}
event = self._makeOne(system)
self.assertFalse('a' in event)
def test__getitem__success(self):
system = {'a':1}
event = self._makeOne(system)
self.assertEqual(event['a'], 1)
def test__getitem__fail(self):
system = {}
event = self._makeOne(system)
self.assertRaises(KeyError, event.__getitem__, 'a')
def test_get_success(self):
system = {'a':1}
event = self._makeOne(system)
self.assertEqual(event.get('a'), 1)
def test_get_fail(self):
system = {}
event = self._makeOne(system)
self.assertEqual(event.get('a'), None)
def test_rendering_val(self):
system = {}
val = {}
event = self._makeOne(system, val)
self.assertTrue(event.rendering_val is val)
class DummyConfigurator(object):
def __init__(self):
self.subscribed = []
def add_subscriber(self, wrapped, ifaces, **predicates):
if not predicates:
self.subscribed.append((wrapped, ifaces))
else:
self.subscribed.append((wrapped, ifaces, predicates))
class DummyRegistry(object):
pass
class DummyVenusian(object):
def __init__(self):
self.attached = []
def attach(self, wrapped, fn, category=None):
self.attached.append((wrapped, fn, category))
class Dummy:
pass
class DummyRequest:
pass
class DummyResponse:
pass
|
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import random
import socket
import string
import json
from shlex import split
from subprocess import call
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from charms import layer
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_not
from charms.reactive.helpers import data_changed
from charms.kubernetes.flagmanager import FlagManager
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
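# Dashboard/monitoring addon templates; each is rendered from the charm's
# addons/ directory and applied with 'kubectl apply -f' by create_addon() below.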
dashboard_templates = [
'dashboard-controller.yaml',
'dashboard-service.yaml',
'influxdb-grafana-controller.yaml',
'influxdb-service.yaml',
'grafana-service.yaml',
'heapster-controller.yaml',
'heapster-service.yaml'
]
def service_cidr():
''' Return the charm's service-cidr config '''
db = unitdata.kv()
frozen_cidr = db.get('kubernetes-master.service-cidr')
return frozen_cidr or hookenv.config('service-cidr')
def freeze_service_cidr():
''' Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. '''
db = unitdata.kv()
db.set('kubernetes-master.service-cidr', service_cidr())
@hook('upgrade-charm')
def reset_states_for_delivery():
'''An upgrade charm event was triggered by Juju, react to that here.'''
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
host.service_stop(service)
remove_state('kubernetes-master.components.started')
remove_state('kubernetes-master.components.installed')
remove_state('kube-dns.available')
remove_state('kubernetes.dashboard.available')
@when_not('kubernetes-master.components.installed')
def install():
'''Unpack and put the Kubernetes master files on the path.'''
# Get the resource via resource_get
try:
archive = hookenv.resource_get('kubernetes')
except Exception:
message = 'Error fetching the kubernetes resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing kubernetes resource.')
hookenv.status_set('blocked', 'Missing kubernetes resource.')
return
# Handle null resource publication; treat a file smaller than 1 MB as incomplete.
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete kubernetes resource.')
return
hookenv.status_set('maintenance', 'Unpacking kubernetes resource.')
files_dir = os.path.join(hookenv.charm_dir(), 'files')
os.makedirs(files_dir, exist_ok=True)
command = 'tar -xvzf {0} -C {1}'.format(archive, files_dir)
hookenv.log(command)
check_call(split(command))
apps = [
{'name': 'kube-apiserver', 'path': '/usr/local/bin'},
{'name': 'kube-controller-manager', 'path': '/usr/local/bin'},
{'name': 'kube-scheduler', 'path': '/usr/local/bin'},
{'name': 'kubectl', 'path': '/usr/local/bin'},
]
for app in apps:
unpacked = '{}/{}'.format(files_dir, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
set_state('kubernetes-master.components.installed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set master configuration on the CNI relation. This lets the CNI
subordinate know that we're the master so it can respond accordingly. '''
cni.set_config(is_master=True, kubeconfig_path='')
@when('kubernetes-master.components.installed')
@when_not('authentication.setup')
def setup_authentication():
'''Setup basic authentication and token access for the cluster.'''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
api_opts.add('--basic-auth-file', '/srv/kubernetes/basic_auth.csv')
api_opts.add('--token-auth-file', '/srv/kubernetes/known_tokens.csv')
api_opts.add('--service-cluster-ip-range', service_cidr())
hookenv.status_set('maintenance', 'Rendering authentication templates.')
htaccess = '/srv/kubernetes/basic_auth.csv'
if not os.path.isfile(htaccess):
setup_basic_auth('admin', 'admin', 'admin')
known_tokens = '/srv/kubernetes/known_tokens.csv'
if not os.path.isfile(known_tokens):
setup_tokens(None, 'admin', 'admin')
setup_tokens(None, 'kubelet', 'kubelet')
setup_tokens(None, 'kube_proxy', 'kube_proxy')
# Generate the default service account token key
os.makedirs('/etc/kubernetes', exist_ok=True)
cmd = ['openssl', 'genrsa', '-out', '/etc/kubernetes/serviceaccount.key',
'2048']
check_call(cmd)
api_opts.add('--service-account-key-file',
'/etc/kubernetes/serviceaccount.key')
controller_opts.add('--service-account-private-key-file',
'/etc/kubernetes/serviceaccount.key')
set_state('authentication.setup')
@when('kubernetes-master.components.installed')
def set_app_version():
''' Declare the application version to juju '''
version = check_output(['kube-apiserver', '--version'])
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kube-dns.available', 'kubernetes-master.components.installed')
def idle_status():
''' Signal at the end of the run that we are running. '''
if not all_kube_system_pods_running():
hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
elif hookenv.config('service-cidr') != service_cidr():
hookenv.status_set('active', 'WARN: cannot change service-cidr, still using ' + service_cidr())
else:
hookenv.status_set('active', 'Kubernetes master running.')
@when('etcd.available', 'kubernetes-master.components.installed',
'certificates.server.cert.available')
@when_not('kubernetes-master.components.started')
def start_master(etcd, tls):
'''Run the Kubernetes master components.'''
hookenv.status_set('maintenance',
'Rendering the Kubernetes master systemd files.')
freeze_service_cidr()
handle_etcd_relation(etcd)
# Use the etcd relation object to render files with etcd information.
render_files()
hookenv.status_set('maintenance',
'Starting the Kubernetes master services.')
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
hookenv.log('Starting {0} service.'.format(service))
host.service_start(service)
hookenv.open_port(6443)
set_state('kubernetes-master.components.started')
@when('cluster-dns.connected')
def send_cluster_dns_detail(cluster_dns):
''' Send cluster DNS info '''
# Note that the DNS server doesn't necessarily exist at this point. We know
# where we're going to put it, though, so let's send the info anyway.
dns_ip = get_dns_ip()
cluster_dns.set_dns_info(53, hookenv.config('dns_domain'), dns_ip)
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
''' Send configuration to the load balancer, and close access to the
public interface '''
kube_api.configure(port=6443)
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Get the SDN gateway based on the cidr address.
kubernetes_service_ip = get_kubernetes_service_ip()
domain = hookenv.config('dns_domain')
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
socket.gethostname(),
kubernetes_service_ip,
'kubernetes',
'kubernetes.{0}'.format(domain),
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.{0}'.format(domain)
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api.connected')
def push_api_data(kube_api):
''' Send configuration to remote consumer.'''
# Since all relations already have the private ip address, only
# send the port on the relation object to all consumers.
# The kubernetes api-server uses 6443 for the default secure port.
kube_api.set_api_port('6443')
@when('kubernetes-master.components.started', 'kube-dns.available')
@when_not('kubernetes.dashboard.available')
def install_dashboard_addons():
''' Launch dashboard addons if they are enabled in config '''
if hookenv.config('enable-dashboard-addons'):
hookenv.log('Launching kubernetes dashboard.')
context = {}
context['arch'] = arch()
try:
context['pillar'] = {'num_nodes': get_node_count()}
for template in dashboard_templates:
create_addon(template, context)
set_state('kubernetes.dashboard.available')
except CalledProcessError:
hookenv.log('Kubernetes dashboard waiting on kubeapi')
@when('kubernetes-master.components.started', 'kubernetes.dashboard.available')
def remove_dashboard_addons():
''' Removes dashboard addons if they are disabled in config '''
if not hookenv.config('enable-dashboard-addons'):
hookenv.log('Removing kubernetes dashboard.')
for template in dashboard_templates:
delete_addon(template)
remove_state('kubernetes.dashboard.available')
@when('kubernetes-master.components.started')
@when_not('kube-dns.available')
def start_kube_dns():
''' State guard to starting DNS '''
hookenv.status_set('maintenance', 'Deploying KubeDNS')
context = {
'arch': arch(),
# The dictionary named 'pillar' is a construct of the k8s template files.
'pillar': {
'dns_server': get_dns_ip(),
'dns_replicas': 1,
'dns_domain': hookenv.config('dns_domain')
}
}
try:
create_addon('kubedns-controller.yaml', context)
create_addon('kubedns-svc.yaml', context)
except CalledProcessError:
hookenv.status_set('waiting', 'Waiting to retry KubeDNS deployment')
return
set_state('kube-dns.available')
@when('kubernetes-master.components.installed', 'loadbalancer.available',
'certificates.ca.available', 'certificates.client.cert.available')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
# Get the potential list of loadbalancers from the relation object.
hosts = loadbalancer.get_addresses_ports()
# Get the public address of loadbalancers so users can access the cluster.
address = hosts[0].get('public-address')
# Get the port of the loadbalancer so users can access the cluster.
port = hosts[0].get('port')
server = 'https://{0}:{1}'.format(address, port)
build_kubeconfig(server)
@when('kubernetes-master.components.installed',
'certificates.ca.available', 'certificates.client.cert.available')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
'''Create a kubernetes configuration for the master unit.'''
server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)
build_kubeconfig(server)
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
''' When the ceph relation data has changed, remove the state that gates
re-rendering and re-execution of the ceph-relation-changed handling so
that any configs, keys, and/or service prerequisites are re-rendered. '''
ceph_relation_data = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'hostname': socket.gethostname(),
'key': ceph_admin.key()
}
# Re-execute the rendering if the data has changed.
if data_changed('ceph-config', ceph_relation_data):
remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
'''Ceph on kubernetes will require a few things - namely a ceph
configuration, and the ceph secret key file used for authentication.
This method will install the client package, and render the requisite files
in order to consume the ceph-storage relation.'''
ceph_context = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'use_syslog': "true",
'ceph_public_network': '',
'ceph_cluster_network': '',
'loglevel': 1,
'hostname': socket.gethostname(),
}
# Install the ceph common utilities.
apt_install(['ceph-common'], fatal=True)
etc_ceph_directory = '/etc/ceph'
if not os.path.isdir(etc_ceph_directory):
os.makedirs(etc_ceph_directory)
charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
# Render the ceph configuration from the ceph conf template
render('ceph.conf', charm_ceph_conf, ceph_context)
# The key can rotate independently of other ceph config, so validate it
admin_key = os.path.join(etc_ceph_directory,
'ceph.client.admin.keyring')
try:
with open(admin_key, 'w') as key_file:
key_file.write("[client.admin]\n\tkey = {}\n".format(
ceph_admin.key()))
except IOError as err:
hookenv.log("IOError writing admin.keyring: {}".format(err))
# Enlist the ceph-admin key as a kubernetes secret
if ceph_admin.key():
encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
else:
# We didn't have a key, and cannot proceed. Do not set state and
# allow this method to re-execute
return
context = {'secret': encoded_key.decode('ascii')}
render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
try:
# At first glance this is deceptive. The apply stanza will create if
# it doesn't exist, otherwise it will update the entry, ensuring our
# ceph-secret is always reflective of what we have in /etc/ceph
# assuming we have invoked this anytime that file would change.
cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
check_call(cmd)
os.remove('/tmp/ceph-secret.yaml')
except:
# the enlistment in kubernetes failed, return and prepare for re-exec
return
# when complete, set a state relating to configuration of the storage
# backend that will allow other modules to hook into this and verify we
# have performed the necessary pre-req steps to interface with a ceph
# deployment.
set_state('ceph-storage.configured')
def create_addon(template, context):
'''Create an addon from a template'''
source = 'addons/' + template
target = '/etc/kubernetes/addons/' + template
render(source, target, context)
cmd = ['kubectl', 'apply', '-f', target]
check_call(cmd)
def delete_addon(template):
'''Delete an addon from a template'''
target = '/etc/kubernetes/addons/' + template
cmd = ['kubectl', 'delete', '-f', target]
call(cmd)
def get_node_count():
'''Return the number of Kubernetes nodes in the cluster'''
cmd = ['kubectl', 'get', 'nodes', '-o', 'name']
output = check_output(cmd)
node_count = len(output.splitlines())
return node_count
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def build_kubeconfig(server):
'''Gather the relevant data for Kubernetes configuration objects and create
a config object with that information.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
ca_exists = ca and os.path.isfile(ca)
key = layer_options.get('client_key_path')
key_exists = key and os.path.isfile(key)
cert = layer_options.get('client_certificate_path')
cert_exists = cert and os.path.isfile(cert)
# Do we have everything we need?
if ca_exists and key_exists and cert_exists:
# Cache last server string to know if we need to regenerate the config.
if not data_changed('kubeconfig.server', server):
return
# The final destination of the kubeconfig and kubectl.
destination_directory = '/home/ubuntu'
# Create an absolute path for the kubeconfig file.
kubeconfig_path = os.path.join(destination_directory, 'config')
# Create the kubeconfig on this system so users can access the cluster.
create_kubeconfig(kubeconfig_path, server, ca, key, cert)
# Copy the kubectl binary to the destination directory.
cmd = ['install', '-v', '-o', 'ubuntu', '-g', 'ubuntu',
'/usr/local/bin/kubectl', destination_directory]
check_call(cmd)
# Make the config file readable by the ubuntu users so juju scp works.
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu',
context='juju-context', cluster='juju-cluster'):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
# Create the config file with the address of the master server.
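# Illustrative example of the rendered command (paths/addresses are placeholders):
#   kubectl config --kubeconfig=/home/ubuntu/config set-cluster juju-cluster \
#     --server=https://<master>:6443 --certificate-authority=<ca.crt> --embed-certs=true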
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} set-credentials {1} ' \
'--client-key={2} --client-certificate={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, user, key, certificate)))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
def get_dns_ip():
'''Get an IP address for the DNS server on the provided cidr.'''
# Remove the range from the cidr.
ip = service_cidr().split('/')[0]
# Take the last octet off the IP address and replace it with 10.
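# e.g. a service CIDR of 10.152.183.0/24 yields 10.152.183.10 (illustrative).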
return '.'.join(ip.split('.')[0:-1]) + '.10'
def get_kubernetes_service_ip():
'''Get the IP address for the kubernetes service based on the cidr.'''
# Remove the range from the cidr.
ip = service_cidr().split('/')[0]
# Remove the last octet and replace it with 1.
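# e.g. a service CIDR of 10.152.183.0/24 yields 10.152.183.1 (illustrative).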
return '.'.join(ip.split('.')[0:-1]) + '.1'
def handle_etcd_relation(reldata):
''' Save the client credentials and set appropriate daemon flags when
etcd declares itself as available'''
connection_string = reldata.get_connection_string()
# Define where the etcd tls files will be kept.
etcd_dir = '/etc/ssl/etcd'
# Create paths to the etcd client ca, key, and cert file locations.
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
# Save the client credentials (in relation data) to the paths provided.
reldata.save_client_credentials(key, cert, ca)
api_opts = FlagManager('kube-apiserver')
# Never use stale data; always prefer what's coming in during context
# building. If it's stale, it's because what's in unitdata is stale.
data = api_opts.data
if data.get('--etcd-servers-strict') or data.get('--etcd-servers'):
api_opts.destroy('--etcd-cafile')
api_opts.destroy('--etcd-keyfile')
api_opts.destroy('--etcd-certfile')
api_opts.destroy('--etcd-servers', strict=True)
api_opts.destroy('--etcd-servers')
# Set the apiserver flags in the options manager
api_opts.add('--etcd-cafile', ca)
api_opts.add('--etcd-keyfile', key)
api_opts.add('--etcd-certfile', cert)
api_opts.add('--etcd-servers', connection_string, strict=True)
def render_files():
'''Use Jinja templating to render the systemd service and default files
with the dynamic data needed for the configuration.'''
context = {}
config = hookenv.config()
# Add the charm configuration data to the context.
context.update(config)
# Update the context with extra values: arch, and networking information
context.update({'arch': arch(),
'master_address': hookenv.unit_get('private-address'),
'public_address': hookenv.unit_get('public-address'),
'private_address': hookenv.unit_get('private-address')})
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
scheduler_opts = FlagManager('kube-scheduler')
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
# Handle static options for now
api_opts.add('--min-request-timeout', '300')
api_opts.add('--v', '4')
api_opts.add('--client-ca-file', ca_cert_path)
api_opts.add('--tls-cert-file', server_cert_path)
api_opts.add('--tls-private-key-file', server_key_path)
scheduler_opts.add('--v', '2')
# Default to 3 minute resync. TODO: Make this configurable?
controller_opts.add('--min-resync-period', '3m')
controller_opts.add('--v', '2')
controller_opts.add('--root-ca-file', ca_cert_path)
context.update({'kube_apiserver_flags': api_opts.to_s(),
'kube_scheduler_flags': scheduler_opts.to_s(),
'kube_controller_manager_flags': controller_opts.to_s()})
# Render the configuration files that contains parameters for
# the apiserver, scheduler, and controller-manager
render_service('kube-apiserver', context)
render_service('kube-controller-manager', context)
render_service('kube-scheduler', context)
# explicitly render the generic defaults file
render('kube-defaults.defaults', '/etc/default/kube-defaults', context)
# when files change on disk, we need to inform systemd of the changes
call(['systemctl', 'daemon-reload'])
call(['systemctl', 'enable', 'kube-apiserver'])
call(['systemctl', 'enable', 'kube-controller-manager'])
call(['systemctl', 'enable', 'kube-scheduler'])
def render_service(service_name, context):
'''Render the systemd service by name.'''
unit_directory = '/lib/systemd/system'
source = '{0}.service'.format(service_name)
target = os.path.join(unit_directory, '{0}.service'.format(service_name))
render(source, target, context)
conf_directory = '/etc/default'
source = '{0}.defaults'.format(service_name)
target = os.path.join(conf_directory, service_name)
render(source, target, context)
def setup_basic_auth(username='admin', password='admin', user='admin'):
'''Create the htaccess file for basic authentication.'''
srv_kubernetes = '/srv/kubernetes'
if not os.path.isdir(srv_kubernetes):
os.makedirs(srv_kubernetes)
htaccess = os.path.join(srv_kubernetes, 'basic_auth.csv')
with open(htaccess, 'w') as stream:
stream.write('{0},{1},{2}'.format(username, password, user))
def setup_tokens(token, username, user):
'''Create a token file for kubernetes authentication.'''
srv_kubernetes = '/srv/kubernetes'
if not os.path.isdir(srv_kubernetes):
os.makedirs(srv_kubernetes)
known_tokens = os.path.join(srv_kubernetes, 'known_tokens.csv')
if not token:
alpha = string.ascii_letters + string.digits
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(32))
with open(known_tokens, 'w') as stream:
stream.write('{0},{1},{2}'.format(token, username, user))
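# Illustrative note (not in the original charm): setup_tokens(None, 'admin',
# 'admin') generates a random 32-character alphanumeric token and writes a
# single line of the form '<token>,admin,admin' to
# /srv/kubernetes/known_tokens.csv.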
def all_kube_system_pods_running():
''' Check pod status in the kube-system namespace. Returns True if all
pods are running, False otherwise. '''
cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
except CalledProcessError:
hookenv.log('failed to get kube-system pod status')
return False
result = json.loads(output)
for pod in result['items']:
status = pod['status']['phase']
if status != 'Running':
return False
return True
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appflowcollector(base_resource) :
""" Configuration for AppFlow collector resource. """
def __init__(self) :
self._name = ""
self._ipaddress = ""
self._port = 0
self._netprofile = ""
self._newname = ""
self.___count = 0
@property
def name(self) :
"""Name for the collector. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at
(@), equals (=), and hyphen (-) characters.
Only four collectors can be configured.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow collector" or 'my appflow collector').<br/>Minimum length = 1<br/>Maximum length = 127.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the collector. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at
(@), equals (=), and hyphen (-) characters.
Only four collectors can be configured.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow collector" or 'my appflow collector').<br/>Minimum length = 1<br/>Maximum length = 127
"""
try :
self._name = name
except Exception as e:
raise e
@property
def ipaddress(self) :
"""IPv4 address of the collector.
"""
try :
return self._ipaddress
except Exception as e:
raise e
@ipaddress.setter
def ipaddress(self, ipaddress) :
"""IPv4 address of the collector.
"""
try :
self._ipaddress = ipaddress
except Exception as e:
raise e
@property
def port(self) :
"""UDP port on which the collector listens.<br/>Default value: 4739.
"""
try :
return self._port
except Exception as e:
raise e
@port.setter
def port(self, port) :
"""UDP port on which the collector listens.<br/>Default value: 4739
"""
try :
self._port = port
except Exception as e:
raise e
@property
def netprofile(self) :
"""Netprofile to associate with the collector. The IP address defined in the profile is used as the source IP address for AppFlow traffic for this collector. If you do not set this parameter, the NetScaler IP (NSIP) address is used as the source IP address.<br/>Maximum length = 128.
"""
try :
return self._netprofile
except Exception as e:
raise e
@netprofile.setter
def netprofile(self, netprofile) :
"""Netprofile to associate with the collector. The IP address defined in the profile is used as the source IP address for AppFlow traffic for this collector. If you do not set this parameter, the NetScaler IP (NSIP) address is used as the source IP address.<br/>Maximum length = 128
"""
try :
self._netprofile = netprofile
except Exception as e:
raise e
@property
def newname(self) :
"""New name for the collector. Must begin with an ASCII alphabetic or underscore (_) character, and must
contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at(@), equals (=), and hyphen (-) characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow coll" or 'my appflow coll').<br/>Minimum length = 1.
"""
try :
return self._newname
except Exception as e:
raise e
@newname.setter
def newname(self, newname) :
"""New name for the collector. Must begin with an ASCII alphabetic or underscore (_) character, and must
contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at(@), equals (=), and hyphen (-) characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow coll" or 'my appflow coll').<br/>Minimum length = 1
"""
try :
self._newname = newname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appflowcollector_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appflowcollector
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add appflowcollector.
"""
try :
if type(resource) is not list :
addresource = appflowcollector()
addresource.name = resource.name
addresource.ipaddress = resource.ipaddress
addresource.port = resource.port
addresource.netprofile = resource.netprofile
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ appflowcollector() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].ipaddress = resource[i].ipaddress
addresources[i].port = resource[i].port
addresources[i].netprofile = resource[i].netprofile
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete appflowcollector.
"""
try :
if type(resource) is not list :
deleteresource = appflowcollector()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ appflowcollector() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ appflowcollector() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def rename(cls, client, resource, new_name) :
""" Use this API to rename a appflowcollector resource.
"""
try :
renameresource = appflowcollector()
if type(resource) == cls :
renameresource.name = resource.name
else :
renameresource.name = resource
return renameresource.rename_resource(client,new_name)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the appflowcollector resources that are configured on netscaler.
"""
try :
if not name :
obj = appflowcollector()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = appflowcollector()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [appflowcollector() for _ in range(len(name))]
obj = [appflowcollector() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = appflowcollector()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of appflowcollector resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = appflowcollector()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the appflowcollector resources configured on NetScaler.
"""
try :
obj = appflowcollector()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of appflowcollector resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = appflowcollector()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class appflowcollector_response(base_response) :
def __init__(self, length=1) :
self.appflowcollector = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.appflowcollector = [appflowcollector() for _ in range(length)]
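# Illustrative usage sketch (not part of the generated SDK; assumes an
# authenticated nitro_service client named `client`, and the collector name,
# address and port below are hypothetical):
#     collector = appflowcollector()
#     collector.name = "af_col_1"
#     collector.ipaddress = "192.0.2.10"
#     collector.port = 4739
#     appflowcollector.add(client, collector)
#     all_collectors = appflowcollector.get(client)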
|
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Tuple
from marshmallow import fields, validate
import polyaxon_sdk
from polyaxon.polyflow.early_stopping import EarlyStoppingSchema
from polyaxon.polyflow.matrix.base import BaseSearchConfig
from polyaxon.polyflow.matrix.kinds import V1MatrixKind
from polyaxon.polyflow.matrix.params import HpParamSchema
from polyaxon.polyflow.matrix.tuner import TunerSchema
from polyaxon.polyflow.optimization import (
OptimizationMetricSchema,
OptimizationResourceSchema,
)
from polyaxon.schemas.base import BaseCamelSchema
from polyaxon.schemas.fields.ref_or_obj import RefOrObject
class HyperbandSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal(V1MatrixKind.HYPERBAND))
params = fields.Dict(
keys=fields.Str(), values=fields.Nested(HpParamSchema), allow_none=True
)
max_iterations = RefOrObject(fields.Int(validate=validate.Range(min=1)))
eta = RefOrObject(fields.Float(validate=validate.Range(min=0)))
resource = fields.Nested(OptimizationResourceSchema)
metric = fields.Nested(OptimizationMetricSchema)
resume = RefOrObject(fields.Boolean(allow_none=True))
seed = RefOrObject(fields.Int(allow_none=True))
concurrency = RefOrObject(fields.Int(allow_none=True))
tuner = fields.Nested(TunerSchema, allow_none=True)
early_stopping = fields.List(fields.Nested(EarlyStoppingSchema), allow_none=True)
@staticmethod
def schema_config():
return V1Hyperband
class V1Hyperband(BaseSearchConfig, polyaxon_sdk.V1Hyperband):
"""Hyperband is a relatively new method for tuning iterative algorithms.
It performs random sampling and attempts to gain an edge
by using time spent optimizing in the best way.
The algorithm tries a large number of random configurations/experiments,
then decides which configurations to keep based on their progress.
The way Hyperband is implemented, it creates several buckets,
each bucket has a number of randomly generated hyperparameter configurations,
each configuration uses a resource (e.g. number of steps, number of epochs, batch size, ...).
To adapt the algorithm's maximum resource allocation, users can use `maxIterations`.
After trying a number of configurations, it chooses the top `number of observations/eta`
configurations and runs them using an increased `resource*eta` resource.
At last, it chooses the best configuration it has found so far.
The way Hyperband works is by discarding poor performing
configurations leaving more resources for more promising configurations
during the successive halving.
In order to use Hyperband correctly, you must define a metric called
`resource` that the algorithm will increase iteratively.
Args:
kind: string, should be equal to `hyperband`
params: List[Dict[str, [params](/docs/automation/optimization-engine/params/#discrete-values)]] # noqa
max_iterations: int
eta: float
resource: V1OptimizationResource
metric: V1OptimizationMetric
resume: bool, optional
seed: int, optional
concurrency: int, optional
tuner: [V1Tuner](/docs/automation/optimization-engine/tuner/), optional
early_stopping: List[[EarlyStopping](/docs/automation/helpers/early-stopping)], optional
## YAML usage
```yaml
>>> matrix:
>>> kind: hyperband
>>> concurrency:
>>> maxIterations:
>>> resource:
>>> metric:
>>> resume:
>>> params:
>>> seed:
>>> tuner:
>>> earlyStopping:
```
## Python usage
```python
>>> from polyaxon import types
>>> from polyaxon.polyflow import (
>>> V1Hyperband, V1HpLogSpace, V1HpChoice, V1FailureEarlyStopping, V1MetricEarlyStopping,
>>> V1OptimizationMetric, V1Optimization, V1OptimizationResource,
>>> )
>>> matrix = V1Hyperband(
>>> concurrency=20,
>>> params={"param1": V1HpLogSpace(...), "param2": V1HpChoice(...), ... },
>>> resume=True,
>>> metric=V1OptimizationMetric(name="loss", optimization=V1Optimization.MINIMIZE),
>>> resource=V1OptimizationResource(name="num_steps", type=types.INT),
>>> early_stopping=[V1FailureEarlyStopping(...), V1MetricEarlyStopping(...)]
>>> )
```
## Fields
### kind
The kind signals to the CLI, client, and other tools that this matrix is hyperband.
If you are using the python client to create the mapping,
this field is not required and is set by default.
```yaml
>>> matrix:
>>> kind: hyperband
```
### params
A dictionary of `key -> value generator`
to generate the parameters.
To learn about all possible
[params generators](/docs/automation/optimization-engine/params/).
> The parameters generated will be validated against
> the component's inputs/outputs definition to check that the values
> can be passed and have valid types.
```yaml
>>> matrix:
>>> kind: hyperband
>>> params:
>>> param1:
>>> kind: ...
>>> value: ...
>>> param2:
>>> kind: ...
>>> value: ...
```
### maxIterations
The algorithm's maximum resource allocation.
```yaml
>>> matrix:
>>> kind: hyperband
>>> maxIterations: 81
```
### eta
A parameter that tunes:
* The downsampling factor: `number of observation/eta`
* The resource increase factor: `resource*eta`
```yaml
>>> matrix:
>>> kind: hyperband
>>> eta: 3
```
### resource
The resource to optimize (should be an int or a float);
the resource can be, for example, the number of steps or epochs.
```yaml
>>> matrix:
>>> kind: hyperband
>>> resource:
>>> name: num_steps
>>> type: int
```
### metric
The metric to optimize during the iterations,
this is the metric that you want to maximize or minimize.
```yaml
>>> matrix:
>>> kind: hyperband
>>> metric:
>>> name: loss
>>> optimization: minimize
```
### resume
A flag to resume or restart the selected runs, default to false (restart)
```yaml
>>> matrix:
>>> kind: hyperband
>>> resume: True
```
### concurrency
An optional value to set the number of concurrent operations.
<blockquote class="light">
This value only makes sense if it is less than or equal to the total number of possible runs.
</blockquote>
```yaml
>>> matrix:
>>> kind: hyperband
>>> concurrency: 2
```
For more details about concurrency management,
please check the [concurrency section](/docs/automation/helpers/concurrency/).
### seed
Since this algorithm uses random generators,
if you want to control the seed for the random generator, you can pass a seed.
```yaml
>>> matrix:
>>> kind: hyperband
>>> seed: 523
```
### earlyStopping
A list of early stopping conditions to check for terminating
all operations managed by the pipeline.
If one of the early stopping conditions is met,
a signal will be sent to terminate all running and pending operations.
```yaml
>>> matrix:
>>> kind: hyperband
>>> earlyStopping: ...
```
For more details please check the
[early stopping section](/docs/automation/helpers/early-stopping/).
### tuner
The tuner reference (with a component hub reference) to use.
The component contains the logic for creating new suggestions,
users can override this section to provide a different tuner component.
```yaml
>>> matrix:
>>> kind: hyperband
>>> tuner:
>>> hubRef: 'acme/my-hyperband-tuner:version'
```
## Example
This is an example of using hyperband for hyperparameter search:
```yaml
>>> version: 1.1
>>> kind: operation
>>> matrix:
>>> kind: hyperband
>>> concurrency: 5
>>> maxIterations: 81
>>> eta: 3
>>> resource:
>>> name: num_steps
>>> type: int
>>> metric:
>>> name: loss
>>> optimization: minimize
>>> resume: False
>>> params:
>>> lr:
>>> kind: uniform
>>> value: [0, 0.9]
>>> dropout:
>>> kind: choice
>>> value: [0.25, 0.3]
>>> activation:
>>> kind: pchoice
>>> value: [[relu, 0.1], [sigmoid, 0.8]]
>>> early_stopping:
>>> - metric: accuracy
>>> value: 0.9
>>> optimization: maximize
>>> - metric: loss
>>> value: 0.05
>>> optimization: minimize
>>> component:
>>> inputs:
>>> - name: batch_size
>>> type: int
>>> isOptional: true
>>> value: 128
>>> - name: logspace
>>> type: float
>>> - name: dropout
>>> type: float
>>> container:
>>> image: image:latest
>>> command: [python3, train.py]
>>> args: [
>>> "--batch-size={{ batch_size }}",
>>> "--lr={{ lr }}",
>>> "--dropout={{ dropout }}",
>>> "--activation={{ activation }}"
>>> ]
```
In this example we allocate a maximum resource of `81`;
our resource in this case is `num_steps`, an `int` that we pass to our model.
This is how the algorithm works with this config:
| | bucket=4 | | bucket=3 | | bucket=2 | | bucket=1 | | bucket=0 | | # noqa
|--------------|------------|----------------|------------|-----------------|-------------|----------------|------------|-----------------|------------|----------------| # noqa
|iteration |num configs |resource alloc |num configs |resource alloc |num configs |resource alloc |num configs |resource alloc |num configs |resource alloc | # noqa
|0 |81 |1 |27 |3 |9 |9 |6 |27 |5 | 81 | # noqa
|1 |27 |3 |9 |9 |3 |27 |2 |81 | | | # noqa
|2 |9 |9 |3 |27 |1 |81 | | | | | # noqa
|3 |3 |27 |1 |81 | | | | | | | # noqa
|4 |1 |81 | | | | | | | | | # noqa
"""
SCHEMA = HyperbandSchema
IDENTIFIER = V1MatrixKind.HYPERBAND
REDUCED_ATTRIBUTES = ["seed", "concurrency", "earlyStopping", "tuner", "resume"]
def set_tuning_params(self):
# Maximum iterations per configuration: max_iterations
# Defines configuration downsampling/elimination rate (default = 3): eta
# number of times to run hyperband (brackets)
# i.e. # of times to repeat the outer loops over the tradeoffs `s`
self.s_max = int(math.log(self.max_iterations) / math.log(self.eta))
self.B = (
self.s_max + 1
) * self.max_iterations # budget per bracket of successive halving
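# Worked example (illustrative): with max_iterations=81 and eta=3,
# s_max = log(81) / log(3) = 4 and B = (4 + 1) * 81 = 405, which yields the
# five brackets shown in the class docstring table above.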
def get_bracket(self, iteration):
"""This defines the bracket `s` in outerloop `for s in reversed(range(self.s_max))`."""
return self.s_max - iteration
def should_create_iteration(self, iteration, bracket_iteration):
"""Return a boolean to indicate if we need to reschedule another iteration."""
bracket = self.get_bracket(iteration=iteration)
if bracket_iteration < bracket:
# The bracket is still processing
return False
# We can only reschedule if we can create a new bracket
return self.get_bracket(iteration=iteration + 1) >= 0
def get_num_runs_to_keep(self, num_runs, bracket_iteration):
"""Return the number of configs to keep and resume."""
num_runs = num_runs * (self.eta**-bracket_iteration)
return int(num_runs / self.eta)
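# Illustrative example: with eta=3, num_runs=81 and bracket_iteration=0,
# this keeps int(81 / 3) = 27 runs, matching the docstring table above.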
def get_num_runs(self, bracket):
# n: initial number of configs
return int(
math.ceil(
(self.B / self.max_iterations) * (self.eta**bracket) / (bracket + 1)
)
)
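# Illustrative example: with max_iterations=81 and eta=3 (so B=405),
# bracket=4 starts with ceil((405 / 81) * 3**4 / 5) = 81 configs and
# bracket=0 starts with ceil(5 * 1 / 1) = 5 configs, as in the docstring table.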
def get_num_runs_to_keep_for_iteration(self, iteration, bracket_iteration):
"""Return the number of configs to keep for an iteration and iteration bracket.
This is just a utility wrapper around `get_num_runs_to_keep`.
"""
bracket = self.get_bracket(iteration=iteration)
if bracket_iteration == bracket + 1:
# End of loop `for bracket_iteration in range(bracket + 1):`
return 0
num_runs = self.get_num_runs(bracket=bracket)
return self.get_num_runs_to_keep(
num_runs=num_runs, bracket_iteration=bracket_iteration
)
def should_reduce_configs(self, iteration, bracket_iteration):
"""Return a boolean to indicate if we need to reschedule another bracket iteration."""
num_runs_to_keep = self.get_num_runs_to_keep_for_iteration(
iteration=iteration, bracket_iteration=bracket_iteration
)
return num_runs_to_keep > 0
def create_iteration(
self, iteration: int = None, bracket_iteration: int = 0
) -> Tuple[int, int]:
"""Create an iteration for hyperband."""
if iteration is None:
return 0, 0
should_create_iteration = self.should_create_iteration(
iteration=iteration,
bracket_iteration=bracket_iteration,
)
should_reduce_configs = self.should_reduce_configs(
iteration=iteration,
bracket_iteration=bracket_iteration,
)
if should_create_iteration:
iteration = iteration + 1
bracket_iteration = 0
elif should_reduce_configs:
bracket_iteration = bracket_iteration + 1
else:
raise ValueError(
"Hyperband create iteration failed, "
"could not reschedule or reduce configs"
)
return iteration, bracket_iteration
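# Illustrative progression (max_iterations=81, eta=3): starting from
# (iteration=0, bracket_iteration=0), successive calls advance through the
# bracket iterations before opening a new bracket, e.g.
# (0, 0) -> (0, 1) -> (0, 2) -> (0, 3) -> (0, 4) -> (1, 0).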
def should_reschedule(self, iteration, bracket_iteration):
"""Return a boolean to indicate if we need to reschedule another iteration."""
return self.should_create_iteration(
iteration=iteration, bracket_iteration=bracket_iteration
) or self.should_reduce_configs(
iteration=iteration,
bracket_iteration=bracket_iteration,
)
|
|
"""
If you find this kernel helpful, please upvote. Any suggestions for improvement are warmly welcomed.
I made cosmetic changes to the [code](https://www.kaggle.com/aharless/kaggle-runnable-version-of-baris-kanber-s-lightgbm/code),
added some new features, and ran it on a 25-million-row chunk.
I also took ideas from various public kernels.
"""
FILENO = 6  # To distinguish the output file name.
debug = 0  # Whether or not we are in debugging mode
import pandas as pd
import time
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import metrics
import lightgbm as lgb
import gc
#import matplotlib.pyplot as plt
import os
###### Feature extraction ######
#### Extracting next click feature
### Help taken from https://www.kaggle.com/nanomathias/feature-engineering-importance-testing
### Did some cosmetic changes
predictors=[]
def do_next_prev_Click( df,agg_suffix, agg_type='float32'):
print('Extracting new features...')
df['hour'] = pd.to_datetime(df.click_time).dt.hour.astype('int8')
df['day'] = pd.to_datetime(df.click_time).dt.day.astype('int8')
#### New added
df['minute'] = pd.to_datetime(df.click_time).dt.minute.astype('int8')
predictors.append('minute')
df['second'] = pd.to_datetime(df.click_time).dt.second.astype('int8')
predictors.append('second')
# print(f">> \nExtracting {agg_suffix} time calculation features...\n")
print(">> \nExtracting {0} time calculation features...\n".format(agg_suffix))
GROUP_BY_NEXT_CLICKS = [
# V1
# {'groupby': ['ip']},
# {'groupby': ['ip', 'app']},
# {'groupby': ['ip', 'channel']},
# {'groupby': ['ip', 'os']},
# V3
{'groupby': ['ip', 'app', 'device', 'os', 'channel']},
{'groupby': ['ip', 'os', 'device']},
{'groupby': ['ip', 'os', 'device', 'app']}
]
# Calculate the time to next click for each group
for spec in GROUP_BY_NEXT_CLICKS:
# Name of new feature
new_feature = '{}_{}'.format('_'.join(spec['groupby']),agg_suffix)
# Unique list of features to select
all_features = spec['groupby'] + ['click_time']
# Run calculation
#print(f">> Grouping by {spec['groupby']}, and saving time to {agg_suffix} in: {new_feature}")
print(">> Grouping by {0}, and saving time to {1} in: {2}".format(spec['groupby'], agg_suffix, new_feature))
if agg_suffix=="nextClick":
df[new_feature] = (df[all_features].groupby(spec[
'groupby']).click_time.shift(-1) - df.click_time).dt.seconds.astype(agg_type)
elif agg_suffix== "prevClick":
df[new_feature] = (df.click_time - df[all_features].groupby(spec[
'groupby']).click_time.shift(+1) ).dt.seconds.astype(agg_type)
predictors.append(new_feature)
gc.collect()
# print('predictors',predictors)
return (df)
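## Illustrative note (not in the original kernel): for the group
## ['ip', 'os', 'device'] with agg_suffix='nextClick', the generated column is
## named 'ip_os_device_nextClick' and holds the seconds until that group's
## next click.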
## Below a function is written to extract count feature by aggregating different cols
def do_count( df, group_cols, agg_type='uint32', show_max=False, show_agg=True ):
agg_name='{}count'.format('_'.join(group_cols))
if show_agg:
print( "\nAggregating by ", group_cols , '... and saved in', agg_name )
gp = df[group_cols][group_cols].groupby(group_cols).size().rename(agg_name).to_frame().reset_index()
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
predictors.append(agg_name)
# print('predictors',predictors)
gc.collect()
return( df )
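## Illustrative example: do_count(df, ['ip', 'day', 'hour']) adds a column
## named 'ip_day_hourcount' holding the click count per (ip, day, hour) group.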
## Below a function is written to extract unique count feature from different cols
def do_countuniq( df, group_cols, counted, agg_type='uint32', show_max=False, show_agg=True ):
agg_name= '{}_by_{}_countuniq'.format(('_'.join(group_cols)),(counted))
if show_agg:
print( "\nCounting unqiue ", counted, " by ", group_cols , '... and saved in', agg_name )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].nunique().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
predictors.append(agg_name)
# print('predictors',predictors)
gc.collect()
return( df )
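## Illustrative example: do_countuniq(df, ['ip'], 'channel') adds
## 'ip_by_channel_countuniq', the number of distinct channels seen per ip.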
### Below a function is written to extract cumulative count feature from different cols
def do_cumcount( df, group_cols, counted,agg_type='uint32', show_max=False, show_agg=True ):
agg_name= '{}_by_{}_cumcount'.format(('_'.join(group_cols)),(counted))
if show_agg:
print( "\nCumulative count by ", group_cols , '... and saved in', agg_name )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].cumcount()
df[agg_name]=gp.values
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
predictors.append(agg_name)
# print('predictors',predictors)
gc.collect()
return( df )
### Below a function is written to extract mean feature from different cols
def do_mean( df, group_cols, counted, agg_type='float32', show_max=False, show_agg=True ):
agg_name= '{}_by_{}_mean'.format(('_'.join(group_cols)),(counted))
if show_agg:
print( "\nCalculating mean of ", counted, " by ", group_cols , '... and saved in', agg_name )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].mean().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
predictors.append(agg_name)
# print('predictors',predictors)
gc.collect()
return( df )
def do_var( df, group_cols, counted, agg_type='float32', show_max=False, show_agg=True ):
agg_name= '{}_by_{}_var'.format(('_'.join(group_cols)),(counted))
if show_agg:
print( "\nCalculating variance of ", counted, " by ", group_cols , '... and saved in', agg_name )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].var().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
predictors.append(agg_name)
# print('predictors',predictors)
gc.collect()
return( df )
### A function is written to train the lightGBM model with different given parameters
if debug:
print('*** debug parameter set: this is a test run for debugging purposes ***')
def lgb_modelfit_nocv(params, dtrain, dvalid, predictors, target='target', objective='binary', metrics='auc',
feval=None, early_stopping_rounds=50, num_boost_round=3000, verbose_eval=10, categorical_features=None):
lgb_params = {
'boosting_type': 'gbdt',
'objective': objective,
'metric':metrics,
'learning_rate': 0.01,
#'is_unbalance': 'true', #because training data is unbalance (replaced with scale_pos_weight)
'num_leaves': 31, # we should let it be smaller than 2^(max_depth)
'max_depth': -1, # -1 means no limit
'min_child_samples': 20, # Minimum number of data needed in a child (min_data_in_leaf)
'max_bin': 255, # Number of bucketed bin for feature values
'subsample': 0.7, # Subsample ratio of the training instance.
'subsample_freq': 0, # frequency of subsample; <=0 means disabled
'colsample_bytree': 0.4, # Subsample ratio of columns when constructing each tree.
'min_child_weight': 10, # Minimum sum of instance weight(hessian) needed in a child(leaf)
'subsample_for_bin': 200000, # Number of samples for constructing bin
'min_split_gain': 0, # minimum gain to perform a split (min_gain_to_split)
'reg_alpha': 0.9, # L1 regularization term on weights
'reg_lambda': 0.9, # L2 regularization term on weights
'nthread': 8,
'verbose': 0,
}
lgb_params.update(params)
print("preparing validation datasets")
xgtrain = lgb.Dataset(dtrain[predictors].values.astype('float32'), label=dtrain[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
xgvalid = lgb.Dataset(dvalid[predictors].values.astype('float32'), label=dvalid[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
evals_results = {}
bst1 = lgb.train(lgb_params,
xgtrain,
valid_sets=[xgtrain, xgvalid],
valid_names=['train','valid'],
evals_result=evals_results,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=100,
feval=feval)
# save model to file
model_name = "model" + str(FILENO) + ".txt"
bst1.save_model(model_name)
print("\nModel Report")
print("bst1.best_iteration: ", bst1.best_iteration)
print(metrics+":", evals_results['valid'][metrics][bst1.best_iteration-1])
return (bst1,bst1.best_iteration)
## Running the full calculation.
#### A function is written here to run the full calculation with defined parameters.
def DO(frm,to,fileno):
dtypes = {
'ip' : 'uint32',
'app' : 'uint16',
'device' : 'uint8',
'os' : 'uint16',
'channel' : 'uint16',
'is_attributed' : 'uint8',
'click_id' : 'uint32',
}
# print('loading train data...',frm,to)
# train_df = pd.read_csv("../Data/train.csv", parse_dates=['click_time'], skiprows=range(1,frm), nrows=to-frm, dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'])
# print('loading test data...')
# if debug:
# test_df = pd.read_csv("../Data/test.csv", nrows=100000, parse_dates=['click_time'], dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])
# else:
# test_df = pd.read_csv("../Data/test.csv", parse_dates=['click_time'], dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])
# len_train = len(train_df)
# train_df=train_df.append(test_df)
# del test_df
# gc.collect()
# train_df = do_next_prev_Click( train_df,agg_suffix='nextClick', agg_type='float32' ); gc.collect()
# # train_df = do_next_prev_Click( train_df,agg_suffix='prevClick', agg_type='float32' ); gc.collect() ## Removed temporarily due to RAM shortage.
# train_df = do_countuniq( train_df, ['ip'], 'channel' ); gc.collect()
# train_df = do_countuniq( train_df, ['ip', 'device', 'os'], 'app'); gc.collect()
# train_df = do_countuniq( train_df, ['ip', 'day'], 'hour' ); gc.collect()
# train_df = do_countuniq( train_df, ['ip'], 'app'); gc.collect()
# train_df = do_countuniq( train_df, ['ip', 'app'], 'os'); gc.collect()
# train_df = do_countuniq( train_df, ['ip'], 'device'); gc.collect()
# train_df = do_countuniq( train_df, ['app'], 'channel'); gc.collect()
# train_df = do_cumcount( train_df, ['ip'], 'os'); gc.collect()
# train_df = do_cumcount( train_df, ['ip', 'device', 'os'], 'app'); gc.collect()
# train_df = do_count( train_df, ['ip', 'day', 'hour'] ); gc.collect()
# train_df = do_count( train_df, ['ip', 'app']); gc.collect()
# train_df = do_count( train_df, ['ip', 'app', 'os']); gc.collect()
# train_df = do_var( train_df, ['ip', 'day', 'channel'], 'hour'); gc.collect()
# train_df = do_var( train_df, ['ip', 'app', 'os'], 'hour'); gc.collect()
# train_df = do_var( train_df, ['ip', 'app', 'channel'], 'day'); gc.collect()
# train_df = do_mean( train_df, ['ip', 'app', 'channel'], 'hour' ); gc.collect()
# print(train_df.head(5))
# gc.collect()
# print('\n\nBefore appending predictors...\n\n',sorted(predictors))
target = 'is_attributed'
word= ['app','device','os', 'channel', 'hour', 'day','minute', 'second']
# for feature in word:
# if feature not in predictors:
# predictors.append(feature)
predictors = ['app', 'app_by_channel_countuniq', 'channel', 'day', 'device', 'hour', 'ip_app_by_os_countuniq',
'ip_app_channel_by_day_var', 'ip_app_channel_by_hour_mean',
'ip_app_os_by_hour_var', 'ip_app_oscount', 'ip_appcount', 'ip_by_app_countuniq', 'ip_by_channel_countuniq',
'ip_by_device_countuniq', 'ip_by_os_cumcount', 'ip_day_by_hour_countuniq', 'ip_day_channel_by_hour_var',
'ip_day_hourcount', 'ip_device_os_by_app_countuniq', 'ip_device_os_by_app_cumcount',
# 'ip_os_device_app_nextClick', 'ip_os_device_nextClick', 'ip_app_device_os_channel_nextClick',
'minute', 'os', 'second']
categorical = ['app', 'device', 'os', 'channel', 'hour', 'day','minute', 'second']
print('\n\nAfter appending predictors...\n\n',sorted(predictors))
# train_df.to_pickle("daniel.pkl")
# df = train_df
df = pd.read_pickle("daniel.pkl")
len_train = to - frm
test_df = df[len_train:]
val_df = df[(len_train-val_size):len_train]
train_df = df[:(len_train-val_size)]
del df
gc.collect()
print("\ntrain size: ", len(train_df))
print("\nvalid size: ", len(val_df))
print("\ntest size : ", len(test_df))
# load model to predict
bst = lgb.Booster(model_file = 'model6.txt')
# search_iterations = [50, 1100, 50]
# for i in range(search_iterations[0], search_iterations[1], search_iterations[2]):
# y_pred = bst.predict(val_df[predictors].values.astype('float32'), num_iteration=i)
# score = metrics.roc_auc_score(val_df[target].values, y_pred)
# loss = metrics.log_loss(val_df[target].values, y_pred)
# print ("Iteration: {0} AUC: {1} Logloss: {2}".format(i, score, loss))
sub = pd.DataFrame()
sub['click_id'] = test_df['click_id'].astype('int')
gc.collect()
# print("Training...")
# start_time = time.time()
# params = {
# 'learning_rate': 0.01,
# #'is_unbalance': 'true', # replaced with scale_pos_weight argument
# 'num_leaves': 31, # 2^max_depth - 1
# 'max_depth': -1, # -1 means no limit
# 'min_child_samples': 200, # Minimum number of data need in a child(min_data_in_leaf)
# 'max_bin': 200, # Number of bucketed bin for feature values
# 'subsample': 0.8, # Subsample ratio of the training instance.
# 'subsample_freq': 1, # frequence of subsample, <=0 means no enable
# 'colsample_bytree': 0.9, # Subsample ratio of columns when constructing each tree.
# 'min_child_weight': 10, # Minimum sum of instance weight(hessian) needed in a child(leaf)
# 'scale_pos_weight':300 # because training data is extremely unbalanced
# }
# (bst,best_iteration) = lgb_modelfit_nocv(params,
# train_df,
# val_df,
# predictors,
# target,
# objective='binary',
# metrics= 'binary_logloss', #'auc',
# early_stopping_rounds=3000,
# verbose_eval=True,
# num_boost_round=1000,
# categorical_features=categorical)
# print('[{}]: model training time'.format(time.time() - start_time))
# del train_df
# del val_df
# gc.collect()
# ax = lgb.plot_importance(bst, max_num_features=100)
# plt.show()
# plt.savefig('foo.png')
print("Predicting...")
sub['is_attributed'] = bst.predict(test_df[predictors],num_iteration=500) #best_iteration)
# if not debug:
# print("writing...")
sub.to_csv('sub_it%d.csv'%(fileno),index=False,float_format='%.9f')
print("done...")
return sub
####### Chunk size defining and final run ############
nrows=184903891-1
nchunk=12000000
val_size=1200000
frm=nrows-65000000
if debug:
frm=0
nchunk=100000
val_size=10000
to=frm+nchunk
sub=DO(frm,to,FILENO)
|
|
# coding: utf-8
"""
PhraseApp
PhraseApp API for the interaction with the PhraseApp localization platform
OpenAPI spec version: 2.0
Contact: support@phraseapp.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import io
import json
import ssl
import certifi
import logging
import re
# python 2 and python 3 compatibility library
from six import PY3
from six.moves.urllib.parse import urlencode
try:
import urllib3
except ImportError:
raise ImportError('Swagger python client requires urllib3.')
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""
Returns a dictionary of the response headers.
"""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""
Returns a given response header.
"""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=4):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680
# maxsize is the number of requests to host that are allowed in parallel
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if configuration.ssl_ca_cert:
ca_certs = configuration.ssl_ca_cert
else:
# if no certificate file is set, use Mozilla's root certificates.
ca_certs = certifi.where()
# https pool manager
if configuration.proxy:
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True, _request_timeout=None):
"""
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without
reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT', 'PATCH', 'OPTIONS']
if post_params and body:
raise ValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, ) if PY3 else (int, long)):
timeout = urllib3.Timeout(total=_request_timeout)
elif isinstance(_request_timeout, tuple) and len(_request_timeout) == 2:
timeout = urllib3.Timeout(connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = None
if body:
request_body = json.dumps(body)
r = self.pool_manager.request(method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded':
r = self.pool_manager.request(method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
# must del headers['Content-Type'], or the correct Content-Type
# generated by urllib3 will be overwritten.
del headers['Content-Type']
r = self.pool_manager.request(method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is provided
# in serialized form
elif isinstance(body, str):
request_body = body
r = self.pool_manager.request(method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided arguments.
Please check that your arguments match declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# In Python 3, response.data is bytes;
# we need to decode it to a string.
if PY3:
r.data = r.data.decode('utf8')
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True, _request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True, _request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True,
_request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None, _preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True,
_request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True,
_request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True,
_request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
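# Illustrative usage sketch (not part of the generated client; assumes a
# swagger `configuration` object exposing verify_ssl, ssl_ca_cert, cert_file,
# key_file and proxy, and the URL and token below are hypothetical):
#     client = RESTClientObject(configuration)
#     resp = client.GET('https://api.phraseapp.com/v2/projects',
#                       headers={'Authorization': 'token <access_token>'},
#                       query_params={'page': 1})
#     print(resp.status, resp.data)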
class ApiException(Exception):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""
Custom error messages for exception
"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
|
|
import os
from base64 import b64encode
from django.conf import settings
from django.core.files.storage import default_storage as storage
import mock
import pytest
from olympia import amo
from olympia.amo.tests import addon_factory
from olympia.versions.models import VersionPreview
from olympia.versions.tasks import generate_static_theme_preview
HEADER_ROOT = os.path.join(
settings.ROOT, 'src/olympia/versions/tests/static_themes/')
def check_render(svg_content, header_url, header_height, preserve_aspect_ratio,
mimetype, valid_img, colors, svg_width,
svg_height, inner_width):
# check header is there.
assert 'width="%s" height="%s" xmlns="http://www.w3.org/2000/' % (
svg_width, svg_height) in svg_content
# check image xml is correct
image_tag = (
'<image id="svg-header-img" width="%s" height="%s" '
'preserveAspectRatio="%s"' % (
inner_width, header_height, preserve_aspect_ratio))
assert image_tag in svg_content, svg_content
# and image content is included and was encoded
if valid_img:
with storage.open(HEADER_ROOT + header_url, 'rb') as header_file:
header_blob = header_file.read()
base_64_uri = 'data:%s;base64,%s' % (
mimetype, b64encode(header_blob))
else:
base_64_uri = ''
assert 'xlink:href="%s"></image>' % base_64_uri in svg_content, svg_content
# check each of our colors above was included
for color in colors:
assert color in svg_content
def check_preview(preview_instance, theme_size_constant, write_svg_mock_args,
resize_image_mock_args, png_crush_mock_args):
_, png_path = write_svg_mock_args
assert png_path == preview_instance.image_path
assert preview_instance.sizes == {
'image': list(theme_size_constant['full']),
'thumbnail': list(theme_size_constant['thumbnail'])
}
resize_path, thumb_path, thumb_size = resize_image_mock_args
assert resize_path == png_path
assert thumb_path == preview_instance.thumbnail_path
assert thumb_size == theme_size_constant['thumbnail']
assert png_crush_mock_args[0] == preview_instance.image_path
@pytest.mark.django_db
@mock.patch('olympia.versions.tasks.pngcrush_image')
@mock.patch('olympia.versions.tasks.resize_image')
@mock.patch('olympia.versions.tasks.write_svg_to_png')
@pytest.mark.parametrize(
'header_url, header_height, preserve_aspect_ratio, mimetype, valid_img', (
('transparent.gif', 1, 'xMaxYMin meet', 'image/gif', True),
('weta.png', 200, 'xMaxYMin meet', 'image/png', True),
('wetalong.png', 200, 'xMaxYMin slice', 'image/png', True),
('missing_file.png', 0, 'xMaxYMin meet', '', False),
('empty-no-ext', 10, 'xMaxYMin meet', 'image/png', True),
(None, 0, 'xMaxYMin meet', '', False), # i.e. no headerURL entry
)
)
def test_generate_static_theme_preview(
write_svg_to_png_mock, resize_image_mock, pngcrush_image_mock,
header_url, header_height, preserve_aspect_ratio, mimetype, valid_img):
write_svg_to_png_mock.return_value = True
theme_manifest = {
"images": {
},
"colors": {
"accentcolor": "#918e43",
"textcolor": "#3deb60",
"toolbar_text": "#b5ba5b",
"toolbar_field": "#cc29cc",
"toolbar_field_text": "#17747d",
"tab_line": "#00db12",
}
}
if header_url is not None:
theme_manifest['images']['headerURL'] = header_url
addon = addon_factory()
generate_static_theme_preview(
theme_manifest, HEADER_ROOT, addon.current_version.pk)
assert resize_image_mock.call_count == 3
assert write_svg_to_png_mock.call_count == 3
assert pngcrush_image_mock.call_count == 3
# First check the header Preview is good
header_preview = VersionPreview.objects.get(
version=addon.current_version,
position=amo.THEME_PREVIEW_SIZES['header']['position'])
check_preview(
header_preview, amo.THEME_PREVIEW_SIZES['header'],
write_svg_to_png_mock.call_args_list[0][0],
resize_image_mock.call_args_list[0][0],
pngcrush_image_mock.call_args_list[0][0])
# Then the list Preview
list_preview = VersionPreview.objects.get(
version=addon.current_version,
position=amo.THEME_PREVIEW_SIZES['list']['position'])
check_preview(
list_preview, amo.THEME_PREVIEW_SIZES['list'],
write_svg_to_png_mock.call_args_list[1][0],
resize_image_mock.call_args_list[1][0],
pngcrush_image_mock.call_args_list[1][0])
# And finally the new single Preview
single_preview = VersionPreview.objects.get(
version=addon.current_version,
position=amo.THEME_PREVIEW_SIZES['single']['position'])
check_preview(
single_preview, amo.THEME_PREVIEW_SIZES['single'],
write_svg_to_png_mock.call_args_list[2][0],
resize_image_mock.call_args_list[2][0],
pngcrush_image_mock.call_args_list[2][0])
# Now check the svg renders
header_svg = write_svg_to_png_mock.call_args_list[0][0][0]
list_svg = write_svg_to_png_mock.call_args_list[1][0][0]
single_svg = write_svg_to_png_mock.call_args_list[2][0][0]
colors = ['class="%s" fill="%s"' % (key, color)
for (key, color) in theme_manifest['colors'].items()]
check_render(header_svg, header_url, header_height,
preserve_aspect_ratio, mimetype, valid_img, colors,
680, 92, 680)
check_render(list_svg, header_url, header_height,
preserve_aspect_ratio, mimetype, valid_img, colors,
760, 92, 760)
check_render(single_svg, header_url, header_height,
preserve_aspect_ratio, mimetype, valid_img, colors,
720, 92, 720)
@pytest.mark.django_db
@mock.patch('olympia.versions.tasks.pngcrush_image')
@mock.patch('olympia.versions.tasks.resize_image')
@mock.patch('olympia.versions.tasks.write_svg_to_png')
def test_generate_static_theme_preview_with_chrome_properties(
write_svg_to_png_mock, resize_image_mock, pngcrush_image_mock):
write_svg_to_png_mock.return_value = True
theme_manifest = {
"images": {
"theme_frame": "transparent.gif"
},
"colors": {
"frame": [123, 45, 67], # 'accentcolor'
"tab_background_text": [9, 87, 65], # 'textcolor'
"bookmark_text": [0, 0, 0], # 'toolbar_text'
}
}
addon = addon_factory()
generate_static_theme_preview(
theme_manifest, HEADER_ROOT, addon.current_version.pk)
assert resize_image_mock.call_count == 3
assert write_svg_to_png_mock.call_count == 3
assert pngcrush_image_mock.call_count == 3
# First check the header Preview is good
header_preview = VersionPreview.objects.get(
version=addon.current_version,
position=amo.THEME_PREVIEW_SIZES['header']['position'])
check_preview(
header_preview, amo.THEME_PREVIEW_SIZES['header'],
write_svg_to_png_mock.call_args_list[0][0],
resize_image_mock.call_args_list[0][0],
pngcrush_image_mock.call_args_list[0][0])
# Then the list Preview
list_preview = VersionPreview.objects.get(
version=addon.current_version,
position=amo.THEME_PREVIEW_SIZES['list']['position'])
check_preview(
list_preview, amo.THEME_PREVIEW_SIZES['list'],
write_svg_to_png_mock.call_args_list[1][0],
resize_image_mock.call_args_list[1][0],
pngcrush_image_mock.call_args_list[1][0])
# And finally the new single Preview
single_preview = VersionPreview.objects.get(
version=addon.current_version,
position=amo.THEME_PREVIEW_SIZES['single']['position'])
check_preview(
single_preview, amo.THEME_PREVIEW_SIZES['single'],
write_svg_to_png_mock.call_args_list[2][0],
resize_image_mock.call_args_list[2][0],
pngcrush_image_mock.call_args_list[2][0])
colors = []
    # Check that each of the colors above was converted to CSS color codes
chrome_colors = {
'bookmark_text': 'toolbar_text',
'frame': 'accentcolor',
'tab_background_text': 'textcolor',
}
for (chrome_prop, firefox_prop) in chrome_colors.items():
color_list = theme_manifest['colors'][chrome_prop]
color = 'rgb(%s,%s,%s)' % tuple(color_list)
colors.append('class="%s" fill="%s"' % (firefox_prop, color))
header_svg = write_svg_to_png_mock.call_args_list[0][0][0]
list_svg = write_svg_to_png_mock.call_args_list[1][0][0]
single_svg = write_svg_to_png_mock.call_args_list[2][0][0]
check_render(header_svg, 'transparent.gif', 1,
'xMaxYMin meet', 'image/gif', True, colors, 680, 92, 680)
check_render(list_svg, 'transparent.gif', 1,
'xMaxYMin meet', 'image/gif', True, colors, 760, 92, 760)
check_render(single_svg, 'transparent.gif', 1,
'xMaxYMin meet', 'image/gif', True, colors, 720, 92, 720)
def check_render_additional(svg_content, inner_svg_width):
# check additional background pattern is correct
image_width = 270
image_height = 200
pattern_x_offset = (inner_svg_width - image_width) / 2
pattern_tag = (
'<pattern id="AdditionalBackground1"\n'
' width="%s" height="%s"\n'
' x="%s" y="%s" patternUnits="userSpaceOnUse">' % (
image_width, image_height, pattern_x_offset, 0))
assert pattern_tag in svg_content, svg_content
image_tag = '<image width="%s" height="%s"' % (image_width, image_height)
assert image_tag in svg_content, svg_content
rect_tag = (
'<rect width="100%" height="100%" fill="url(#AdditionalBackground1)">'
'</rect>')
assert rect_tag in svg_content, svg_content
# and image content is included and was encoded
additional = os.path.join(HEADER_ROOT, 'weta_for_tiling.png')
with storage.open(additional, 'rb') as header_file:
header_blob = header_file.read()
base_64_uri = 'data:%s;base64,%s' % ('image/png', b64encode(header_blob))
assert 'xlink:href="%s"></image>' % base_64_uri in svg_content
@pytest.mark.django_db
@mock.patch('olympia.versions.tasks.pngcrush_image')
@mock.patch('olympia.versions.tasks.resize_image')
@mock.patch('olympia.versions.tasks.write_svg_to_png')
def test_generate_preview_with_additional_backgrounds(
write_svg_to_png_mock, resize_image_mock, pngcrush_image_mock):
write_svg_to_png_mock.return_value = True
theme_manifest = {
"images": {
"headerURL": "empty.png",
"additional_backgrounds": ["weta_for_tiling.png"],
},
"colors": {
"accentcolor": "#918e43",
"textcolor": "#3deb60",
},
"properties": {
"additional_backgrounds_alignment": ["top"],
"additional_backgrounds_tiling": ["repeat-x"],
},
}
addon = addon_factory()
generate_static_theme_preview(
theme_manifest, HEADER_ROOT, addon.current_version.pk)
assert resize_image_mock.call_count == 3
assert write_svg_to_png_mock.call_count == 3
assert pngcrush_image_mock.call_count == 3
# First check the header Preview is good
header_preview = VersionPreview.objects.get(
version=addon.current_version,
position=amo.THEME_PREVIEW_SIZES['header']['position'])
check_preview(
header_preview, amo.THEME_PREVIEW_SIZES['header'],
write_svg_to_png_mock.call_args_list[0][0],
resize_image_mock.call_args_list[0][0],
pngcrush_image_mock.call_args_list[0][0])
# Then the list Preview
list_preview = VersionPreview.objects.get(
version=addon.current_version,
position=amo.THEME_PREVIEW_SIZES['list']['position'])
check_preview(
list_preview, amo.THEME_PREVIEW_SIZES['list'],
write_svg_to_png_mock.call_args_list[1][0],
resize_image_mock.call_args_list[1][0],
pngcrush_image_mock.call_args_list[1][0])
# And finally the new single Preview
single_preview = VersionPreview.objects.get(
version=addon.current_version,
position=amo.THEME_PREVIEW_SIZES['single']['position'])
check_preview(
single_preview, amo.THEME_PREVIEW_SIZES['single'],
write_svg_to_png_mock.call_args_list[2][0],
resize_image_mock.call_args_list[2][0],
pngcrush_image_mock.call_args_list[2][0])
header_svg = write_svg_to_png_mock.call_args_list[0][0][0]
list_svg = write_svg_to_png_mock.call_args_list[1][0][0]
single_svg = write_svg_to_png_mock.call_args_list[2][0][0]
check_render_additional(header_svg, 680)
check_render_additional(list_svg, 760)
check_render_additional(single_svg, 720)
|
|
"""
=======================================
Simulate raw data using subject anatomy
=======================================
This example illustrates how to generate source estimates and simulate raw data
using subject anatomy with the :class:`mne.simulation.SourceSimulator` class.
Once the raw data is simulated, the source estimates are reconstructed
using the dynamic statistical parametric mapping (dSPM) inverse operator.
"""
# Author: Ivana Kojcic <ivana.kojcic@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# Kostiantyn Maksymenko <kostiantyn.maksymenko@gmail.com>
# Samuel Deslauriers-Gauthier <sam.deslauriers@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
import mne
from mne.datasets import sample
print(__doc__)
# In this example, raw data will be simulated for the sample subject, so its
# information needs to be loaded. This step will download the data if it is
# not already on your machine. The subjects directory is also set so it does
# not need to be passed to each function.
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
subject = 'sample'
meg_path = op.join(data_path, 'MEG', subject)
# First, we get an info structure from the sample subject.
fname_info = op.join(meg_path, 'sample_audvis_raw.fif')
info = mne.io.read_info(fname_info)
tstep = 1 / info['sfreq']
# To simulate sources, we also need a source space. It can be obtained from the
# forward solution of the sample subject.
fwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
fwd = mne.read_forward_solution(fwd_fname)
src = fwd['src']
# To simulate raw data, we need to define when the activity occurs using an
# events matrix and to specify the ID of each event.
# A noise covariance matrix also needs to be defined.
# Here, both are loaded from the sample dataset, but they can also be specified
# by the user.
fname_event = op.join(meg_path, 'sample_audvis_raw-eve.fif')
fname_cov = op.join(meg_path, 'sample_audvis-cov.fif')
events = mne.read_events(fname_event)
noise_cov = mne.read_cov(fname_cov)
# Standard sample event IDs. These values will correspond to the third column
# in the events matrix.
event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'smiley': 5, 'button': 32}
# Take only a few events for speed
events = events[:80]
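# Purely illustrative sanity check (not part of the original recipe): each row
# of the events array is [sample, previous value, event id].
print(events[:5])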
###############################################################################
# In order to simulate source time courses, labels of the desired active
# regions need to be specified for each of the 4 simulation conditions.
# Make a dictionary that maps conditions to activation strengths within
# aparc.a2009s [1]_ labels. In the aparc.a2009s parcellation:
#
# - 'G_temp_sup-G_T_transv' is the label for primary auditory area
# - 'S_calcarine' is the label for primary visual area
#
# In each of the 4 conditions, only the primary area is activated. This means
# that during the activations of auditory areas, there are no activations in
# visual areas and vice versa.
# Moreover, for each condition, the contralateral region is more active (here,
# 2 times more) than the ipsilateral one.
activations = {
'auditory/left':
[('G_temp_sup-G_T_transv-lh', 30), # label, activation (nAm)
('G_temp_sup-G_T_transv-rh', 60)],
'auditory/right':
[('G_temp_sup-G_T_transv-lh', 60),
('G_temp_sup-G_T_transv-rh', 30)],
'visual/left':
[('S_calcarine-lh', 30),
('S_calcarine-rh', 60)],
'visual/right':
[('S_calcarine-lh', 60),
('S_calcarine-rh', 30)],
}
annot = 'aparc.a2009s'
# Load the 4 necessary label names.
label_names = sorted(set(activation[0]
for activation_list in activations.values()
for activation in activation_list))
region_names = list(activations.keys())
###############################################################################
# Create simulated source activity
# --------------------------------
#
# Generate source time courses for each region. In this example, we want to
# simulate source activity for a single condition at a time. Therefore, each
# evoked response will be parametrized by latency and duration.
def data_fun(times, latency, duration):
"""Function to generate source time courses for evoked responses,
parametrized by latency and duration."""
f = 15 # oscillating frequency, beta band [Hz]
sigma = 0.375 * duration
sinusoid = np.sin(2 * np.pi * f * (times - latency))
gf = np.exp(- (times - latency - (sigma / 4.) * rng.rand(1)) ** 2 /
(2 * (sigma ** 2)))
return 1e-9 * sinusoid * gf
###############################################################################
# Here, :class:`~mne.simulation.SourceSimulator` is used, which allows us to
# specify where (label), what (source_time_series), and when (events) an event
# will occur.
#
# We will add data for 4 areas, each of which contains 2 labels. Since the
# add_data method accepts one label per call, it will be called twice per area.
#
# Evoked responses are generated such that the main component peaks at 100 ms
# with a duration of around 30 ms, appearing first in the contralateral
# cortex. This is followed by a response in the ipsilateral cortex peaking
# about 15 ms later. The amplitude of the activations will be 2 times higher in
# the contralateral region, as explained before.
#
# When the activity occurs is defined by the events. In this case, they are
# taken from the original raw data. The first column is the sample of the
# event, the second is not used, and the third is the event ID, which is
# different for each of the 4 areas.
times = np.arange(150, dtype=np.float64) / info['sfreq']
duration = 0.03
rng = np.random.RandomState(7)
source_simulator = mne.simulation.SourceSimulator(src, tstep=tstep)
for region_id, region_name in enumerate(region_names, 1):
events_tmp = events[np.where(events[:, 2] == region_id)[0], :]
for i in range(2):
label_name = activations[region_name][i][0]
label_tmp = mne.read_labels_from_annot(subject, annot,
subjects_dir=subjects_dir,
regexp=label_name,
verbose=False)
label_tmp = label_tmp[0]
amplitude_tmp = activations[region_name][i][1]
if region_name.split('/')[1][0] == label_tmp.hemi[0]:
latency_tmp = 0.115
else:
latency_tmp = 0.1
wf_tmp = data_fun(times, latency_tmp, duration)
source_simulator.add_data(label_tmp,
amplitude_tmp * wf_tmp,
events_tmp)
# To obtain a SourceEstimate object, we need to use the `get_stc()` method of
# the SourceSimulator class.
stc_data = source_simulator.get_stc()
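# Purely illustrative: print a short summary of the simulated source estimate
# to verify it looks as expected.
print(stc_data)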
###############################################################################
# Simulate raw data
# -----------------
#
# Project the source time series to sensor space. Three types of noise will be
# added to the simulated raw data:
#
# - multivariate Gaussian noise obtained from the noise covariance from the
# sample data
# - blink (EOG) noise
# - ECG noise
#
# The :class:`~mne.simulation.SourceSimulator` can be given directly to the
# :func:`~mne.simulation.simulate_raw` function.
raw_sim = mne.simulation.simulate_raw(info, source_simulator, forward=fwd)
raw_sim.set_eeg_reference(projection=True)
mne.simulation.add_noise(raw_sim, cov=noise_cov, random_state=0)
mne.simulation.add_eog(raw_sim, random_state=0)
mne.simulation.add_ecg(raw_sim, random_state=0)
# Plot original and simulated raw data.
raw_sim.plot(title='Simulated raw data')
###############################################################################
# Extract epochs and compute evoked responses
# --------------------------------------------
#
epochs = mne.Epochs(raw_sim, events, event_id, tmin=-0.2, tmax=0.3,
baseline=(None, 0))
evoked_aud_left = epochs['auditory/left'].average()
evoked_vis_right = epochs['visual/right'].average()
# Visualize the evoked data
evoked_aud_left.plot(spatial_colors=True)
evoked_vis_right.plot(spatial_colors=True)
###############################################################################
# Reconstruct simulated source time courses using dSPM inverse operator
# ---------------------------------------------------------------------
#
# Here, source time courses for auditory and visual areas are reconstructed
# separately and their difference is shown. This is done merely for a better
# visual representation of the source reconstruction.
# As expected, when high activations appear in primary auditory areas, primary
# visual areas will have low activations and vice versa.
method, lambda2 = 'dSPM', 1. / 9.
inv = mne.minimum_norm.make_inverse_operator(epochs.info, fwd, noise_cov)
stc_aud = mne.minimum_norm.apply_inverse(
evoked_aud_left, inv, lambda2, method)
stc_vis = mne.minimum_norm.apply_inverse(
evoked_vis_right, inv, lambda2, method)
stc_diff = stc_aud - stc_vis
brain = stc_diff.plot(subjects_dir=subjects_dir, initial_time=0.1,
hemi='split', views=['lat', 'med'])
###############################################################################
# References
# ----------
# .. [1] Destrieux C, Fischl B, Dale A, Halgren E (2010). Automatic
# parcellation of human cortical gyri and sulci using standard
# anatomical nomenclature, vol. 53(1), 1-15, NeuroImage.
|
|
# -*- coding: utf-8 -*-
from functools import update_wrapper
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils.six.moves.urllib.parse import urljoin
from cms import constants
__all__ = ['get_cms_setting']
class VERIFIED: pass # need a unique identifier for CMS_LANGUAGES
def default(name):
def decorator(wrapped):
def wrapper():
if hasattr(settings, name):
return getattr(settings, name)
return wrapped()
update_wrapper(wrapper, wrapped)
        return wrapper
return decorator
DEFAULTS = {
'TEMPLATE_INHERITANCE': True,
'PLACEHOLDER_CONF': {},
'PERMISSION': False,
# Whether to use raw ID lookups for users when PERMISSION is True
'RAW_ID_USERS': False,
'PUBLIC_FOR': 'all',
'CONTENT_CACHE_DURATION': 60,
'APPHOOKS': [],
'TOOLBARS': [],
'SITE_CHOICES_CACHE_KEY': 'CMS:site_choices',
'PAGE_CHOICES_CACHE_KEY': 'CMS:page_choices',
'MEDIA_PATH': 'cms/',
'PAGE_MEDIA_PATH': 'cms_page_media/',
'TITLE_CHARACTER': '+',
'PAGE_CACHE': True,
'PLACEHOLDER_CACHE': True,
'PLUGIN_CACHE': True,
'CACHE_PREFIX': 'cms-',
'PLUGIN_PROCESSORS': [],
'PLUGIN_CONTEXT_PROCESSORS': [],
'UNIHANDECODE_VERSION': None,
'UNIHANDECODE_DECODERS': ['ja', 'zh', 'kr', 'vn', 'diacritic'],
'UNIHANDECODE_DEFAULT_DECODER': 'diacritic',
'MAX_PAGE_PUBLISH_REVERSIONS': 10,
'MAX_PAGE_HISTORY_REVERSIONS': 15,
'TOOLBAR_URL__EDIT_ON': 'edit',
'TOOLBAR_URL__EDIT_OFF': 'edit_off',
'TOOLBAR_URL__BUILD': 'build',
'TOOLBAR_URL__DISABLE': 'toolbar_off',
'ADMIN_NAMESPACE': 'admin',
}
def get_cache_durations():
return {
'menus': getattr(settings, 'MENU_CACHE_DURATION', 60 * 60),
'content': get_cms_setting('CONTENT_CACHE_DURATION'),
'permissions': 60 * 60,
}
@default('CMS_MEDIA_ROOT')
def get_media_root():
return os.path.join(settings.MEDIA_ROOT, get_cms_setting('MEDIA_PATH'))
@default('CMS_MEDIA_URL')
def get_media_url():
return urljoin(settings.MEDIA_URL, get_cms_setting('MEDIA_PATH'))
@default('CMS_TOOLBAR_URL__EDIT_ON')
def get_toolbar_url__edit_on():
return get_cms_setting('TOOLBAR_URL__EDIT_ON')
@default('CMS_TOOLBAR_URL__EDIT_OFF')
def get_toolbar_url__edit_off():
return get_cms_setting('TOOLBAR_URL__EDIT_OFF')
@default('CMS_TOOLBAR_URL__BUILD')
def get_toolbar_url__build():
return get_cms_setting('TOOLBAR_URL__BUILD')
@default('CMS_TOOLBAR_URL__DISABLE')
def get_toolbar_url__disable():
return get_cms_setting('TOOLBAR_URL__DISABLE')
def get_templates():
from cms.utils.django_load import load_from_file
if getattr(settings, 'CMS_TEMPLATES_DIR', False):
tpldir = getattr(settings, 'CMS_TEMPLATES_DIR', False)
        # CMS_TEMPLATES_DIR can either be a string pointing to the templates directory
# or a dictionary holding 'site: template dir' entries
if isinstance(tpldir, dict):
tpldir = tpldir[settings.SITE_ID]
        # We must extract the relative path of CMS_TEMPLATES_DIR to the nearest
        # valid templates directory. Here we mimic what the filesystem and
        # app_directories template loaders do
prefix = ''
# Relative to TEMPLATE_DIRS for filesystem loader
for basedir in settings.TEMPLATE_DIRS:
if tpldir.find(basedir) == 0:
prefix = tpldir.replace(basedir + os.sep, '')
break
# Relative to 'templates' directory that app_directory scans
if not prefix:
components = tpldir.split(os.sep)
try:
prefix = os.path.join(*components[components.index('templates') + 1:])
except ValueError:
# If templates is not found we use the directory name as prefix
# and hope for the best
prefix = os.path.basename(tpldir)
config_path = os.path.join(tpldir, '__init__.py')
        # Try to load the templates list and names from the template module.
        # If the module file is not present, skip configuration and just dump the filenames as templates.
        if os.path.isfile(config_path):
template_module = load_from_file(config_path)
templates = [(os.path.join(prefix, data[0].strip()), data[1]) for data in template_module.TEMPLATES.items()]
else:
templates = list((os.path.join(prefix, tpl), tpl) for tpl in os.listdir(tpldir))
else:
templates = list(getattr(settings, 'CMS_TEMPLATES', []))
if get_cms_setting('TEMPLATE_INHERITANCE'):
templates.append((constants.TEMPLATE_INHERITANCE_MAGIC, _(constants.TEMPLATE_INHERITANCE_LABEL)))
return templates
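# Illustrative sketch (hypothetical paths) of the two CMS_TEMPLATES_DIR forms
# handled by get_templates() above -- a plain path, or a mapping keyed by
# SITE_ID:
#
#   CMS_TEMPLATES_DIR = '/srv/project/templates/cms'
#   CMS_TEMPLATES_DIR = {1: '/srv/project/templates/cms_site1'}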
def _ensure_languages_settings(languages):
valid_language_keys = ['code', 'name', 'fallbacks', 'hide_untranslated', 'redirect_on_fallback', 'public']
required_language_keys = ['code', 'name']
simple_defaults = ['public', 'redirect_on_fallback', 'hide_untranslated']
if not isinstance(languages, dict):
raise ImproperlyConfigured(
"CMS_LANGUAGES must be a dictionary with site IDs and 'default'"
" as keys. Please check the format.")
defaults = languages.pop('default', {})
default_fallbacks = defaults.get('fallbacks')
needs_fallbacks = []
for key in defaults:
if key not in valid_language_keys:
raise ImproperlyConfigured("CMS_LANGUAGES has an invalid property in the default properties: %s" % key)
for key in simple_defaults:
if key not in defaults:
defaults[key] = True
for site, language_list in languages.items():
if not isinstance(site, six.integer_types):
raise ImproperlyConfigured(
"CMS_LANGUAGES can only be filled with integers (site IDs) and 'default'"
" for default values. %s is not a valid key." % site)
for language_object in language_list:
for required_key in required_language_keys:
if required_key not in language_object:
raise ImproperlyConfigured("CMS_LANGUAGES has a language which is missing the required key %r "
"in site %r" % (key, site))
language_code = language_object['code']
for key in language_object:
if key not in valid_language_keys:
raise ImproperlyConfigured(
"CMS_LANGUAGES has invalid key %r in language %r in site %r" % (key, language_code, site)
)
if 'fallbacks' not in language_object:
if default_fallbacks:
language_object['fallbacks'] = default_fallbacks
else:
needs_fallbacks.append((site, language_object))
for key in simple_defaults:
if key not in language_object:
language_object[key] = defaults[key]
site_fallbacks = {}
for site, language_object in needs_fallbacks:
if site not in site_fallbacks:
site_fallbacks[site] = [lang['code'] for lang in languages[site] if lang['public']]
language_object['fallbacks'] = [lang_code for lang_code in site_fallbacks[site] if
lang_code != language_object['code']]
languages['default'] = defaults
languages[VERIFIED] = True # this will be busted by @override_settings and cause a re-check
return languages
def get_languages():
if not isinstance(settings.SITE_ID, six.integer_types):
raise ImproperlyConfigured(
"SITE_ID must be an integer"
)
if not settings.USE_I18N:
return _ensure_languages_settings(
{settings.SITE_ID: [{'code': settings.LANGUAGE_CODE, 'name': settings.LANGUAGE_CODE}]})
if settings.LANGUAGE_CODE not in dict(settings.LANGUAGES):
raise ImproperlyConfigured(
'LANGUAGE_CODE "%s" must have a matching entry in LANGUAGES' % settings.LANGUAGE_CODE
)
languages = getattr(settings, 'CMS_LANGUAGES', {
settings.SITE_ID: [{'code': code, 'name': _(name)} for code, name in settings.LANGUAGES]
})
if VERIFIED in languages:
return languages
return _ensure_languages_settings(languages)
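# Illustrative sketch of a minimal CMS_LANGUAGES setting accepted by the
# validation above (integer site IDs as keys, plus an optional 'default'
# entry; the language codes and names are hypothetical):
#
#   CMS_LANGUAGES = {
#       1: [
#           {'code': 'en', 'name': 'English'},
#           {'code': 'de', 'name': 'German', 'fallbacks': ['en']},
#       ],
#       'default': {'public': True, 'hide_untranslated': False},
#   }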
def get_unihandecode_host():
host = getattr(settings, 'CMS_UNIHANDECODE_HOST', None)
if not host:
return host
if host.endswith('/'):
return host
else:
return host + '/'
COMPLEX = {
'CACHE_DURATIONS': get_cache_durations,
'MEDIA_ROOT': get_media_root,
'MEDIA_URL': get_media_url,
# complex because not prefixed by CMS_
'TEMPLATES': get_templates,
'LANGUAGES': get_languages,
'UNIHANDECODE_HOST': get_unihandecode_host,
'CMS_TOOLBAR_URL__EDIT_ON': get_toolbar_url__edit_on,
'CMS_TOOLBAR_URL__EDIT_OFF': get_toolbar_url__edit_off,
'CMS_TOOLBAR_URL__BUILD': get_toolbar_url__build,
'CMS_TOOLBAR_URL__DISABLE': get_toolbar_url__disable,
}
def get_cms_setting(name):
if name in COMPLEX:
return COMPLEX[name]()
else:
return getattr(settings, 'CMS_%s' % name, DEFAULTS[name])
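# Illustrative sketch: get_cms_setting('MEDIA_PATH') returns CMS_MEDIA_PATH if
# it is defined in the Django settings, falling back to the 'cms/' default
# above, while get_cms_setting('TEMPLATES') always goes through the COMPLEX
# handler get_templates().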
def get_site_id(site):
from django.contrib.sites.models import Site
if isinstance(site, Site):
return site.id
try:
return int(site)
except (TypeError, ValueError):
pass
return settings.SITE_ID
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import abc
import contextlib
import datetime
import functools
import inspect
import logging as py_logging
import math
import os
import pyclbr
import random
import re
import shutil
import socket
import stat
import sys
import tempfile
import time
import types
from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from xml.sax import expatreader
from os_brick.initiator import connector
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
import retrying
import six
import webob.exc
from jacket.storage import exception
from jacket.storage.i18n import _, _LE, _LW
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
VALID_TRACE_FLAGS = {'method', 'api'}
TRACE_METHOD = False
TRACE_API = False
synchronized = lockutils.synchronized_with_prefix('storage-')
def find_config(config_path):
"""Find a configuration file using the given hint.
:param config_path: Full or relative path to the config.
:returns: Full path of the config, if it exists.
:raises: `storage.exception.ConfigNotFound`
"""
possible_locations = [
config_path,
os.path.join(CONF.state_path, "etc", "storage", config_path),
os.path.join(CONF.state_path, "etc", config_path),
os.path.join(CONF.state_path, config_path),
"/etc/storage/%s" % config_path,
]
for path in possible_locations:
if os.path.exists(path):
return os.path.abspath(path)
raise exception.ConfigNotFound(path=os.path.abspath(config_path))
def as_int(obj, quiet=True):
# Try "2" -> 2
try:
return int(obj)
except (ValueError, TypeError):
pass
# Try "2.5" -> 2
try:
return int(float(obj))
except (ValueError, TypeError):
pass
# Eck, not sure what this is then.
if not quiet:
raise TypeError(_("Can not translate %s to integer.") % (obj))
return obj
def check_exclusive_options(**kwargs):
"""Checks that only one of the provided options is actually not-none.
    Iterates over all the kwargs passed in and checks that at most one of them
    is not None; if more than one is not None, an exception is raised with the
    names of those arguments.
"""
if not kwargs:
return
pretty_keys = kwargs.pop("pretty_keys", True)
exclusive_options = {}
for (k, v) in kwargs.items():
if v is not None:
exclusive_options[k] = True
if len(exclusive_options) > 1:
# Change the format of the names from pythonic to
# something that is more readable.
#
# Ex: 'the_key' -> 'the key'
if pretty_keys:
names = [k.replace('_', ' ') for k in kwargs.keys()]
else:
names = kwargs.keys()
names = ", ".join(sorted(names))
msg = (_("May specify only one of %s") % (names))
raise exception.InvalidInput(reason=msg)
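# Illustrative sketch (hypothetical option names): the first call below passes
# because only one option is not None, while the second raises InvalidInput.
#
#   check_exclusive_options(snapshot_id='123', source_volid=None)
#   check_exclusive_options(snapshot_id='123', source_volid='456')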
def execute(*cmd, **kwargs):
"""Convenience wrapper around oslo's execute() method."""
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = get_root_helper()
return processutils.execute(*cmd, **kwargs)
def check_ssh_injection(cmd_list):
ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
'<']
# Check whether injection attacks exist
for arg in cmd_list:
arg = arg.strip()
# Check for matching quotes on the ends
is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
if is_quoted:
# Check for unescaped quotes within the quoted argument
quoted = is_quoted.group('quoted')
if quoted:
if (re.match('[\'"]', quoted) or
re.search('[^\\\\][\'"]', quoted)):
raise exception.SSHInjectionThreat(command=cmd_list)
else:
# We only allow spaces within quoted arguments, and that
# is the only special character allowed within quotes
if len(arg.split()) > 1:
raise exception.SSHInjectionThreat(command=cmd_list)
        # Second, check whether a dangerous character appears in the command;
        # shell special characters must be escaped within an argument.
for c in ssh_injection_pattern:
if c not in arg:
continue
result = arg.find(c)
if not result == -1:
if result == 0 or not arg[result - 1] == '\\':
raise exception.SSHInjectionThreat(command=cmd_list)
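# Illustrative sketch (hypothetical command lists): an argument list such as
# ['cat', '/tmp/file'] passes, while ['cat', '/tmp/file;reboot'] raises
# SSHInjectionThreat because of the unescaped ';' special character.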
def cinderdir():
from jacket import storage
return os.path.abspath(storage.__file__).split('storage/__init__.py')[0]
def last_completed_audit_period(unit=None):
"""This method gives you the most recently *completed* audit period.
arguments:
        unit: string, one of 'hour', 'day', 'month', 'year'
Periods normally begin at the beginning (UTC) of the
period unit (So a 'day' period begins at midnight UTC,
a 'month' unit on the 1st, a 'year' on Jan, 1)
unit string may be appended with an optional offset
like so: 'day@18' This will begin the period at 18:00
UTC. 'month@15' starts a monthly period on the 15th,
and year@3 begins a yearly one on March 1st.
returns: 2 tuple of datetimes (begin, end)
The begin timestamp of this audit period is the same as the
end of the previous.
"""
if not unit:
unit = CONF.volume_usage_audit_period
offset = 0
if '@' in unit:
unit, offset = unit.split("@", 1)
offset = int(offset)
rightnow = timeutils.utcnow()
if unit not in ('month', 'day', 'year', 'hour'):
raise ValueError('Time period must be hour, day, month or year')
if unit == 'month':
if offset == 0:
offset = 1
end = datetime.datetime(day=offset,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
year = rightnow.year
if 1 >= rightnow.month:
year -= 1
month = 12 + (rightnow.month - 1)
else:
month = rightnow.month - 1
end = datetime.datetime(day=offset,
month=month,
year=year)
year = end.year
if 1 >= end.month:
year -= 1
month = 12 + (end.month - 1)
else:
month = end.month - 1
begin = datetime.datetime(day=offset, month=month, year=year)
elif unit == 'year':
if offset == 0:
offset = 1
end = datetime.datetime(day=1, month=offset, year=rightnow.year)
if end >= rightnow:
end = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 2)
else:
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
elif unit == 'day':
end = datetime.datetime(hour=offset,
day=rightnow.day,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
end = end - datetime.timedelta(days=1)
begin = end - datetime.timedelta(days=1)
elif unit == 'hour':
end = rightnow.replace(minute=offset, second=0, microsecond=0)
if end >= rightnow:
end = end - datetime.timedelta(hours=1)
begin = end - datetime.timedelta(hours=1)
return (begin, end)
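# Illustrative sketch (the result depends on the current UTC time): if called
# at 2016-03-10 12:00 UTC, last_completed_audit_period('day') returns
# (2016-03-09 00:00, 2016-03-10 00:00), while 'day@18' shifts the period start
# to 18:00 UTC and returns (2016-03-08 18:00, 2016-03-09 18:00).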
class ProtectedExpatParser(expatreader.ExpatParser):
"""An expat parser which disables DTD's and entities by default."""
def __init__(self, forbid_dtd=True, forbid_entities=True,
*args, **kwargs):
# Python 2.x old style class
expatreader.ExpatParser.__init__(self, *args, **kwargs)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise ValueError("Inline DTD forbidden")
def entity_decl(self, entityName, is_parameter_entity, value, base,
systemId, publicId, notationName):
raise ValueError("<!ENTITY> forbidden")
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise ValueError("<!ENTITY> forbidden")
def reset(self):
expatreader.ExpatParser.reset(self)
if self.forbid_dtd:
self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
if self.forbid_entities:
self._parser.EntityDeclHandler = self.entity_decl
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
def safe_minidom_parse_string(xml_string):
"""Parse an XML string using minidom safely.
"""
try:
if six.PY3 and isinstance(xml_string, bytes):
# On Python 3, minidom.parseString() requires Unicode when
# the parser parameter is used.
#
# Bet that XML used in Cinder is always encoded to UTF-8.
xml_string = xml_string.decode('utf-8')
return minidom.parseString(xml_string, parser=ProtectedExpatParser())
except sax.SAXParseException:
raise expat.ExpatError()
def is_valid_boolstr(val):
"""Check if the provided string is a valid bool string or not."""
val = str(val).lower()
return val in ('true', 'false', 'yes', 'no', 'y', 'n', '1', '0')
def is_none_string(val):
"""Check if a string represents a None value."""
if not isinstance(val, six.string_types):
return False
return val.lower() == 'none'
def monkey_patch():
"""Patches decorators for all functions in a specified module.
    If CONF.monkey_patch is set to True,
    this function patches a decorator
    onto all functions in the specified modules.
    You can set decorators for each module
    using CONF.monkey_patch_modules.
    The format is "Module path:Decorator function".
    Example: 'storage.api.storage.ec2.cloud:'
        'storage.openstack.common.notifier.api.notify_decorator'
    The parameters of the decorator are as follows.
(See storage.openstack.common.notifier.api.notify_decorator)
:param name: name of the function
:param function: object of the function
"""
    # If CONF.monkey_patch is not True, this function does nothing.
if not CONF.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
# On Python 3, unbound methods are regular functions
predicate = inspect.isfunction if six.PY3 else inspect.ismethod
for method, func in inspect.getmembers(clz, predicate):
setattr(
clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
elif isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def make_dev_path(dev, partition=None, base='/dev'):
"""Return a path to a particular device.
>>> make_dev_path('xvdc')
/dev/xvdc
>>> make_dev_path('xvdc', 1)
/dev/xvdc1
"""
path = os.path.join(base, dev)
if partition:
path += str(partition)
return path
def sanitize_hostname(hostname):
"""Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
if six.PY3:
hostname = hostname.encode('latin-1', 'ignore')
hostname = hostname.decode('latin-1')
else:
if isinstance(hostname, six.text_type):
hostname = hostname.encode('latin-1', 'ignore')
hostname = re.sub('[ _]', '-', hostname)
hostname = re.sub('[^\w.-]+', '', hostname)
hostname = hostname.lower()
hostname = hostname.strip('.-')
return hostname
def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = (timeutils.utcnow(with_timezone=True) -
last_heartbeat).total_seconds()
return abs(elapsed) <= CONF.service_down_time
def read_file_as_root(file_path):
"""Secure helper to read file as root."""
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except processutils.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
def robust_file_write(directory, filename, data):
"""Robust file write.
Use "write to temp file and rename" model for writing the
persistence file.
:param directory: Target directory to create a file.
:param filename: File name to store specified data.
:param data: String data.
"""
tempname = None
dirfd = None
try:
dirfd = os.open(directory, os.O_DIRECTORY)
# write data to temporary file
with tempfile.NamedTemporaryFile(prefix=filename,
dir=directory,
delete=False) as tf:
tempname = tf.name
tf.write(data.encode('utf-8'))
tf.flush()
os.fdatasync(tf.fileno())
tf.close()
# Fsync the directory to ensure the fact of the existence of
# the temp file hits the disk.
os.fsync(dirfd)
# If destination file exists, it will be replaced silently.
os.rename(tempname, os.path.join(directory, filename))
# Fsync the directory to ensure the rename hits the disk.
os.fsync(dirfd)
except OSError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to write persistence file: %(path)s."),
{'path': os.path.join(directory, filename)})
if os.path.isfile(tempname):
os.unlink(tempname)
finally:
if dirfd:
os.close(dirfd)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
"""Temporarily chown a path.
:params owner_uid: UID of temporary owner (defaults to current user)
"""
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
tmpdir = tempfile.mkdtemp(**kwargs)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.debug('Could not remove tmpdir: %s',
six.text_type(e))
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first."""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
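# Illustrative sketch: for classes Base -> Mid(Base) -> Leaf(Mid), iterating
# over walk_class_hierarchy(Base) yields Leaf first and then Mid, i.e. the
# most derived classes come first and the starting class itself is not
# yielded.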
def get_root_helper():
return 'sudo jacket-rootwrap %s' % CONF.rootwrap_config
def brick_get_connector_properties(multipath=False, enforce_multipath=False):
"""Wrapper to automatically set root_helper in brick calls.
:param multipath: A boolean indicating whether the connector can
support multipath.
:param enforce_multipath: If True, it raises exception when multipath=True
is specified but multipathd is not running.
If False, it falls back to multipath=False
when multipathd is not running.
"""
root_helper = get_root_helper()
return connector.get_connector_properties(root_helper,
CONF.my_ip,
multipath,
enforce_multipath)
def brick_get_connector(protocol, driver=None,
execute=processutils.execute,
use_multipath=False,
device_scan_attempts=3,
*args, **kwargs):
"""Wrapper to get a brick connector object.
This automatically populates the required protocol as well
as the root_helper needed to execute commands.
"""
root_helper = get_root_helper()
return connector.InitiatorConnector.factory(protocol, root_helper,
driver=driver,
execute=execute,
use_multipath=use_multipath,
device_scan_attempts=
device_scan_attempts,
*args, **kwargs)
def require_driver_initialized(driver):
"""Verifies if `driver` is initialized
If the driver is not initialized, an exception will be raised.
:params driver: The driver instance.
:raises: `exception.DriverNotInitialized`
"""
# we can't do anything if the driver didn't init
if not driver.initialized:
driver_name = driver.__class__.__name__
LOG.error(_LE("Volume driver %s not initialized"), driver_name)
raise exception.DriverNotInitialized()
def get_file_mode(path):
"""This primarily exists to make unit testing easier."""
return stat.S_IMODE(os.stat(path).st_mode)
def get_file_gid(path):
"""This primarily exists to make unit testing easier."""
return os.stat(path).st_gid
def get_file_size(path):
"""Returns the file size."""
return os.stat(path).st_size
def _get_disk_of_partition(devpath, st=None):
"""Gets a disk device path and status from partition path.
Returns a disk device path from a partition device path, and stat for
the device. If devpath is not a partition, devpath is returned as it is.
For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1' is
for '/dev/disk1p1' ('p' is prepended to the partition number if the disk
name ends with numbers).
"""
diskpath = re.sub('(?:(?<=\d)p)?\d+$', '', devpath)
if diskpath != devpath:
try:
st_disk = os.stat(diskpath)
if stat.S_ISBLK(st_disk.st_mode):
return (diskpath, st_disk)
except OSError:
pass
# devpath is not a partition
if st is None:
st = os.stat(devpath)
return (devpath, st)
def get_bool_param(param_string, params):
param = params.get(param_string, False)
if not is_valid_boolstr(param):
msg = _('Value %(param)s for %(param_string)s is not a '
'boolean.') % {'param': param, 'param_string': param_string}
raise exception.InvalidParameterValue(err=msg)
return strutils.bool_from_string(param, strict=True)
def get_blkdev_major_minor(path, lookup_for_file=True):
"""Get 'major:minor' number of block device.
Get the device's 'major:minor' number of a block device to control
I/O ratelimit of the specified path.
If lookup_for_file is True and the path is a regular file, lookup a disk
device which the file lies on and returns the result for the device.
"""
st = os.stat(path)
if stat.S_ISBLK(st.st_mode):
path, st = _get_disk_of_partition(path, st)
return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev))
elif stat.S_ISCHR(st.st_mode):
# No I/O ratelimit control is provided for character devices
return None
elif lookup_for_file:
# lookup the mounted disk which the file lies on
out, _err = execute('df', path)
devpath = out.split("\n")[1].split()[0]
        if devpath[0] != '/':
# the file is on a network file system
return None
return get_blkdev_major_minor(devpath, False)
else:
msg = _("Unable to get a block device for file \'%s\'") % path
raise exception.Error(msg)
def check_string_length(value, name, min_length=0, max_length=None):
"""Check the length of specified string.
:param value: the value of the string
:param name: the name of the string
:param min_length: the min_length of the string
:param max_length: the max_length of the string
"""
if not isinstance(value, six.string_types):
msg = _("%s is not a string or unicode") % name
raise exception.InvalidInput(message=msg)
if len(value) < min_length:
msg = _("%(name)s has a minimum character requirement of "
"%(min_length)s.") % {'name': name, 'min_length': min_length}
raise exception.InvalidInput(message=msg)
if max_length and len(value) > max_length:
msg = _("%(name)s has more than %(max_length)s "
"characters.") % {'name': name, 'max_length': max_length}
raise exception.InvalidInput(message=msg)
_visible_admin_metadata_keys = ['readonly', 'attached_mode']
def add_visible_admin_metadata(volume):
"""Add user-visible admin metadata to regular metadata.
Extracts the admin metadata keys that are to be made visible to
non-administrators, and adds them to the regular metadata structure for the
passed-in volume.
"""
visible_admin_meta = {}
if volume.get('volume_admin_metadata'):
if isinstance(volume['volume_admin_metadata'], dict):
volume_admin_metadata = volume['volume_admin_metadata']
for key in volume_admin_metadata:
if key in _visible_admin_metadata_keys:
visible_admin_meta[key] = volume_admin_metadata[key]
else:
for item in volume['volume_admin_metadata']:
if item['key'] in _visible_admin_metadata_keys:
visible_admin_meta[item['key']] = item['value']
# avoid circular ref when volume is a Volume instance
elif (volume.get('admin_metadata') and
isinstance(volume.get('admin_metadata'), dict)):
for key in _visible_admin_metadata_keys:
if key in volume['admin_metadata'].keys():
visible_admin_meta[key] = volume['admin_metadata'][key]
if not visible_admin_meta:
return
# NOTE(zhiyan): update visible administration metadata to
# volume metadata, administration metadata will rewrite existing key.
if volume.get('volume_metadata'):
orig_meta = list(volume.get('volume_metadata'))
for item in orig_meta:
if item['key'] in visible_admin_meta.keys():
item['value'] = visible_admin_meta.pop(item['key'])
for key, value in visible_admin_meta.items():
orig_meta.append({'key': key, 'value': value})
volume['volume_metadata'] = orig_meta
# avoid circular ref when vol is a Volume instance
elif (volume.get('metadata') and
isinstance(volume.get('metadata'), dict)):
volume['metadata'].update(visible_admin_meta)
else:
volume['metadata'] = visible_admin_meta
def remove_invalid_filter_options(context, filters,
allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in filters
if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
LOG.debug("Removing options '%s' from query.", bad_options)
for opt in unknown_options:
del filters[opt]
def is_blk_device(dev):
try:
if stat.S_ISBLK(os.stat(dev).st_mode):
return True
return False
except Exception:
LOG.debug('Path %s not found in is_blk_device check', dev)
return False
class ComparableMixin(object):
def _compare(self, other, method):
try:
return method(self._cmpkey(), other._cmpkey())
except (AttributeError, TypeError):
            # _cmpkey is not implemented, or returns a different type,
            # so we cannot compare with "other".
return NotImplemented
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def retry(exceptions, interval=1, retries=3, backoff_rate=2,
wait_random=False):
def _retry_on_exception(e):
return isinstance(e, exceptions)
def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
exp = backoff_rate ** previous_attempt_number
wait_for = interval * exp
if wait_random:
random.seed()
wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0)
else:
wait_val = wait_for * 1000.0
LOG.debug("Sleeping for %s seconds", (wait_val / 1000.0))
return wait_val
def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
LOG.debug("Failed attempt %s", previous_attempt_number)
LOG.debug("Have been at this for %s seconds",
delay_since_first_attempt)
return previous_attempt_number == retries
if retries < 1:
raise ValueError('Retries must be greater than or '
'equal to 1 (received: %s). ' % retries)
def _decorator(f):
@six.wraps(f)
def _wrapper(*args, **kwargs):
r = retrying.Retrying(retry_on_exception=_retry_on_exception,
wait_func=_backoff_sleep,
stop_func=_print_stop)
return r.call(f, *args, **kwargs)
return _wrapper
return _decorator
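# Illustrative sketch of wrapping a flaky call (the helper function below is
# hypothetical): retry on ProcessExecutionError up to 3 times, doubling the
# wait between attempts starting at 1 second.
#
#   @retry(processutils.ProcessExecutionError, interval=1, retries=3)
#   def _rescan_devices():
#       execute('some', 'flaky', 'command', run_as_root=True)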
def convert_str(text):
"""Convert to native string.
Convert bytes and Unicode strings to native strings:
* convert to bytes on Python 2:
encode Unicode using encodeutils.safe_encode()
* convert to Unicode on Python 3: decode bytes from UTF-8
"""
if six.PY2:
return encodeutils.safe_encode(text)
else:
if isinstance(text, bytes):
return text.decode('utf-8')
else:
return text
def trace_method(f):
"""Decorates a function if TRACE_METHOD is true."""
@functools.wraps(f)
def trace_method_logging_wrapper(*args, **kwargs):
if TRACE_METHOD:
return trace(f)(*args, **kwargs)
return f(*args, **kwargs)
return trace_method_logging_wrapper
def trace_api(f):
"""Decorates a function if TRACE_API is true."""
@functools.wraps(f)
def trace_api_logging_wrapper(*args, **kwargs):
if TRACE_API:
return trace(f)(*args, **kwargs)
return f(*args, **kwargs)
return trace_api_logging_wrapper
def trace(f):
"""Trace calls to the decorated function.
This decorator should always be defined as the outermost decorator so it
is defined last. This is important so it does not interfere
with other decorators.
Using this decorator on a function will cause its execution to be logged at
`DEBUG` level with arguments, return values, and exceptions.
:returns: a function decorator
"""
func_name = f.__name__
@functools.wraps(f)
def trace_logging_wrapper(*args, **kwargs):
if len(args) > 0:
maybe_self = args[0]
else:
maybe_self = kwargs.get('self', None)
if maybe_self and hasattr(maybe_self, '__module__'):
logger = logging.getLogger(maybe_self.__module__)
else:
logger = LOG
# NOTE(ameade): Don't bother going any further if DEBUG log level
# is not enabled for the logger.
if not logger.isEnabledFor(py_logging.DEBUG):
return f(*args, **kwargs)
all_args = inspect.getcallargs(f, *args, **kwargs)
logger.debug('==> %(func)s: call %(all_args)r',
{'func': func_name, 'all_args': all_args})
start_time = time.time() * 1000
try:
result = f(*args, **kwargs)
except Exception as exc:
total_time = int(round(time.time() * 1000)) - start_time
logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r',
{'func': func_name,
'time': total_time,
'exc': exc})
raise
total_time = int(round(time.time() * 1000)) - start_time
logger.debug('<== %(func)s: return (%(time)dms) %(result)r',
{'func': func_name,
'time': total_time,
'result': result})
return result
return trace_logging_wrapper
class TraceWrapperMetaclass(type):
"""Metaclass that wraps all methods of a class with trace_method.
This metaclass will cause every function inside of the class to be
decorated with the trace_method decorator.
To use the metaclass you define a class like so:
@six.add_metaclass(utils.TraceWrapperMetaclass)
class MyClass(object):
"""
def __new__(meta, classname, bases, classDict):
newClassDict = {}
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
# replace it with a wrapped version
attribute = functools.update_wrapper(trace_method(attribute),
attribute)
newClassDict[attributeName] = attribute
return type.__new__(meta, classname, bases, newClassDict)
class TraceWrapperWithABCMetaclass(abc.ABCMeta, TraceWrapperMetaclass):
"""Metaclass that wraps all methods of a class with trace."""
pass
def setup_tracing(trace_flags):
"""Set global variables for each trace flag.
Sets variables TRACE_METHOD and TRACE_API, which represent
whether to log method and api traces.
:param trace_flags: a list of strings
"""
global TRACE_METHOD
global TRACE_API
try:
trace_flags = [flag.strip() for flag in trace_flags]
except TypeError: # Handle when trace_flags is None or a test mock
trace_flags = []
for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS):
LOG.warning(_LW('Invalid trace flag: %s'), invalid_flag)
TRACE_METHOD = 'method' in trace_flags
TRACE_API = 'api' in trace_flags
def resolve_hostname(hostname):
"""Resolves host name to IP address.
Resolves a host name (my.data.point.com) to an IP address (10.12.143.11).
This routine also works if the data passed in hostname is already an IP.
In this case, the same IP address will be returned.
:param hostname: Host name to resolve.
:returns: IP Address for Host name.
"""
result = socket.getaddrinfo(hostname, None)[0]
(family, socktype, proto, canonname, sockaddr) = result
LOG.debug('Asked to resolve hostname %(host)s and got IP %(ip)s.',
{'host': hostname, 'ip': sockaddr[0]})
return sockaddr[0]
def build_or_str(elements, str_format=None):
"""Builds a string of elements joined by 'or'.
    Will join the strings with the word 'or' and, if a str_format is provided,
    it will be used to format the resulting joined string.
If there are no elements an empty string will be returned.
:param elements: Elements we want to join.
:type elements: String or iterable of strings.
:param str_format: String to use to format the response.
:type str_format: String.
"""
if not elements:
return ''
if not isinstance(elements, six.string_types):
elements = _(' or ').join(elements)
if str_format:
return str_format % elements
return elements
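# Illustrative sketch:
#   build_or_str(['iscsi', 'fibre_channel'])              -> 'iscsi or fibre_channel'
#   build_or_str('iscsi', 'protocol %s is not supported') -> 'protocol iscsi is not supported'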
def calculate_virtual_free_capacity(total_capacity,
free_capacity,
provisioned_capacity,
thin_provisioning_support,
max_over_subscription_ratio,
reserved_percentage):
"""Calculate the virtual free capacity based on thin provisioning support.
:param total_capacity: total_capacity_gb of a host_state or pool.
:param free_capacity: free_capacity_gb of a host_state or pool.
:param provisioned_capacity: provisioned_capacity_gb of a host_state
or pool.
:param thin_provisioning_support: thin_provisioning_support of
a host_state or a pool.
:param max_over_subscription_ratio: max_over_subscription_ratio of
a host_state or a pool
:param reserved_percentage: reserved_percentage of a host_state or
a pool.
:returns: the calculated virtual free capacity.
"""
total = float(total_capacity)
reserved = float(reserved_percentage) / 100
if thin_provisioning_support:
free = (total * max_over_subscription_ratio
- provisioned_capacity
- math.floor(total * reserved))
else:
# Calculate how much free space is left after taking into
# account the reserved space.
free = free_capacity - math.floor(total * reserved)
return free
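# Worked example (hypothetical numbers): with total=100 GB, free=30 GB,
# provisioned=150 GB, thin provisioning enabled, an over-subscription ratio of
# 2.0 and 10% reserved, the virtual free capacity is
# 100 * 2.0 - 150 - floor(100 * 0.10) = 40 GB; with thin provisioning disabled
# it is simply 30 - floor(100 * 0.10) = 20 GB.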
def validate_integer(value, name, min_value=None, max_value=None):
"""Make sure that value is a valid integer, potentially within range.
:param value: the value of the integer
:param name: the name of the integer
    :param min_value: the minimum value of the integer
    :param max_value: the maximum value of the integer
:returns: integer
"""
try:
value = int(value)
except (TypeError, ValueError, UnicodeEncodeError):
raise webob.exc.HTTPBadRequest(explanation=(
_('%s must be an integer.') % name))
if min_value is not None and value < min_value:
raise webob.exc.HTTPBadRequest(
explanation=(_('%(value_name)s must be >= %(min_value)d') %
{'value_name': name, 'min_value': min_value}))
if max_value is not None and value > max_value:
raise webob.exc.HTTPBadRequest(
explanation=(_('%(value_name)s must be <= %(max_value)d') %
{'value_name': name, 'max_value': max_value}))
return value
|
|
"""Test the Universal Devices ISY994 config flow."""
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components import ssdp
from homeassistant.components.isy994.config_flow import CannotConnect
from homeassistant.components.isy994.const import (
CONF_IGNORE_STRING,
CONF_RESTORE_LIGHT_STATE,
CONF_SENSOR_STRING,
CONF_TLS_VER,
CONF_VAR_SENSOR_STRING,
DOMAIN,
ISY_URL_POSTFIX,
UDN_UUID_PREFIX,
)
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_SSDP
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import patch
from tests.common import MockConfigEntry
MOCK_HOSTNAME = "1.1.1.1"
MOCK_USERNAME = "test-username"
MOCK_PASSWORD = "test-password"
# Don't use the integration defaults here to make sure they're being set correctly.
MOCK_TLS_VERSION = 1.2
MOCK_IGNORE_STRING = "{IGNOREME}"
MOCK_RESTORE_LIGHT_STATE = True
MOCK_SENSOR_STRING = "IMASENSOR"
MOCK_VARIABLE_SENSOR_STRING = "HomeAssistant."
MOCK_USER_INPUT = {
CONF_HOST: f"http://{MOCK_HOSTNAME}",
CONF_USERNAME: MOCK_USERNAME,
CONF_PASSWORD: MOCK_PASSWORD,
CONF_TLS_VER: MOCK_TLS_VERSION,
}
MOCK_IMPORT_WITH_SSL = {
CONF_HOST: f"https://{MOCK_HOSTNAME}",
CONF_USERNAME: MOCK_USERNAME,
CONF_PASSWORD: MOCK_PASSWORD,
CONF_TLS_VER: MOCK_TLS_VERSION,
}
MOCK_IMPORT_BASIC_CONFIG = {
CONF_HOST: f"http://{MOCK_HOSTNAME}",
CONF_USERNAME: MOCK_USERNAME,
CONF_PASSWORD: MOCK_PASSWORD,
}
MOCK_IMPORT_FULL_CONFIG = {
CONF_HOST: f"http://{MOCK_HOSTNAME}",
CONF_USERNAME: MOCK_USERNAME,
CONF_PASSWORD: MOCK_PASSWORD,
CONF_IGNORE_STRING: MOCK_IGNORE_STRING,
CONF_RESTORE_LIGHT_STATE: MOCK_RESTORE_LIGHT_STATE,
CONF_SENSOR_STRING: MOCK_SENSOR_STRING,
CONF_TLS_VER: MOCK_TLS_VERSION,
CONF_VAR_SENSOR_STRING: MOCK_VARIABLE_SENSOR_STRING,
}
MOCK_DEVICE_NAME = "Name of the device"
MOCK_UUID = "CE:FB:72:31:B7:B9"
MOCK_VALIDATED_RESPONSE = {"name": MOCK_DEVICE_NAME, "uuid": MOCK_UUID}
PATCH_CONFIGURATION = "homeassistant.components.isy994.config_flow.Configuration"
PATCH_CONNECTION = "homeassistant.components.isy994.config_flow.Connection"
PATCH_ASYNC_SETUP = "homeassistant.components.isy994.async_setup"
PATCH_ASYNC_SETUP_ENTRY = "homeassistant.components.isy994.async_setup_entry"
async def test_form(hass: HomeAssistantType):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(PATCH_CONFIGURATION) as mock_config_class, patch(
PATCH_CONNECTION
) as mock_connection_class, patch(
PATCH_ASYNC_SETUP, return_value=True
) as mock_setup, patch(
PATCH_ASYNC_SETUP_ENTRY,
return_value=True,
) as mock_setup_entry:
isy_conn = mock_connection_class.return_value
isy_conn.get_config.return_value = None
mock_config_class.return_value = MOCK_VALIDATED_RESPONSE
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USER_INPUT,
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == f"{MOCK_DEVICE_NAME} ({MOCK_HOSTNAME})"
assert result2["result"].unique_id == MOCK_UUID
assert result2["data"] == MOCK_USER_INPUT
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_host(hass: HomeAssistantType):
"""Test we handle invalid host."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": MOCK_HOSTNAME, # Test with missing protocol (http://)
"username": MOCK_USERNAME,
"password": MOCK_PASSWORD,
"tls": MOCK_TLS_VERSION,
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_host"}
async def test_form_invalid_auth(hass: HomeAssistantType):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(PATCH_CONFIGURATION), patch(
PATCH_CONNECTION,
side_effect=ValueError("PyISY could not connect to the ISY."),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USER_INPUT,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass: HomeAssistantType):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(PATCH_CONFIGURATION), patch(
PATCH_CONNECTION,
side_effect=CannotConnect,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USER_INPUT,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_existing_config_entry(hass: HomeAssistantType):
"""Test if config entry already exists."""
MockConfigEntry(domain=DOMAIN, unique_id=MOCK_UUID).add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(PATCH_CONFIGURATION) as mock_config_class, patch(
PATCH_CONNECTION
) as mock_connection_class:
isy_conn = mock_connection_class.return_value
isy_conn.get_config.return_value = None
mock_config_class.return_value = MOCK_VALIDATED_RESPONSE
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USER_INPUT,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_import_flow_some_fields(hass: HomeAssistantType) -> None:
"""Test import config flow with just the basic fields."""
with patch(PATCH_CONFIGURATION) as mock_config_class, patch(
PATCH_CONNECTION
) as mock_connection_class, patch(PATCH_ASYNC_SETUP, return_value=True), patch(
PATCH_ASYNC_SETUP_ENTRY,
return_value=True,
):
isy_conn = mock_connection_class.return_value
isy_conn.get_config.return_value = None
mock_config_class.return_value = MOCK_VALIDATED_RESPONSE
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=MOCK_IMPORT_BASIC_CONFIG,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_HOST] == f"http://{MOCK_HOSTNAME}"
assert result["data"][CONF_USERNAME] == MOCK_USERNAME
assert result["data"][CONF_PASSWORD] == MOCK_PASSWORD
async def test_import_flow_with_https(hass: HomeAssistantType) -> None:
"""Test import config with https."""
with patch(PATCH_CONFIGURATION) as mock_config_class, patch(
PATCH_CONNECTION
) as mock_connection_class, patch(PATCH_ASYNC_SETUP, return_value=True), patch(
PATCH_ASYNC_SETUP_ENTRY,
return_value=True,
):
isy_conn = mock_connection_class.return_value
isy_conn.get_config.return_value = None
mock_config_class.return_value = MOCK_VALIDATED_RESPONSE
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=MOCK_IMPORT_WITH_SSL,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_HOST] == f"https://{MOCK_HOSTNAME}"
assert result["data"][CONF_USERNAME] == MOCK_USERNAME
assert result["data"][CONF_PASSWORD] == MOCK_PASSWORD
async def test_import_flow_all_fields(hass: HomeAssistantType) -> None:
"""Test import config flow with all fields."""
with patch(PATCH_CONFIGURATION) as mock_config_class, patch(
PATCH_CONNECTION
) as mock_connection_class, patch(PATCH_ASYNC_SETUP, return_value=True), patch(
PATCH_ASYNC_SETUP_ENTRY,
return_value=True,
):
isy_conn = mock_connection_class.return_value
isy_conn.get_config.return_value = None
mock_config_class.return_value = MOCK_VALIDATED_RESPONSE
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=MOCK_IMPORT_FULL_CONFIG,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_HOST] == f"http://{MOCK_HOSTNAME}"
assert result["data"][CONF_USERNAME] == MOCK_USERNAME
assert result["data"][CONF_PASSWORD] == MOCK_PASSWORD
assert result["data"][CONF_IGNORE_STRING] == MOCK_IGNORE_STRING
assert result["data"][CONF_RESTORE_LIGHT_STATE] == MOCK_RESTORE_LIGHT_STATE
assert result["data"][CONF_SENSOR_STRING] == MOCK_SENSOR_STRING
assert result["data"][CONF_VAR_SENSOR_STRING] == MOCK_VARIABLE_SENSOR_STRING
assert result["data"][CONF_TLS_VER] == MOCK_TLS_VERSION
async def test_form_ssdp_already_configured(hass: HomeAssistantType) -> None:
"""Test ssdp abort when the serial number is already configured."""
await setup.async_setup_component(hass, "persistent_notification", {})
MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: f"http://{MOCK_HOSTNAME}{ISY_URL_POSTFIX}"},
unique_id=MOCK_UUID,
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data={
ssdp.ATTR_SSDP_LOCATION: f"http://{MOCK_HOSTNAME}{ISY_URL_POSTFIX}",
ssdp.ATTR_UPNP_FRIENDLY_NAME: "myisy",
ssdp.ATTR_UPNP_UDN: f"{UDN_UUID_PREFIX}{MOCK_UUID}",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_form_ssdp(hass: HomeAssistantType):
"""Test we can setup from ssdp."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data={
ssdp.ATTR_SSDP_LOCATION: f"http://{MOCK_HOSTNAME}{ISY_URL_POSTFIX}",
ssdp.ATTR_UPNP_FRIENDLY_NAME: "myisy",
ssdp.ATTR_UPNP_UDN: f"{UDN_UUID_PREFIX}{MOCK_UUID}",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
with patch(PATCH_CONFIGURATION) as mock_config_class, patch(
PATCH_CONNECTION
) as mock_connection_class, patch(
PATCH_ASYNC_SETUP, return_value=True
) as mock_setup, patch(
PATCH_ASYNC_SETUP_ENTRY,
return_value=True,
) as mock_setup_entry:
isy_conn = mock_connection_class.return_value
isy_conn.get_config.return_value = None
mock_config_class.return_value = MOCK_VALIDATED_RESPONSE
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USER_INPUT,
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == f"{MOCK_DEVICE_NAME} ({MOCK_HOSTNAME})"
assert result2["result"].unique_id == MOCK_UUID
assert result2["data"] == MOCK_USER_INPUT
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
|
|
from gusto import *
from gusto import thermodynamics
from firedrake import (as_vector, SpatialCoordinate,
PeriodicRectangleMesh, ExtrudedMesh,
exp, cos, sin, cosh, sinh, tanh, pi, Function, sqrt)
import sys
day = 24.*60.*60.
hour = 60.*60.
dt = 30.
if '--running-tests' in sys.argv:
tmax = dt
tdump = dt
else:
tmax = 30*day
tdump = 2*hour
##############################################################################
# set up mesh
##############################################################################
# Construct the periodic base mesh: a rectangle one cell wide in y, so effectively 1D
columns = 30 # number of columns
L = 1000000.
m = PeriodicRectangleMesh(columns, 1, 2.*L, 1.e5, quadrilateral=True)
# build the 3D mesh by extruding the base mesh in the vertical
nlayers = 30  # number of vertical layers
H = 10000.  # height of the model top
mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers)
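# The result is effectively a vertical slice: 2*L long in the periodic x direction,
# a single cell (1.e5 m) wide in y, and H deep in z, split into nlayers layers.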
##############################################################################
# set up all the other things that state requires
##############################################################################
# Coriolis expression
f = 1.e-04
Omega = as_vector([0., 0., f*0.5])
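# The Coriolis parameter is f = 2*Omega_z, so giving the rotation vector a vertical
# component of f/2 reproduces f in the 2*Omega x u term of the momentum equation.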
# list of prognostic fieldnames
# this is passed to state and used to construct a dictionary,
# state.field_dict so that we can access fields by name
# u is the 3D velocity
# rho is the density
# theta is the potential temperature
fieldlist = ['u', 'rho', 'theta']
# class containing timestepping parameters
# all values not explicitly set here use the default values provided
# and documented in configuration.py
timestepping = TimesteppingParameters(dt=dt)
# class containing output parameters
# all values not explicitly set here use the default values provided
# and documented in configuration.py
dirname = 'compressible_eady'
output = OutputParameters(dirname=dirname,
dumpfreq=int(tdump/dt),
dumplist=['u', 'rho', 'theta'],
perturbation_fields=['rho', 'theta', 'ExnerPi'],
log_level='INFO')
# class containing physical parameters
# all values not explicitly set here use the default values provided
# and documented in configuration.py
parameters = CompressibleEadyParameters(H=H, f=f)
# class for diagnostics
# fields passed to this class will have basic diagnostics computed
# (eg min, max, l2 norm) and these will be output as a json file
diagnostics = Diagnostics(*fieldlist)
# list of diagnostic fields, each defined in a class in diagnostics.py
diagnostic_fields = [CourantNumber(), VelocityY(),
ExnerPi(), ExnerPi(reference=True),
CompressibleKineticEnergy(),
CompressibleKineticEnergyY(),
CompressibleEadyPotentialEnergy(),
Sum("CompressibleKineticEnergy",
"CompressibleEadyPotentialEnergy"),
Difference("CompressibleKineticEnergy",
"CompressibleKineticEnergyY")]
# setup state, passing in the mesh, information on the required finite element
# function spaces and the classes above
state = State(mesh, vertical_degree=1, horizontal_degree=1,
family="RTCF",
Coriolis=Omega,
timestepping=timestepping,
output=output,
parameters=parameters,
diagnostics=diagnostics,
fieldlist=fieldlist,
diagnostic_fields=diagnostic_fields)
##############################################################################
# Initial conditions
##############################################################################
u0 = state.fields("u")
rho0 = state.fields("rho")
theta0 = state.fields("theta")
# spaces
Vu = state.spaces("HDiv")
Vt = state.spaces("HDiv_v")
Vr = state.spaces("DG")
# first set up the background potential temperature profile, chosen so that
# the buoyancy frequency N is constant: (g/theta)*dtheta/dz = N**2
# the following is symbolic algebra, using the default buoyancy frequency
# from the parameters class.
x, y, z = SpatialCoordinate(mesh)
g = parameters.g
Nsq = parameters.Nsq
theta_surf = parameters.theta_surf
# N^2 = (g/theta)*dtheta/dz => dtheta/dz = theta*N^2/g => theta = theta_0*exp(N^2*z/g)
theta_ref = theta_surf*exp(Nsq*(z-H/2)/g)
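# Sanity check of the profile above: d(theta_ref)/dz = theta_ref*Nsq/g, so
# (g/theta_ref)*d(theta_ref)/dz = Nsq, i.e. the buoyancy frequency really is constant
# with height; the H/2 offset simply makes theta_surf the value at mid-height.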
theta_b = Function(Vt).interpolate(theta_ref)
# set theta_pert
def coth(x):
return cosh(x)/sinh(x)
def Z(z):
return Bu*((z/H)-0.5)
def n():
return Bu**(-1)*sqrt((Bu*0.5-tanh(Bu*0.5))*(coth(Bu*0.5)-Bu*0.5))
a = -4.5
Bu = 0.5
theta_exp = a*theta_surf/g*sqrt(Nsq)*(-(1.-Bu*0.5*coth(Bu*0.5))*sinh(Z(z))*cos(pi*(x-L)/L)
- n()*Bu*cosh(Z(z))*sin(pi*(x-L)/L))
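# A note on the expression above (an interpretation based on the standard Eady test
# case, not stated in this script): Bu plays the role of the Burger number, and Z(z),
# n() and coth() give the vertical structure of the classic Eady normal mode, with
# `a` setting the amplitude of the perturbation.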
theta_pert = Function(Vt).interpolate(theta_exp)
# set theta0
theta0.interpolate(theta_b + theta_pert)
# calculate hydrostatic Pi
rho_b = Function(Vr)
compressible_hydrostatic_balance(state, theta_b, rho_b)
compressible_hydrostatic_balance(state, theta0, rho0)
# set Pi0
Pi0 = calculate_Pi0(state, theta0, rho0)
state.parameters.Pi0 = Pi0
# set x component of velocity
cp = state.parameters.cp
dthetady = state.parameters.dthetady
Pi = thermodynamics.pi(state.parameters, rho0, theta0)
u = cp*dthetady/f*(Pi-Pi0)
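# (An interpretation, not stated in the script: Pi is the Exner pressure, and this u
# is the thermal-wind-balanced along-channel velocity associated with the imposed
# meridional theta gradient dthetady of the Eady basic state.)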
# set y component of velocity
v = Function(Vr).assign(0.)
compressible_eady_initial_v(state, theta0, rho0, v)
# set initial u
u_exp = as_vector([u, v, 0.])
u0.project(u_exp)
# pass these initial conditions to the state.initialise method
state.initialise([('u', u0),
('rho', rho0),
('theta', theta0)])
# set the background profiles
state.set_reference_profiles([('rho', rho_b),
('theta', theta_b)])
##############################################################################
# Set up advection schemes
##############################################################################
# set up the advection equations for each prognostic field; rho is advected in
# continuity form and theta uses an SUPG-stabilised scheme
ueqn = AdvectionEquation(state, Vu)
rhoeqn = AdvectionEquation(state, Vr, equation_form="continuity")
thetaeqn = SUPGAdvection(state, Vt)
advected_fields = []
advected_fields.append(("u", SSPRK3(state, u0, ueqn)))
advected_fields.append(("rho", SSPRK3(state, rho0, rhoeqn)))
advected_fields.append(("theta", SSPRK3(state, theta0, thetaeqn)))
##############################################################################
# Set up linear solver for the timestepping scheme
##############################################################################
linear_solver = CompressibleSolver(state)
##############################################################################
# Set up forcing
##############################################################################
forcing = CompressibleEadyForcing(state)
##############################################################################
# build time stepper
##############################################################################
stepper = CrankNicolson(state, advected_fields, linear_solver, forcing)
##############################################################################
# Run!
##############################################################################
stepper.run(t=0, tmax=tmax)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import random
import shutil
import sys
import tempfile
import time
import unittest
if sys.version >= '3':
unicode = str
from datetime import date, datetime
from decimal import Decimal
from pyspark import TaskContext
from pyspark.rdd import PythonEvalType
from pyspark.sql import Column
from pyspark.sql.functions import array, col, expr, lit, sum, struct, udf, pandas_udf, \
PandasUDFType
from pyspark.sql.types import Row
from pyspark.sql.types import *
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, test_compiled,\
test_not_compiled_message, have_pandas, have_pyarrow, pandas_requirement_message, \
pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
if have_pyarrow:
import pyarrow as pa
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class ScalarPandasUDFTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.sc.environment["TZ"] = tz
cls.spark.conf.set("spark.sql.session.timeZone", tz)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
@property
def nondeterministic_vectorized_udf(self):
import numpy as np
@pandas_udf('double')
def random_udf(v):
return pd.Series(np.random.random(len(v)))
random_udf = random_udf.asNondeterministic()
return random_udf
@property
def nondeterministic_vectorized_iter_udf(self):
import numpy as np
@pandas_udf('double', PandasUDFType.SCALAR_ITER)
def random_udf(it):
for v in it:
yield pd.Series(np.random.random(len(v)))
random_udf = random_udf.asNondeterministic()
return random_udf
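# A note on the two UDF flavours exercised throughout these tests: a SCALAR pandas UDF
# is called once per Arrow batch with pandas.Series arguments and must return a result
# of the same length, while a SCALAR_ITER UDF receives an iterator over those batches
# and yields one result per input batch, which lets expensive per-task setup run once
# before the loop. A minimal sketch (plus_one is an illustrative name only):
#
#   @pandas_udf('double', PandasUDFType.SCALAR_ITER)
#   def plus_one(batches):
#       # one-time setup could go here
#       for v in batches:
#           yield v + 1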
def test_pandas_udf_tokenize(self):
tokenize = pandas_udf(lambda s: s.apply(lambda str: str.split(' ')),
ArrayType(StringType()))
self.assertEqual(tokenize.returnType, ArrayType(StringType()))
df = self.spark.createDataFrame([("hi boo",), ("bye boo",)], ["vals"])
result = df.select(tokenize("vals").alias("hi"))
self.assertEqual([Row(hi=[u'hi', u'boo']), Row(hi=[u'bye', u'boo'])], result.collect())
def test_pandas_udf_nested_arrays(self):
tokenize = pandas_udf(lambda s: s.apply(lambda str: [str.split(' ')]),
ArrayType(ArrayType(StringType())))
self.assertEqual(tokenize.returnType, ArrayType(ArrayType(StringType())))
df = self.spark.createDataFrame([("hi boo",), ("bye boo",)], ["vals"])
result = df.select(tokenize("vals").alias("hi"))
self.assertEqual([Row(hi=[[u'hi', u'boo']]), Row(hi=[[u'bye', u'boo']])], result.collect())
def test_vectorized_udf_basic(self):
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'),
array(col('id')).alias('array_long'))
f = lambda x: x
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
str_f = pandas_udf(f, StringType(), udf_type)
int_f = pandas_udf(f, IntegerType(), udf_type)
long_f = pandas_udf(f, LongType(), udf_type)
float_f = pandas_udf(f, FloatType(), udf_type)
double_f = pandas_udf(f, DoubleType(), udf_type)
decimal_f = pandas_udf(f, DecimalType(), udf_type)
bool_f = pandas_udf(f, BooleanType(), udf_type)
array_long_f = pandas_udf(f, ArrayType(LongType()), udf_type)
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')), array_long_f('array_long'))
self.assertEquals(df.collect(), res.collect())
def test_register_nondeterministic_vectorized_udf_basic(self):
random_pandas_udf = pandas_udf(
lambda x: random.randint(6, 6) + x, IntegerType()).asNondeterministic()
self.assertEqual(random_pandas_udf.deterministic, False)
self.assertEqual(random_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
nondeterministic_pandas_udf = self.spark.catalog.registerFunction(
"randomPandasUDF", random_pandas_udf)
self.assertEqual(nondeterministic_pandas_udf.deterministic, False)
self.assertEqual(nondeterministic_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
[row] = self.spark.sql("SELECT randomPandasUDF(1)").collect()
self.assertEqual(row[0], 7)
def random_iter_udf(it):
for i in it:
yield random.randint(6, 6) + i
random_pandas_iter_udf = pandas_udf(
random_iter_udf, IntegerType(), PandasUDFType.SCALAR_ITER).asNondeterministic()
self.assertEqual(random_pandas_iter_udf.deterministic, False)
self.assertEqual(random_pandas_iter_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF)
nondeterministic_pandas_iter_udf = self.spark.catalog.registerFunction(
"randomPandasIterUDF", random_pandas_iter_udf)
self.assertEqual(nondeterministic_pandas_iter_udf.deterministic, False)
self.assertEqual(nondeterministic_pandas_iter_udf.evalType,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF)
[row] = self.spark.sql("SELECT randomPandasIterUDF(1)").collect()
self.assertEqual(row[0], 7)
def test_vectorized_udf_null_boolean(self):
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
bool_f = pandas_udf(lambda x: x, BooleanType(), udf_type)
res = df.select(bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
byte_f = pandas_udf(lambda x: x, ByteType(), udf_type)
res = df.select(byte_f(col('byte')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
short_f = pandas_udf(lambda x: x, ShortType(), udf_type)
res = df.select(short_f(col('short')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
int_f = pandas_udf(lambda x: x, IntegerType(), udf_type)
res = df.select(int_f(col('int')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
long_f = pandas_udf(lambda x: x, LongType(), udf_type)
res = df.select(long_f(col('long')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
float_f = pandas_udf(lambda x: x, FloatType(), udf_type)
res = df.select(float_f(col('float')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
double_f = pandas_udf(lambda x: x, DoubleType(), udf_type)
res = df.select(double_f(col('double')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_decimal(self):
data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)]
schema = StructType().add("decimal", DecimalType(38, 18))
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18), udf_type)
res = df.select(decimal_f(col('decimal')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
str_f = pandas_udf(lambda x: x, StringType(), udf_type)
res = df.select(str_f(col('str')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_string_in_udf(self):
df = self.spark.range(10)
scalar_f = lambda x: pd.Series(map(str, x))
def iter_f(it):
for i in it:
yield scalar_f(i)
for f, udf_type in [(scalar_f, PandasUDFType.SCALAR), (iter_f, PandasUDFType.SCALAR_ITER)]:
str_f = pandas_udf(f, StringType(), udf_type)
actual = df.select(str_f(col('id')))
expected = df.select(col('id').cast('string'))
self.assertEquals(expected.collect(), actual.collect())
def test_vectorized_udf_datatype_string(self):
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
str_f = pandas_udf(f, 'string', udf_type)
int_f = pandas_udf(f, 'integer', udf_type)
long_f = pandas_udf(f, 'long', udf_type)
float_f = pandas_udf(f, 'float', udf_type)
double_f = pandas_udf(f, 'double', udf_type)
decimal_f = pandas_udf(f, 'decimal(38, 18)', udf_type)
bool_f = pandas_udf(f, 'boolean', udf_type)
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_binary(self):
data = [(bytearray(b"a"),), (None,), (bytearray(b"bb"),), (bytearray(b"ccc"),)]
schema = StructType().add("binary", BinaryType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
str_f = pandas_udf(lambda x: x, BinaryType(), udf_type)
res = df.select(str_f(col('binary')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_array_type(self):
data = [([1, 2],), ([3, 4],)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()), udf_type)
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_null_array(self):
data = [([1, 2],), (None,), (None,), ([3, 4],), (None,)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()), udf_type)
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_struct_type(self):
df = self.spark.range(10)
return_type = StructType([
StructField('id', LongType()),
StructField('str', StringType())])
def scalar_func(id):
return pd.DataFrame({'id': id, 'str': id.apply(unicode)})
def iter_func(it):
for id in it:
yield scalar_func(id)
for func, udf_type in [(scalar_func, PandasUDFType.SCALAR),
(iter_func, PandasUDFType.SCALAR_ITER)]:
f = pandas_udf(func, returnType=return_type, functionType=udf_type)
expected = df.select(struct(col('id'), col('id').cast('string').alias('str'))
.alias('struct')).collect()
actual = df.select(f(col('id')).alias('struct')).collect()
self.assertEqual(expected, actual)
g = pandas_udf(func, 'id: long, str: string', functionType=udf_type)
actual = df.select(g(col('id')).alias('struct')).collect()
self.assertEqual(expected, actual)
struct_f = pandas_udf(lambda x: x, return_type, functionType=udf_type)
actual = df.select(struct_f(struct(col('id'), col('id').cast('string').alias('str'))))
self.assertEqual(expected, actual.collect())
def test_vectorized_udf_struct_complex(self):
df = self.spark.range(10)
return_type = StructType([
StructField('ts', TimestampType()),
StructField('arr', ArrayType(LongType()))])
def _scalar_f(id):
return pd.DataFrame({'ts': id.apply(lambda i: pd.Timestamp(i)),
'arr': id.apply(lambda i: [i, i + 1])})
scalar_f = pandas_udf(_scalar_f, returnType=return_type)
@pandas_udf(returnType=return_type, functionType=PandasUDFType.SCALAR_ITER)
def iter_f(it):
for id in it:
yield _scalar_f(id)
for f, udf_type in [(scalar_f, PandasUDFType.SCALAR), (iter_f, PandasUDFType.SCALAR_ITER)]:
actual = df.withColumn('f', f(col('id'))).collect()
for i, row in enumerate(actual):
id, f = row
self.assertEqual(i, id)
self.assertEqual(pd.Timestamp(i).to_pydatetime(), f[0])
self.assertListEqual([i, i + 1], f[1])
def test_vectorized_udf_nested_struct(self):
nested_type = StructType([
StructField('id', IntegerType()),
StructField('nested', StructType([
StructField('foo', StringType()),
StructField('bar', FloatType())
]))
])
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Invalid returnType with scalar Pandas UDFs'):
pandas_udf(lambda x: x, returnType=nested_type, functionType=udf_type)
def test_vectorized_udf_complex(self):
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'),
col('id').cast('double').alias('c'))
scalar_add = pandas_udf(lambda x, y: x + y, IntegerType())
scalar_power2 = pandas_udf(lambda x: 2 ** x, IntegerType())
scalar_mul = pandas_udf(lambda x, y: x * y, DoubleType())
@pandas_udf(IntegerType(), PandasUDFType.SCALAR_ITER)
def iter_add(it):
for x, y in it:
yield x + y
@pandas_udf(IntegerType(), PandasUDFType.SCALAR_ITER)
def iter_power2(it):
for x in it:
yield 2 ** x
@pandas_udf(DoubleType(), PandasUDFType.SCALAR_ITER)
def iter_mul(it):
for x, y in it:
yield x * y
for add, power2, mul in [(scalar_add, scalar_power2, scalar_mul),
(iter_add, iter_power2, iter_mul)]:
res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c')))
expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c'))
self.assertEquals(expected.collect(), res.collect())
def test_vectorized_udf_exception(self):
df = self.spark.range(10)
scalar_raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType())
@pandas_udf(LongType(), PandasUDFType.SCALAR_ITER)
def iter_raise_exception(it):
for x in it:
yield x * (1 / 0)
for raise_exception in [scalar_raise_exception, iter_raise_exception]:
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_invalid_length(self):
df = self.spark.range(10)
raise_exception = pandas_udf(lambda _: pd.Series(1), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Result vector from pandas_udf was not the required length'):
df.select(raise_exception(col('id'))).collect()
@pandas_udf(LongType(), PandasUDFType.SCALAR_ITER)
def iter_udf_wrong_output_size(it):
for _ in it:
yield pd.Series(1)
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
"The number of output rows of pandas iterator UDF should be "
"the same with input rows"):
df.select(iter_udf_wrong_output_size(col('id'))).collect()
@pandas_udf(LongType(), PandasUDFType.SCALAR_ITER)
def iter_udf_not_reading_all_input(it):
for batch in it:
batch_len = len(batch)
yield pd.Series([1] * batch_len)
break
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
df1 = self.spark.range(10).repartition(1)
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
"SQL_SCALAR_PANDAS_ITER_UDF should exhaust the input iterator"):
df1.select(iter_udf_not_reading_all_input(col('id'))).collect()
def test_vectorized_udf_chained(self):
df = self.spark.range(10)
scalar_f = pandas_udf(lambda x: x + 1, LongType())
scalar_g = pandas_udf(lambda x: x - 1, LongType())
iter_f = pandas_udf(lambda it: map(lambda x: x + 1, it), LongType(),
PandasUDFType.SCALAR_ITER)
iter_g = pandas_udf(lambda it: map(lambda x: x - 1, it), LongType(),
PandasUDFType.SCALAR_ITER)
for f, g in [(scalar_f, scalar_g), (iter_f, iter_g)]:
res = df.select(g(f(col('id'))))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_chained_struct_type(self):
df = self.spark.range(10)
return_type = StructType([
StructField('id', LongType()),
StructField('str', StringType())])
@pandas_udf(return_type)
def scalar_f(id):
return pd.DataFrame({'id': id, 'str': id.apply(unicode)})
scalar_g = pandas_udf(lambda x: x, return_type)
@pandas_udf(return_type, PandasUDFType.SCALAR_ITER)
def iter_f(it):
for id in it:
yield pd.DataFrame({'id': id, 'str': id.apply(unicode)})
iter_g = pandas_udf(lambda x: x, return_type, PandasUDFType.SCALAR_ITER)
expected = df.select(struct(col('id'), col('id').cast('string').alias('str'))
.alias('struct')).collect()
for f, g in [(scalar_f, scalar_g), (iter_f, iter_g)]:
actual = df.select(g(f(col('id'))).alias('struct')).collect()
self.assertEqual(expected, actual)
def test_vectorized_udf_wrong_return_type(self):
with QuietTest(self.sc):
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(LongType(), LongType()), udf_type)
def test_vectorized_udf_return_scalar(self):
df = self.spark.range(10)
scalar_f = pandas_udf(lambda x: 1.0, DoubleType())
iter_f = pandas_udf(lambda it: map(lambda x: 1.0, it), DoubleType(),
PandasUDFType.SCALAR_ITER)
for f in [scalar_f, iter_f]:
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Return.*type.*Series'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_decorator(self):
df = self.spark.range(10)
@pandas_udf(returnType=LongType())
def scalar_identity(x):
return x
@pandas_udf(returnType=LongType(), functionType=PandasUDFType.SCALAR_ITER)
def iter_identity(x):
return x
for identity in [scalar_identity, iter_identity]:
res = df.select(identity(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_empty_partition(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
f = pandas_udf(lambda x: x, LongType(), udf_type)
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_struct_with_empty_partition(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))\
.withColumn('name', lit('John Doe'))
@pandas_udf("first string, last string")
def scalar_split_expand(n):
return n.str.split(expand=True)
@pandas_udf("first string, last string", PandasUDFType.SCALAR_ITER)
def iter_split_expand(it):
for n in it:
yield n.str.split(expand=True)
for split_expand in [scalar_split_expand, iter_split_expand]:
result = df.select(split_expand('name')).collect()
self.assertEqual(1, len(result))
row = result[0]
self.assertEqual('John', row[0]['first'])
self.assertEqual('Doe', row[0]['last'])
def test_vectorized_udf_varargs(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
scalar_f = pandas_udf(lambda *v: v[0], LongType())
@pandas_udf(LongType(), PandasUDFType.SCALAR_ITER)
def iter_f(it):
for v in it:
yield v[0]
for f in [scalar_f, iter_f]:
res = df.select(f(col('id'), col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_unsupported_types(self):
with QuietTest(self.sc):
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(StringType(), IntegerType()), udf_type)
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*ArrayType.StructType'):
pandas_udf(lambda x: x,
ArrayType(StructType([StructField('a', IntegerType())])), udf_type)
def test_vectorized_udf_dates(self):
schema = StructType().add("idx", LongType()).add("date", DateType())
data = [(0, date(1969, 1, 1),),
(1, date(2012, 2, 2),),
(2, None,),
(3, date(2100, 4, 4),),
(4, date(2262, 4, 12),)]
df = self.spark.createDataFrame(data, schema=schema)
def scalar_check_data(idx, date, date_copy):
msgs = []
is_equal = date.isnull()
for i in range(len(idx)):
if (is_equal[i] and data[idx[i]][1] is None) or \
date[i] == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"date values are not equal (date='%s': data[%d][1]='%s')"
% (date[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
def iter_check_data(it):
for idx, date, date_copy in it:
yield scalar_check_data(idx, date, date_copy)
pandas_scalar_check_data = pandas_udf(scalar_check_data, StringType())
pandas_iter_check_data = pandas_udf(iter_check_data, StringType(),
PandasUDFType.SCALAR_ITER)
for check_data, udf_type in [(pandas_scalar_check_data, PandasUDFType.SCALAR),
(pandas_iter_check_data, PandasUDFType.SCALAR_ITER)]:
date_copy = pandas_udf(lambda t: t, returnType=DateType(), functionType=udf_type)
df = df.withColumn("date_copy", date_copy(col("date")))
result = df.withColumn("check_data",
check_data(col("idx"), col("date"), col("date_copy"))).collect()
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "date" col
self.assertEquals(data[i][1], result[i][2]) # "date_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_timestamps(self):
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(0, datetime(1969, 1, 1, 1, 1, 1)),
(1, datetime(2012, 2, 2, 2, 2, 2)),
(2, None),
(3, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
def scalar_check_data(idx, timestamp, timestamp_copy):
msgs = []
is_equal = timestamp.isnull() # use this array to check values are equal
for i in range(len(idx)):
# Check that timestamps are as expected in the UDF
if (is_equal[i] and data[idx[i]][1] is None) or \
timestamp[i].to_pydatetime() == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"timestamp values are not equal (timestamp='%s': data[%d][1]='%s')"
% (timestamp[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
def iter_check_data(it):
for idx, timestamp, timestamp_copy in it:
yield scalar_check_data(idx, timestamp, timestamp_copy)
pandas_scalar_check_data = pandas_udf(scalar_check_data, StringType())
pandas_iter_check_data = pandas_udf(iter_check_data, StringType(),
PandasUDFType.SCALAR_ITER)
for check_data, udf_type in [(pandas_scalar_check_data, PandasUDFType.SCALAR),
(pandas_iter_check_data, PandasUDFType.SCALAR_ITER)]:
# Check that a timestamp passed through a pandas_udf is not altered by timezone
# calculations
f_timestamp_copy = pandas_udf(lambda t: t,
returnType=TimestampType(), functionType=udf_type)
df = df.withColumn("timestamp_copy", f_timestamp_copy(col("timestamp")))
result = df.withColumn("check_data", check_data(col("idx"), col("timestamp"),
col("timestamp_copy"))).collect()
# Check that collection values are correct
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "timestamp" col
self.assertEquals(data[i][1], result[i][2]) # "timestamp_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_return_timestamp_tz(self):
df = self.spark.range(10)
@pandas_udf(returnType=TimestampType())
def scalar_gen_timestamps(id):
ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
return pd.Series(ts)
@pandas_udf(returnType=TimestampType(), functionType=PandasUDFType.SCALAR_ITER)
def iter_gen_timestamps(it):
for id in it:
ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
yield pd.Series(ts)
for gen_timestamps in [scalar_gen_timestamps, iter_gen_timestamps]:
result = df.withColumn("ts", gen_timestamps(col("id"))).collect()
spark_ts_t = TimestampType()
for r in result:
i, ts = r
ts_tz = pd.Timestamp(i, unit='D', tz='America/Los_Angeles').to_pydatetime()
expected = spark_ts_t.fromInternal(spark_ts_t.toInternal(ts_tz))
self.assertEquals(expected, ts)
def test_vectorized_udf_check_config(self):
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
df = self.spark.range(10, numPartitions=1)
@pandas_udf(returnType=LongType())
def scalar_check_records_per_batch(x):
return pd.Series(x.size).repeat(x.size)
@pandas_udf(returnType=LongType(), functionType=PandasUDFType.SCALAR_ITER)
def iter_check_records_per_batch(it):
for x in it:
yield pd.Series(x.size).repeat(x.size)
for check_records_per_batch in [scalar_check_records_per_batch,
iter_check_records_per_batch]:
result = df.select(check_records_per_batch(col("id"))).collect()
for (r,) in result:
self.assertTrue(r <= 3)
def test_vectorized_udf_timestamps_respect_session_timezone(self):
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(1, datetime(1969, 1, 1, 1, 1, 1)),
(2, datetime(2012, 2, 2, 2, 2, 2)),
(3, None),
(4, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
scalar_internal_value = pandas_udf(
lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType())
@pandas_udf(LongType(), PandasUDFType.SCALAR_ITER)
def iter_internal_value(it):
for ts in it:
yield ts.apply(lambda ts: ts.value if ts is not pd.NaT else None)
for internal_value, udf_type in [(scalar_internal_value, PandasUDFType.SCALAR),
(iter_internal_value, PandasUDFType.SCALAR_ITER)]:
f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType(), udf_type)
timezone = "America/Los_Angeles"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_la = df_la.select(col("idx"), col("internal_value")).collect()
# Correct result_la by adjusting for the 3-hour difference between Los Angeles and New York
diff = 3 * 60 * 60 * 1000 * 1000 * 1000
result_la_corrected = \
df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect()
timezone = "America/New_York"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect()
self.assertNotEqual(result_ny, result_la)
self.assertEqual(result_ny, result_la_corrected)
def test_nondeterministic_vectorized_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
@pandas_udf('double')
def scalar_plus_ten(v):
return v + 10
@pandas_udf('double', PandasUDFType.SCALAR_ITER)
def iter_plus_ten(it):
for v in it:
yield v + 10
for plus_ten in [scalar_plus_ten, iter_plus_ten]:
random_udf = self.nondeterministic_vectorized_udf
df = self.spark.range(10).withColumn('rand', random_udf(col('id')))
result1 = df.withColumn('plus_ten(rand)', plus_ten(df['rand'])).toPandas()
self.assertEqual(random_udf.deterministic, False)
self.assertTrue(result1['plus_ten(rand)'].equals(result1['rand'] + 10))
def test_nondeterministic_vectorized_udf_in_aggregate(self):
df = self.spark.range(10)
for random_udf in [self.nondeterministic_vectorized_udf,
self.nondeterministic_vectorized_iter_udf]:
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.groupby(df.id).agg(sum(random_udf(df.id))).collect()
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.agg(sum(random_udf(df.id))).collect()
def test_register_vectorized_udf_basic(self):
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'))
scalar_original_add = pandas_udf(lambda x, y: x + y, IntegerType())
self.assertEqual(scalar_original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(IntegerType(), PandasUDFType.SCALAR_ITER)
def iter_original_add(it):
for x, y in it:
yield x + y
self.assertEqual(iter_original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF)
for original_add in [scalar_original_add, iter_original_add]:
self.assertEqual(original_add.deterministic, True)
new_add = self.spark.catalog.registerFunction("add1", original_add)
res1 = df.select(new_add(col('a'), col('b')))
res2 = self.spark.sql(
"SELECT add1(t.a, t.b) FROM (SELECT id as a, id as b FROM range(10)) t")
expected = df.select(expr('a + b'))
self.assertEquals(expected.collect(), res1.collect())
self.assertEquals(expected.collect(), res2.collect())
def test_scalar_iter_udf_init(self):
import numpy as np
@pandas_udf('int', PandasUDFType.SCALAR_ITER)
def rng(batch_iter):
context = TaskContext.get()
part = context.partitionId()
np.random.seed(part)
for batch in batch_iter:
yield pd.Series(np.random.randint(100, size=len(batch)))
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 2}):
df = self.spark.range(10, numPartitions=2).select(rng(col("id").alias("v")))
result1 = df.collect()
result2 = df.collect()
self.assertEqual(result1, result2,
"SCALAR ITER UDF can initialize state and produce deterministic RNG")
def test_scalar_iter_udf_close(self):
@pandas_udf('int', PandasUDFType.SCALAR_ITER)
def test_close(batch_iter):
try:
for batch in batch_iter:
yield batch
finally:
raise RuntimeError("reached finally block")
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "reached finally block"):
self.spark.range(1).select(test_close(col("id"))).collect()
def test_scalar_iter_udf_close_early(self):
tmp_dir = tempfile.mkdtemp()
try:
tmp_file = tmp_dir + '/reach_finally_block'
@pandas_udf('int', PandasUDFType.SCALAR_ITER)
def test_close(batch_iter):
generator_exit_caught = False
try:
for batch in batch_iter:
yield batch
time.sleep(1.0)  # avoid the function finishing too fast
except GeneratorExit as ge:
generator_exit_caught = True
raise ge
finally:
assert generator_exit_caught, "Generator exit exception was not caught."
open(tmp_file, 'a').close()
with QuietTest(self.sc):
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 1,
"spark.sql.pandas.udf.buffer.size": 4}):
self.spark.range(10).repartition(1) \
.select(test_close(col("id"))).limit(2).collect()
# Wait here because the Python UDF worker takes some time to detect that the
# JVM-side socket has closed, which then triggers GeneratorExit in the generator.
# The wait timeout is 10s.
for i in range(100):
time.sleep(0.1)
if os.path.exists(tmp_file):
break
assert os.path.exists(tmp_file), "finally block not reached."
finally:
shutil.rmtree(tmp_dir)
# Regression test for SPARK-23314
def test_timestamp_dst(self):
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime(2015, 11, 1, 0, 30),
datetime(2015, 11, 1, 1, 30),
datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
foo_udf = pandas_udf(lambda x: x, 'timestamp', udf_type)
result = df.withColumn('time', foo_udf(df.time))
self.assertEquals(df.collect(), result.collect())
@unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.")
def test_type_annotation(self):
# Regression test to check if type hints can be used. See SPARK-23569.
# Note that it throws an error during compilation in lower Python versions if 'exec'
# is not used. Also, note that we explicitly use another dictionary to avoid modifications
# in the current 'locals()'.
#
# Hyukjin: I think it's an ugly way to test issues about syntax specific in
# higher versions of Python, which we shouldn't encourage. This was the last resort
# I could come up with at that time.
_locals = {}
exec(
"import pandas as pd\ndef noop(col: pd.Series) -> pd.Series: return col",
_locals)
df = self.spark.range(1).select(pandas_udf(f=_locals['noop'], returnType='bigint')('id'))
self.assertEqual(df.first()[0], 0)
def test_mixed_udf(self):
df = self.spark.range(0, 1).toDF('v')
# Test a mixture of regular Python UDFs and Pandas UDFs.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
@pandas_udf('int')
def f2_scalar(x):
assert type(x) == pd.Series
return x + 10
@pandas_udf('int', PandasUDFType.SCALAR_ITER)
def f2_iter(it):
for x in it:
assert type(x) == pd.Series
yield x + 10
@udf('int')
def f3(x):
assert type(x) == int
return x + 100
@pandas_udf('int')
def f4_scalar(x):
assert type(x) == pd.Series
return x + 1000
@pandas_udf('int', PandasUDFType.SCALAR_ITER)
def f4_iter(it):
for x in it:
assert type(x) == pd.Series
yield x + 1000
expected_chained_1 = df.withColumn('f2_f1', df['v'] + 11).collect()
expected_chained_2 = df.withColumn('f3_f2_f1', df['v'] + 111).collect()
expected_chained_3 = df.withColumn('f4_f3_f2_f1', df['v'] + 1111).collect()
expected_chained_4 = df.withColumn('f4_f2_f1', df['v'] + 1011).collect()
expected_chained_5 = df.withColumn('f4_f3_f1', df['v'] + 1101).collect()
expected_multi = df \
.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f4', df['v'] + 1000) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f4_f1', df['v'] + 1001) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f4_f2', df['v'] + 1010) \
.withColumn('f4_f3', df['v'] + 1100) \
.withColumn('f3_f2_f1', df['v'] + 111) \
.withColumn('f4_f2_f1', df['v'] + 1011) \
.withColumn('f4_f3_f1', df['v'] + 1101) \
.withColumn('f4_f3_f2', df['v'] + 1110) \
.withColumn('f4_f3_f2_f1', df['v'] + 1111) \
.collect()
for f2, f4 in [(f2_scalar, f4_scalar), (f2_scalar, f4_iter),
(f2_iter, f4_scalar), (f2_iter, f4_iter)]:
# Test single expression with chained UDFs
df_chained_1 = df.withColumn('f2_f1', f2(f1(df['v'])))
df_chained_2 = df.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
df_chained_3 = df.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(df['v'])))))
df_chained_4 = df.withColumn('f4_f2_f1', f4(f2(f1(df['v']))))
df_chained_5 = df.withColumn('f4_f3_f1', f4(f3(f1(df['v']))))
self.assertEquals(expected_chained_1, df_chained_1.collect())
self.assertEquals(expected_chained_2, df_chained_2.collect())
self.assertEquals(expected_chained_3, df_chained_3.collect())
self.assertEquals(expected_chained_4, df_chained_4.collect())
self.assertEquals(expected_chained_5, df_chained_5.collect())
# Test multiple mixed UDF expressions in a single projection
df_multi_1 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(col('f1'))) \
.withColumn('f3_f1', f3(col('f1'))) \
.withColumn('f4_f1', f4(col('f1'))) \
.withColumn('f3_f2', f3(col('f2'))) \
.withColumn('f4_f2', f4(col('f2'))) \
.withColumn('f4_f3', f4(col('f3'))) \
.withColumn('f3_f2_f1', f3(col('f2_f1'))) \
.withColumn('f4_f2_f1', f4(col('f2_f1'))) \
.withColumn('f4_f3_f1', f4(col('f3_f1'))) \
.withColumn('f4_f3_f2', f4(col('f3_f2'))) \
.withColumn('f4_f3_f2_f1', f4(col('f3_f2_f1')))
# Test mixed udfs in a single expression
df_multi_2 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(f1(col('v')))) \
.withColumn('f3_f1', f3(f1(col('v')))) \
.withColumn('f4_f1', f4(f1(col('v')))) \
.withColumn('f3_f2', f3(f2(col('v')))) \
.withColumn('f4_f2', f4(f2(col('v')))) \
.withColumn('f4_f3', f4(f3(col('v')))) \
.withColumn('f3_f2_f1', f3(f2(f1(col('v'))))) \
.withColumn('f4_f2_f1', f4(f2(f1(col('v'))))) \
.withColumn('f4_f3_f1', f4(f3(f1(col('v'))))) \
.withColumn('f4_f3_f2', f4(f3(f2(col('v'))))) \
.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(col('v'))))))
self.assertEquals(expected_multi, df_multi_1.collect())
self.assertEquals(expected_multi, df_multi_2.collect())
def test_mixed_udf_and_sql(self):
df = self.spark.range(0, 1).toDF('v')
# Test a mixture of regular UDFs, Pandas UDFs and SQL expressions.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
def f2(x):
assert type(x) == Column
return x + 10
@pandas_udf('int')
def f3s(x):
assert type(x) == pd.Series
return x + 100
@pandas_udf('int', PandasUDFType.SCALAR_ITER)
def f3i(it):
for x in it:
assert type(x) == pd.Series
yield x + 100
expected = df.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f1_f2', df['v'] + 11) \
.withColumn('f1_f3', df['v'] + 101) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f2_f3', df['v'] + 110) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f1_f2_f3', df['v'] + 111) \
.withColumn('f1_f3_f2', df['v'] + 111) \
.withColumn('f2_f1_f3', df['v'] + 111) \
.withColumn('f2_f3_f1', df['v'] + 111) \
.withColumn('f3_f1_f2', df['v'] + 111) \
.withColumn('f3_f2_f1', df['v'] + 111) \
.collect()
for f3 in [f3s, f3i]:
df1 = df.withColumn('f1', f1(df['v'])) \
.withColumn('f2', f2(df['v'])) \
.withColumn('f3', f3(df['v'])) \
.withColumn('f1_f2', f1(f2(df['v']))) \
.withColumn('f1_f3', f1(f3(df['v']))) \
.withColumn('f2_f1', f2(f1(df['v']))) \
.withColumn('f2_f3', f2(f3(df['v']))) \
.withColumn('f3_f1', f3(f1(df['v']))) \
.withColumn('f3_f2', f3(f2(df['v']))) \
.withColumn('f1_f2_f3', f1(f2(f3(df['v'])))) \
.withColumn('f1_f3_f2', f1(f3(f2(df['v'])))) \
.withColumn('f2_f1_f3', f2(f1(f3(df['v'])))) \
.withColumn('f2_f3_f1', f2(f3(f1(df['v'])))) \
.withColumn('f3_f1_f2', f3(f1(f2(df['v'])))) \
.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
self.assertEquals(expected, df1.collect())
# SPARK-24721
@unittest.skipIf(not test_compiled, test_not_compiled_message)
def test_datasource_with_udf(self):
# Same as SQLTests.test_datasource_with_udf, but with Pandas UDF
# This needs to be a separate test because the Arrow dependency is optional
import numpy as np
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.range(1).write.mode("overwrite").format('csv').save(path)
filesource_df = self.spark.read.option('inferSchema', True).csv(path).toDF('i')
datasource_df = self.spark.read \
.format("org.apache.spark.sql.sources.SimpleScanSource") \
.option('from', 0).option('to', 1).load().toDF('i')
datasource_v2_df = self.spark.read \
.format("org.apache.spark.sql.connector.SimpleDataSourceV2") \
.load().toDF('i', 'j')
c1 = pandas_udf(lambda x: x + 1, 'int')(lit(1))
c2 = pandas_udf(lambda x: x + 1, 'int')(col('i'))
f1 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(lit(1))
f2 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(col('i'))
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c1)
expected = df.withColumn('c', lit(2))
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c2)
expected = df.withColumn('c', col('i') + 1)
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
for f in [f1, f2]:
result = df.filter(f)
self.assertEquals(0, result.count())
finally:
shutil.rmtree(path)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_scalar import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
#! /usr/bin/env python
"""Tool for measuring execution time of small code snippets.
This module avoids a number of common traps for measuring execution
times. See also Tim Peters' introduction to the Algorithms chapter in
the Python Cookbook, published by O'Reilly.
Library usage: see the Timer class.
Command line usage:
python timeit.py [-n N] [-r N] [-s S] [-t] [-c] [-h] [--] [statement]
Options:
-n/--number N: how many times to execute 'statement' (default: see below)
-r/--repeat N: how many times to repeat the timer (default 3)
-s/--setup S: statement to be executed once initially (default 'pass')
-t/--time: use time.time() (default on Unix)
-c/--clock: use time.clock() (default on Windows)
-v/--verbose: print raw timing results; repeat for more digits precision
-h/--help: print this usage message and exit
--: separate options from statement, use when statement starts with -
statement: statement to be timed (default 'pass')
A multi-line statement may be given by specifying each line as a
separate argument; indented lines are possible by enclosing an
argument in quotes and using leading spaces. Multiple -s options are
treated similarly.
If -n is not given, a suitable number of loops is calculated by trying
successive powers of 10 until the total time is at least 0.2 seconds.
The difference in default timer function is because on Windows,
clock() has microsecond granularity but time()'s granularity is 1/60th
of a second; on Unix, clock() has 1/100th of a second granularity and
time() is much more precise. On either platform, the default timer
functions measure wall clock time, not the CPU time. This means that
other processes running on the same computer may interfere with the
timing. The best thing to do when accurate timing is necessary is to
repeat the timing a few times and use the best time. The -r option is
good for this; the default of 3 repetitions is probably enough in most
cases. On Unix, you can use clock() to measure CPU time.
Note: there is a certain baseline overhead associated with executing a
pass statement. The code here doesn't try to hide it, but you should
be aware of it. The baseline overhead can be measured by invoking the
program without arguments.
The baseline overhead differs between Python versions! Also, to
fairly compare older Python versions to Python 2.3, you may want to
use python -O for the older versions to avoid timing SET_LINENO
instructions.
"""
import gc
import sys
import time
try:
import itertools
except ImportError:
# Must be an older Python version (see timeit() below)
itertools = None
__all__ = ["Timer"]
dummy_src_name = "<timeit-src>"
default_number = 1000000
default_repeat = 3
if sys.platform == "win32":
# On Windows, the best timer is time.clock()
default_timer = time.clock
else:
# On most other platforms the best timer is time.time()
default_timer = time.time
# Don't change the indentation of the template; the reindent() calls
# in Timer.__init__() depend on setup being indented 4 spaces and stmt
# being indented 8 spaces.
template = """
def inner(_it, _timer):
%(setup)s
_t0 = _timer()
for _i in _it:
%(stmt)s
_t1 = _timer()
return _t1 - _t0
"""
def reindent(src, indent):
"""Helper to reindent a multi-line statement."""
return src.replace("\n", "\n" + " "*indent)
def _template_func(setup, func):
"""Create a timer function. Used if the "statement" is a callable."""
def inner(_it, _timer, _func=func):
setup()
_t0 = _timer()
for _i in _it:
_func()
_t1 = _timer()
return _t1 - _t0
return inner
class Timer:
"""Class for timing execution speed of small code snippets.
The constructor takes a statement to be timed, an additional
statement used for setup, and a timer function. Both statements
default to 'pass'; the timer function is platform-dependent (see
module doc string).
To measure the execution time of the first statement, use the
timeit() method. The repeat() method is a convenience to call
timeit() multiple times and return a list of results.
The statements may contain newlines, as long as they don't contain
multi-line string literals.
"""
def __init__(self, stmt="pass", setup="pass", timer=default_timer):
"""Constructor. See class doc string."""
self.timer = timer
ns = {}
if isinstance(stmt, basestring):
stmt = reindent(stmt, 8)
if isinstance(setup, basestring):
setup = reindent(setup, 4)
src = template % {'stmt': stmt, 'setup': setup}
elif hasattr(setup, '__call__'):
src = template % {'stmt': stmt, 'setup': '_setup()'}
ns['_setup'] = setup
else:
raise ValueError("setup is neither a string nor callable")
self.src = src # Save for traceback display
code = compile(src, dummy_src_name, "exec")
exec code in globals(), ns
self.inner = ns["inner"]
elif hasattr(stmt, '__call__'):
self.src = None
if isinstance(setup, basestring):
_setup = setup
def setup():
exec _setup in globals(), ns
elif not hasattr(setup, '__call__'):
raise ValueError("setup is neither a string nor callable")
self.inner = _template_func(setup, stmt)
else:
raise ValueError("stmt is neither a string nor callable")
def print_exc(self, file=None):
"""Helper to print a traceback from the timed code.
Typical use:
t = Timer(...) # outside the try/except
try:
t.timeit(...) # or t.repeat(...)
except:
t.print_exc()
The advantage over the standard traceback is that source lines
in the compiled template will be displayed.
The optional file argument directs where the traceback is
sent; it defaults to sys.stderr.
"""
import linecache, traceback
if self.src is not None:
linecache.cache[dummy_src_name] = (len(self.src),
None,
self.src.split("\n"),
dummy_src_name)
# else the source is already stored somewhere else
traceback.print_exc(file=file)
def timeit(self, number=default_number):
"""Time 'number' executions of the main statement.
To be precise, this executes the setup statement once, and
then returns the time it takes to execute the main statement
a number of times, as a float measured in seconds. The
argument is the number of times through the loop, defaulting
to one million. The main statement, the setup statement and
the timer function to be used are passed to the constructor.
"""
if itertools:
it = itertools.repeat(None, number)
else:
it = [None] * number
gcold = gc.isenabled()
if '__pypy__' not in sys.builtin_module_names:
gc.disable() # only do that on CPython
try:
timing = self.inner(it, self.timer)
finally:
if gcold:
gc.enable()
return timing
def repeat(self, repeat=default_repeat, number=default_number):
"""Call timeit() a few times.
This is a convenience function that calls the timeit()
repeatedly, returning a list of results. The first argument
specifies how many times to call timeit(), defaulting to 3;
        the second argument specifies the 'number' argument of timeit(), defaulting
to one million.
Note: it's tempting to calculate mean and standard deviation
from the result vector and report these. However, this is not
very useful. In a typical case, the lowest value gives a
lower bound for how fast your machine can run the given code
snippet; higher values in the result vector are typically not
caused by variability in Python's speed, but by other
processes interfering with your timing accuracy. So the min()
of the result is probably the only number you should be
interested in. After that, you should look at the entire
vector and apply common sense rather than statistics.
"""
r = []
for i in range(repeat):
t = self.timeit(number)
r.append(t)
return r
def timeit(stmt="pass", setup="pass", timer=default_timer,
number=default_number):
"""Convenience function to create Timer object and call timeit method."""
return Timer(stmt, setup, timer).timeit(number)
def repeat(stmt="pass", setup="pass", timer=default_timer,
repeat=default_repeat, number=default_number):
"""Convenience function to create Timer object and call repeat method."""
return Timer(stmt, setup, timer).repeat(repeat, number)
def main(args=None):
"""Main program, used when run as a script.
The optional argument specifies the command line to be parsed,
defaulting to sys.argv[1:].
The return value is an exit code to be passed to sys.exit(); it
may be None to indicate success.
When an exception happens during timing, a traceback is printed to
stderr and the return value is 1. Exceptions at other times
(including the template compilation) are not caught.
"""
if args is None:
args = sys.argv[1:]
import getopt
try:
opts, args = getopt.getopt(args, "n:s:r:tcvh",
["number=", "setup=", "repeat=",
"time", "clock", "verbose", "help"])
except getopt.error, err:
print err
print "use -h/--help for command line help"
return 2
timer = default_timer
stmt = "\n".join(args) or "pass"
number = 0 # auto-determine
setup = []
repeat = default_repeat
verbose = 0
precision = 3
for o, a in opts:
if o in ("-n", "--number"):
number = int(a)
if o in ("-s", "--setup"):
setup.append(a)
if o in ("-r", "--repeat"):
repeat = int(a)
if repeat <= 0:
repeat = 1
if o in ("-t", "--time"):
timer = time.time
if o in ("-c", "--clock"):
timer = time.clock
if o in ("-v", "--verbose"):
if verbose:
precision += 1
verbose += 1
if o in ("-h", "--help"):
print __doc__,
return 0
setup = "\n".join(setup) or "pass"
# Include the current directory, so that local imports work (sys.path
# contains the directory of this script, rather than the current
# directory)
import os
sys.path.insert(0, os.curdir)
t = Timer(stmt, setup, timer)
if number == 0:
# determine number so that 0.2 <= total time < 2.0
for i in range(1, 10):
number = 10**i
try:
x = t.timeit(number)
except:
t.print_exc()
return 1
if verbose:
print "%d loops -> %.*g secs" % (number, precision, x)
if x >= 0.2:
break
try:
r = t.repeat(repeat, number)
except:
t.print_exc()
return 1
best = min(r)
if verbose:
print "raw times:", " ".join(["%.*g" % (precision, x) for x in r])
print "%d loops," % number,
usec = best * 1e6 / number
if usec < 1000:
print "best of %d: %.*g usec per loop" % (repeat, precision, usec)
else:
msec = usec / 1000
if msec < 1000:
print "best of %d: %.*g msec per loop" % (repeat, precision, msec)
else:
sec = msec / 1000
print "best of %d: %.*g sec per loop" % (repeat, precision, sec)
return None
if __name__ == "__main__":
sys.exit(main())
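# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); it only uses
# the public API defined above, and the timed snippet is an arbitrary example.
def _example_usage():
    """Time a snippet and keep the minimum over a few repetitions."""
    t = Timer("sorted(xs)", setup="xs = list(range(1000))")
    per_loop = t.timeit(number=10000) / 10000       # seconds per execution
    best = min(t.repeat(repeat=3, number=10000))    # min() filters out noise
    return per_loop, best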
|
|
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configuration and Information storage
-------------------------------------
Provides runtime wrappers for persistent (database) objects with
extra methods for constructing active controllers.
"""
import os
from pycopia import logging
from pycopia.aid import NULL
from pycopia.dictlib import AttrDict
from pycopia.QA.exceptions import ConfigError
from pycopia.QA.db import models
from pycopia.QA.db import config
Config = models.Config
class RootContainer(config.Container):
"""RootContainer is the primary configuration holder.
The root container is special. It contains special object
constructor methods, and a local writeable cache. It also supports
path access using the dot as path separator.
"""
def __init__(self, container, cache):
super(RootContainer, self).__init__(container)
vars(self)["_cache"] = cache
def __repr__(self):
return "<RootContainer>"
def __getattribute__(self, key):
if key == "__dict__":
return object.__getattribute__(self, key)
try:
# check the local cache first, overrides persistent storage
return vars(self)["_cache"].__getitem__(key)
except KeyError:
pass
try:
return super(RootContainer, self).__getattribute__(key)
except AttributeError:
d = vars(self)
node = d["node"]
try:
item = config.get_item(node, key)
if item.value is NULL:
return config.Container(item)
else:
return item.value
except models.DoesNotExist as err:
raise AttributeError("RootContainer: No attribute or key "
"'%s' found: %s" % (key, err))
def __setattr__(self, key, obj):
d = vars(self)
if key in vars(self.__class__):
type.__setattr__(self.__class__, key, obj)
elif key in d: # existing local attribute
d[key] = obj
else:
d["_cache"].__setitem__(key, obj)
def __delattr__(self, key):
try:
vars(self)["_cache"].__delitem__(key)
except KeyError:
object.__delattr__(self, key)
def __getitem__(self, key):
try:
return getattr(self._cache, key)
except (AttributeError, KeyError, NameError):
return super(RootContainer, self).__getitem__(key)
def __setitem__(self, key, value):
if key in self._cache:
self._cache[key] = value
else:
return super(RootContainer, self).__setitem__(key, value)
def __delitem__(self, key):
try:
del self._cache[key]
except KeyError:
super(RootContainer, self).__delitem__(key)
def get(self, key, default=None):
try:
rv = self.__getitem__(key)
except KeyError:
rv = default
return rv
def has_key(self, key):
return key in self._cache or key in super(RootContainer, self)
def copy(self):
return self.__class__(self.node, self._cache.copy())
# files update the local cache only.
def mergefile(self, filename):
if os.path.isfile(filename):
gb = dict(self.items())
exec(compile(
open(filename).read(), filename, 'exec'), gb, self._cache)
# Updates done from external dicts only update the local cache. If you
# want it persistent, enter it into the persistent store another way.
def update(self, other):
for k, v in list(other.items()):
d = self._cache
path = k.split(".") # allows for keys with dot-path
for part in path[:-1]:
d = d[part]
# Use setattr for the sake of attribute-dicts, properties, and
# other objects.
setattr(d, path[-1], v)
def setdefault(self, key, val):
d = self._cache
path = key.split(".")
for part in path[:-1]:
d = d[part]
return d.setdefault(path[-1], val)
def evalset(self, k, v):
"""Evaluates the (string) value to convert it to an object in the
storage first. Useful for specifying objects from string-sources, such
as the command line. """
if type(v) is str:
try:
v = eval(v, {}, vars(self))
except:
pass
d = self._cache
path = k.split(".") # allows for keys with dot-path
for part in path[:-1]:
d = d[part]
# Use setattr for attribute-dicts, properties, and other objects.
setattr(d, path[-1], v)
def evalupdate(self, other):
for k, v in other.items():
self.evalset(k, v)
def get_account(self, identifier):
"""Get account credentials by identifier."""
AID = models.AccountIds
try:
acct = AID.select().where(AID.identifier == identifier).get()
except models.DoesNotExist as err:
raise ConfigError(
"Bad account identifier {!r}: {!s}".format(identifier, err))
return acct.login, acct.password
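# Illustrative sketch (not part of the original module): how the dot-path
# update methods above are typically used.  The keys are hypothetical and
# assume a "flags" container in the cache, as get_config() below sets up.
def _example_overrides(cf):
    """Apply command-line style overrides to a RootContainer instance."""
    cf.update({"flags.DEBUG": 1})           # plain values go to the local cache
    cf.evalupdate({"flags.VERBOSE": "1"})   # string values are eval()'d first
    return cf.flags.DEBUG, cf.flags.VERBOSE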
def get_mock_config(filelist=None, initdict=None, kwargs=None):
flags = AttrDict()
flags.DEBUG = 0
flags.VERBOSE = 0
cf = AttrDict()
cf.flags = flags
if filelist:
for f in filelist:
if os.path.isfile(f):
gb = globals()
exec(compile(open(f).read(), f, 'exec'), gb, cf)
if type(initdict) is dict:
cf.update(initdict)
if type(kwargs) is dict:
cf.update(kwargs)
return cf
def get_config(storageurl=None, _extrafiles=None, initdict=None, **kwargs):
"""Get primary configuration.
Returns a RootContainer instance containing configuration parameters. An
extra dictionary may be merged in with the 'initdict' parameter. And
finally, extra options may be added with keyword parameters when calling
this.
"""
models.connect(storageurl)
files = []
if type(_extrafiles) is str:
_extrafiles = [_extrafiles]
if _extrafiles:
files.extend(_extrafiles)
try:
rootnode = config.get_root()
except models.OperationalError:
logging.exception_warning(
"Could not connect to database. Configuration not available.")
return get_mock_config(files, initdict, kwargs)
cache = AttrDict()
flags = AttrDict()
# copy flag values to cache so changes don't persist.
flagsnode = Config.select().where(
(Config.parent == rootnode) & (Config.name == "flags")).get()
for valnode in flagsnode.children:
flags[valnode.name] = valnode.value
cache.flags = flags
cf = RootContainer(rootnode, cache)
for f in files:
if os.path.isfile(f):
cf.mergefile(f)
if type(initdict) is dict:
cf.evalupdate(initdict)
cf.update(kwargs)
return cf
if __name__ == "__main__":
from pycopia import autodebug
cf = get_config()
print(cf)
print(cf.flags)
print(cf.flags.DEBUG)
#cf.environmentname = "default"
#env = cf._get_environment()
# env = cf.environment
# print("Environment:")
# print(env)
# print("Supported roles:")
# print(env.get_supported_roles())
## print env.get_role("testcontroller")
# #print env._get_DUT()
# #dut = env.DUT
# #print dut["default_role"]
# print(cf.environment._environment.owner)
# del cf.environment
|
|
# pylint: disable=line-too-long, no-member
from __future__ import division
from builtins import str # pylint: disable=redefined-builtin
import csv
import io
import json
import tempfile
import time
from zipfile import ZipFile
from past.utils import old_div
import arrow
import requests
from django.utils import timezone
from ..models import DataPoint, install_supports_jsonfield
REFRESH_ENDPOINT = 'https://account.health.nokia.com/oauth2/token'
def compile_report(generator, sources): # pylint: disable=too-many-locals
now = arrow.get()
filename = tempfile.gettempdir() + '/pdk_export_' + str(now.timestamp) + str(old_div(now.microsecond, 1e6)) + '.zip'
if generator == 'pdk-nokia-health-full':
with ZipFile(filename, 'w') as export_file:
for source in sources:
last_point = DataPoint.objects.filter(source=source, generator_identifier='pdk-nokia-health', secondary_identifier='server-credentials').order_by('-created').first()
if last_point is not None:
properties = last_point.fetch_properties()
if 'access_token' in properties and 'refresh_token' in properties and 'client_id' in properties and 'client_secret' in properties:
refresh_params = {
'grant_type': 'refresh_token',
'client_id': properties['client_id'],
'client_secret': properties['client_secret'],
'refresh_token': properties['refresh_token'],
}
api_request = requests.post(REFRESH_ENDPOINT, data=refresh_params)
access_payload = api_request.json()
access_token = access_payload['access_token']
first_point = DataPoint.objects.filter(source=source, generator_identifier='pdk-nokia-health').order_by('created').first()
                        # Also include data recorded under the legacy 'pdk-withings-device' generator
first_withings = DataPoint.objects.filter(source=source, generator_identifier='pdk-withings-device').order_by('created').first()
if first_withings is not None and first_withings.created < first_point.created:
first_point = first_withings
intraday_file = fetch_intraday(source, arrow.get(first_point.created), access_token)
export_file.write(intraday_file, source + '/' + intraday_file.split('/')[-1])
sleep_file = fetch_sleep_measures(source, arrow.get(first_point.created), access_token)
export_file.write(sleep_file, source + '/' + sleep_file.split('/')[-1])
new_point = DataPoint(source=last_point.source)
new_point.generator = last_point.generator
new_point.generator_identifier = last_point.generator_identifier
new_point.secondary_identifier = last_point.secondary_identifier
new_point.user_agent = 'Passive Data Kit Server'
new_point.created = timezone.now()
new_point.recorded = new_point.created
properties['access_token'] = access_payload['access_token']
properties['refresh_token'] = access_payload['refresh_token']
if install_supports_jsonfield():
new_point.properties = properties
else:
new_point.properties = json.dumps(properties, indent=2)
new_point.save()
return filename
return None
def fetch_intraday(source, start, access_token): # pylint: disable=too-many-locals, too-many-statements, too-many-branches
final_end = arrow.now()
intraday_filename = tempfile.gettempdir() + '/pdk-nokia-health-full-intraday.txt'
with io.open(intraday_filename, 'w', encoding='utf-8') as outfile:
writer = csv.writer(outfile, delimiter='\t')
columns = [
'Source',
'Created Timestamp',
'Created Date',
'Duration',
'Calories',
'Distance',
'Steps',
'Elevation',
'Strokes',
'Pool Laps',
]
writer.writerow(columns)
while start < final_end:
end = start.shift(hours=+12)
api_url = 'https://api.health.nokia.com/v2/measure?action=getintradayactivity'
api_url += '&access_token=' + access_token
api_url += '&startdate=' + str(start.timestamp)
api_url += '&enddate=' + str(end.timestamp)
response = requests.get(url=api_url)
results = response.json()
if 'body' in results and 'series' in results['body']:
if results['body']['series'] == []:
return None
for timestamp, values in list(results['body']['series'].items()):
row = []
row.append(source)
row.append(timestamp)
created_date = arrow.get(timestamp).datetime
row.append(created_date.isoformat())
row.append(values['duration'])
if 'calories' in values:
row.append(values['calories'])
else:
row.append(None)
if 'distance' in values:
row.append(values['distance'])
else:
row.append(None)
if 'steps' in values:
row.append(values['steps'])
else:
row.append(None)
if 'elevation' in values:
row.append(values['elevation'])
else:
row.append(None)
if 'strokes' in values:
row.append(values['strokes'])
else:
row.append(None)
if 'pool_lap' in values:
row.append(values['pool_lap'])
else:
row.append(None)
writer.writerow(row)
time.sleep(1)
start = end
return intraday_filename
def fetch_sleep_measures(source, start, access_token): # pylint: disable=too-many-locals, too-many-statements, too-many-branches
final_end = arrow.now()
sleep_filename = tempfile.gettempdir() + '/pdk-nokia-health-full-sleep.txt'
with io.open(sleep_filename, 'w', encoding='utf-8') as outfile:
writer = csv.writer(outfile, delimiter='\t')
columns = [
'Source',
'Duration Start',
'Duration End',
'Sleep State',
'Device Model',
]
writer.writerow(columns)
while start < final_end:
end = start.shift(hours=+12)
api_url = 'https://api.health.nokia.com/v2/sleep?action=get'
api_url += '&access_token=' + access_token
api_url += '&startdate=' + str(start.timestamp)
api_url += '&enddate=' + str(end.timestamp)
response = requests.get(url=api_url)
results = response.json()
if 'body' in results and 'series' in results['body']:
for item in results['body']['series']:
row = []
row.append(source)
row.append(item['startdate'])
row.append(item['enddate'])
if item['state'] == 0:
row.append('awake')
elif item['state'] == 1:
row.append('light-sleep')
elif item['state'] == 2:
row.append('deep-sleep')
elif item['state'] == 3:
row.append('rem-sleep')
else:
row.append('unknown (' + str(item['state']) + ')')
if results['body']['model'] == 32:
row.append('aura')
elif results['body']['model'] == 16:
row.append('activity-tracker')
else:
row.append('unknown (' + str(results['body']['model']) + ')')
writer.writerow(row)
time.sleep(1)
start = end
return sleep_filename
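# Illustrative sketch (not part of the original module).  The source
# identifier is hypothetical; compile_report() returns the path of a ZIP file
# containing one intraday and one sleep TSV per source, or None for any other
# generator name.
def _example_export():
    return compile_report('pdk-nokia-health-full', ['participant-0001'])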
|
|
# coding: utf-8
import math
VERSION = "3.2"
H_KEY = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
H_BASE = 20037508.34
H_DEG = math.pi * (30 / 180.0)
H_K = math.tan(H_DEG)
def calcHexSize(level):
return H_BASE / (3.0 ** (level + 3))
class Zone:
def __init__(self, lat, lon, x, y, code):
self.lat = lat
self.lon = lon
self.x = x
self.y = y
self.code = code
def getLevel(self):
return len(self.code) - 2
def getHexSize(self):
return calcHexSize(self.getLevel())
def getHexCoords(self):
h_lat = self.lat
h_lon = self.lon
h_x, h_y = loc2xy(h_lon, h_lat)
h_deg = math.tan(math.pi * (60.0 / 180.0))
h_size = self.getHexSize()
h_top = xy2loc(h_x, h_y + h_deg * h_size)[1]
h_btm = xy2loc(h_x, h_y - h_deg * h_size)[1]
h_l = xy2loc(h_x - 2 * h_size, h_y)[0]
h_r = xy2loc(h_x + 2 * h_size, h_y)[0]
h_cl = xy2loc(h_x - 1 * h_size, h_y)[0]
h_cr = xy2loc(h_x + 1 * h_size, h_y)[0]
return (
(h_lat, h_l),
(h_top, h_cl),
(h_top, h_cr),
(h_lat, h_r),
(h_btm, h_cr),
(h_btm, h_cl)
)
def getZoneByLocation(lat, lon, level):
x, y = getXYByLocation(lat, lon, level)
zone = getZoneByXY(x, y, level)
return zone
def getZoneByCode(code):
x, y = getXYByCode(code)
level = len(code) - 2
zone = getZoneByXY(x, y, level)
return zone
def getXYByLocation(lat, lon, level):
h_size = calcHexSize(level)
lon_grid, lat_grid = loc2xy(lon, lat)
unit_x = 6 * h_size
unit_y = 6 * h_size * H_K
h_pos_x = (lon_grid + lat_grid / H_K) / unit_x
h_pos_y = (lat_grid - H_K * lon_grid) / unit_y
h_x_0 = math.floor(h_pos_x)
h_y_0 = math.floor(h_pos_y)
h_x_q = h_pos_x - h_x_0
h_y_q = h_pos_y - h_y_0
h_x = round(h_pos_x)
h_y = round(h_pos_y)
if h_y_q > -h_x_q + 1:
if (h_y_q < 2 * h_x_q) and (h_y_q > 0.5 * h_x_q):
h_x = h_x_0 + 1
h_y = h_y_0 + 1
elif h_y_q < -h_x_q + 1:
        if (h_y_q > (2 * h_x_q) - 1) and (h_y_q < (0.5 * h_x_q) + 0.5):
h_x = h_x_0
h_y = h_y_0
h_x, h_y, h_rev = adjustXY(h_x, h_y, level)
return (h_x, h_y)
def getXYByCode(code):
level = len(code) - 2
h_size = calcHexSize(level)
unit_x = 6 * h_size
unit_y = 6 * h_size * H_K
h_x = 0
h_y = 0
h_dec9 = str(H_KEY.index(code[0]) * 30 + H_KEY.index(code[1])) + code[2:]
if h_dec9[0] in "15" and h_dec9[1] not in "125" and h_dec9[2] not in "125":
if h_dec9[0] == "5":
h_dec9 = "7" + h_dec9[1:]
elif h_dec9[0] == "1":
h_dec9 = "3" + h_dec9[1:]
h_dec9 = "0" * (level + 2 - len(h_dec9)) + h_dec9
h_dec3 = ""
for dec9s in h_dec9:
dec9i = int(dec9s)
h_dec3 += "012"[dec9i//3] + "012"[dec9i%3]
h_decx = h_dec3[0::2]
h_decy = h_dec3[1::2]
for i in range(level + 3):
h_pow = 3 ** (level + 2 - i)
if h_decx[i] == "0":
h_x -= h_pow
elif h_decx[i] == "2":
h_x += h_pow
if h_decy[i] == "0":
h_y -= h_pow
elif h_decy[i] == "2":
h_y += h_pow
h_x, h_y, l_rev = adjustXY(h_x, h_y, level)
return (h_x, h_y)
def getZoneByXY(x, y, level):
h_size = calcHexSize(level)
h_x, h_y = x, y
unit_x = 6 * h_size
unit_y = 6 * h_size * H_K
h_lat = (H_K * h_x * unit_x + h_y * unit_y) / 2.0
h_lon = (h_lat - h_y * unit_y) / H_K
z_loc_x, z_loc_y = xy2loc(h_lon, h_lat)
max_hsteps = 3 ** (level + 2)
hsteps = abs(h_x - h_y)
if hsteps == max_hsteps:
if h_x > h_y:
h_x, h_y = h_y, h_x
z_loc_x = -180
h_code = ""
code3_x = []
code3_y = []
mod_x, mod_y = h_x, h_y
for i in range(level + 3):
h_pow = 3 ** (level + 2 - i)
if mod_x >= math.ceil(h_pow / 2.0):
code3_x.append(2)
mod_x -= h_pow
elif mod_x <= -math.ceil(h_pow / 2.0):
code3_x.append(0)
mod_x += h_pow
else:
code3_x.append(1)
if mod_y >= math.ceil(h_pow / 2.0):
code3_y.append(2)
mod_y -= h_pow
elif mod_y <= -math.ceil(h_pow / 2.0):
code3_y.append(0)
mod_y += h_pow
else:
code3_y.append(1)
if i == 2 and (z_loc_x == -180 or z_loc_x >= 0):
if code3_x[0] == 2 and code3_y[0] == 1 and code3_x[1] == code3_y[1] and code3_x[2] == code3_y[2]:
code3_x[0] = 1
code3_y[0] = 2
elif code3_x[0] == 1 and code3_y[0] == 0 and code3_x[1] == code3_y[1] and code3_x[2] == code3_y[2]:
code3_x[0] = 0
code3_y[0] = 1
for i in range(len(code3_x)):
code3 = str(code3_x[i]) + str(code3_y[i])
code9 = str(int(code3, 3))
h_code += code9
h_2 = h_code[3:]
h_1 = h_code[0:3]
h_a1 = int(h_1) // 30
h_a2 = int(h_1) % 30
h_code = H_KEY[h_a1] + H_KEY[h_a2] + h_2
return Zone(z_loc_y, z_loc_x, x, y, h_code)
def adjustXY(x, y, level):
h_x = x
h_y = y
rev = 0
max_hsteps = 3 ** (level + 2)
hsteps = abs(h_x - h_y)
if hsteps == max_hsteps and x > y:
h_x, h_y = h_y, h_x
rev = 1
elif hsteps > max_hsteps:
dif = hsteps - max_hsteps
dif_x = dif // 2
dif_y = dif - dif_x
if x > y:
edge_x = h_x - dif_x
edge_y = h_y + dif_y
edge_x, edge_y = edge_y, edge_x
h_x = edge_x + dif_x
h_y = edge_y - dif_y
elif y > x:
edge_x = h_x + dif_x
edge_y = h_y - dif_y
edge_x, edge_y = edge_y, edge_x
h_x = edge_x - dif_x
h_y = edge_y + dif_y
return (h_x, h_y, rev)
def loc2xy(lon, lat):
x = lon * H_BASE / 180.0
y = math.log(math.tan((90.0 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
y *= H_BASE / 180.0
return (x, y)
def xy2loc(x, y):
lon = (x / H_BASE) * 180.0
lat = (y / H_BASE) * 180.0
lat = 180 / math.pi * (2.0 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return (lon, lat)
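# Illustrative usage sketch (not part of the original module); the coordinates
# are arbitrary and level 7 is just one of the supported GeoHex resolutions.
def _example_roundtrip():
    """Encode a lat/lon to a GeoHex code and decode it back."""
    zone = getZoneByLocation(35.658, 139.745, 7)    # lat, lon, level
    decoded = getZoneByCode(zone.code)              # same cell from the code
    return zone.code, (decoded.x, decoded.y) == (zone.x, zone.y)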
|
|
"""Test the Kodi config flow."""
import pytest
from homeassistant import config_entries
from homeassistant.components.kodi.config_flow import (
CannotConnectError,
InvalidAuthError,
)
from homeassistant.components.kodi.const import DEFAULT_TIMEOUT, DOMAIN
from .util import (
TEST_CREDENTIALS,
TEST_DISCOVERY,
TEST_HOST,
TEST_IMPORT,
TEST_WS_PORT,
UUID,
MockConnection,
MockWSConnection,
get_kodi_connection,
)
from tests.async_mock import AsyncMock, PropertyMock, patch
from tests.common import MockConfigEntry
@pytest.fixture
async def user_flow(hass):
"""Return a user-initiated flow after filling in host info."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
return result["flow_id"]
async def test_user_flow(hass, user_flow):
"""Test a successful user initiated flow."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
), patch(
"homeassistant.components.kodi.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.kodi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TEST_HOST["host"]
assert result["data"] == {
**TEST_HOST,
**TEST_WS_PORT,
"password": None,
"username": None,
"name": None,
"timeout": DEFAULT_TIMEOUT,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_valid_auth(hass, user_flow):
"""Test we handle valid auth."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=InvalidAuthError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "credentials"
assert result["errors"] == {}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
), patch(
"homeassistant.components.kodi.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.kodi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_CREDENTIALS
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TEST_HOST["host"]
assert result["data"] == {
**TEST_HOST,
**TEST_WS_PORT,
**TEST_CREDENTIALS,
"name": None,
"timeout": DEFAULT_TIMEOUT,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_valid_ws_port(hass, user_flow):
"""Test we handle valid websocket port."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection,
"connect",
AsyncMock(side_effect=CannotConnectError),
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
), patch(
"homeassistant.components.kodi.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.kodi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_WS_PORT
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TEST_HOST["host"]
assert result["data"] == {
**TEST_HOST,
**TEST_WS_PORT,
"password": None,
"username": None,
"name": None,
"timeout": DEFAULT_TIMEOUT,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_empty_ws_port(hass, user_flow):
"""Test we handle an empty websocket port input."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection,
"connect",
AsyncMock(side_effect=CannotConnectError),
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {}
with patch(
"homeassistant.components.kodi.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.kodi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"ws_port": 0}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TEST_HOST["host"]
assert result["data"] == {
**TEST_HOST,
"ws_port": None,
"password": None,
"username": None,
"name": None,
"timeout": DEFAULT_TIMEOUT,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass, user_flow):
"""Test we handle invalid auth."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=InvalidAuthError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "credentials"
assert result["errors"] == {}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=InvalidAuthError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_CREDENTIALS
)
assert result["type"] == "form"
assert result["step_id"] == "credentials"
assert result["errors"] == {"base": "invalid_auth"}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=CannotConnectError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_CREDENTIALS
)
assert result["type"] == "form"
assert result["step_id"] == "credentials"
assert result["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=Exception,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_CREDENTIALS
)
assert result["type"] == "form"
assert result["step_id"] == "credentials"
assert result["errors"] == {"base": "unknown"}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection,
"connect",
AsyncMock(side_effect=CannotConnectError),
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_CREDENTIALS
)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {}
async def test_form_cannot_connect_http(hass, user_flow):
"""Test we handle cannot connect over HTTP error."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=CannotConnectError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_form_exception_http(hass, user_flow):
"""Test we handle generic exception over HTTP."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=Exception,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "unknown"}
async def test_form_cannot_connect_ws(hass, user_flow):
"""Test we handle cannot connect over WebSocket error."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection,
"connect",
AsyncMock(side_effect=CannotConnectError),
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection, "connected", new_callable=PropertyMock(return_value=False)
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_WS_PORT
)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=CannotConnectError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_WS_PORT
)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {"base": "cannot_connect"}
async def test_form_exception_ws(hass, user_flow):
"""Test we handle generic exception over WebSocket."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection,
"connect",
AsyncMock(side_effect=CannotConnectError),
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection, "connect", AsyncMock(side_effect=Exception)
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_WS_PORT
)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {"base": "unknown"}
async def test_discovery(hass):
"""Test discovery flow works."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "form"
assert result["step_id"] == "discovery_confirm"
with patch(
"homeassistant.components.kodi.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.kodi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
flow_id=result["flow_id"], user_input={}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "hostname"
assert result["data"] == {
**TEST_HOST,
**TEST_WS_PORT,
"password": None,
"username": None,
"name": "hostname",
"timeout": DEFAULT_TIMEOUT,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_discovery_cannot_connect_http(hass):
"""Test discovery aborts if cannot connect."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=CannotConnectError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_discovery_cannot_connect_ws(hass):
"""Test discovery aborts if cannot connect to websocket."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection,
"connect",
AsyncMock(side_effect=CannotConnectError),
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {}
async def test_discovery_exception_http(hass, user_flow):
"""Test we handle generic exception during discovery validation."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=Exception,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "abort"
assert result["reason"] == "unknown"
async def test_discovery_invalid_auth(hass):
"""Test we handle invalid auth during discovery."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=InvalidAuthError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "form"
assert result["step_id"] == "credentials"
assert result["errors"] == {}
async def test_discovery_duplicate_data(hass):
"""Test discovery aborts if same mDNS packet arrives."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "form"
assert result["step_id"] == "discovery_confirm"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_discovery_updates_unique_id(hass):
"""Test a duplicate discovery id aborts and updates existing entry."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=UUID,
data={"host": "dummy", "port": 11, "namename": "dummy.local."},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data["host"] == "1.1.1.1"
assert entry.data["port"] == 8080
assert entry.data["name"] == "hostname"
async def test_form_import(hass):
"""Test we get the form with import source."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
), patch(
"homeassistant.components.kodi.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.kodi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=TEST_IMPORT,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TEST_IMPORT["name"]
assert result["data"] == TEST_IMPORT
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_import_invalid_auth(hass):
"""Test we handle invalid auth on import."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=InvalidAuthError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=TEST_IMPORT,
)
assert result["type"] == "abort"
assert result["reason"] == "invalid_auth"
async def test_form_import_cannot_connect(hass):
"""Test we handle cannot connect on import."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=CannotConnectError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=TEST_IMPORT,
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_form_import_exception(hass):
"""Test we handle unknown exception on import."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=Exception,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=TEST_IMPORT,
)
assert result["type"] == "abort"
assert result["reason"] == "unknown"
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AddSign."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import addsign
from tensorflow.contrib.opt.python.training import sign_decay
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def py_linear_decay_fn(decay_steps):
def linear_decay(step):
step = min(step, decay_steps)
return float(decay_steps - step) / decay_steps
return linear_decay
def addsign_update_numpy(params,
g_t,
m,
lr,
alpha=1.0,
beta=0.9,
py_sign_decay_fn=None,
t=None):
m_t = beta * m + (1 - beta) * g_t
if py_sign_decay_fn is None:
sign_decayed = 1.0
else:
sign_decayed = py_sign_decay_fn(t-1)
multiplier = alpha + sign_decayed * np.sign(g_t) * np.sign(m_t)
params_t = params - lr * multiplier * g_t
return params_t, m_t
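# The helper above mirrors the AddSign update rule applied by the optimizer:
#     m_t     = beta * m + (1 - beta) * g_t
#     theta_t = theta - lr * (alpha + decay(t) * sign(g_t) * sign(m_t)) * g_t
# Worked example with illustrative numbers: params=1.0, g_t=0.1, m=0.0, lr=0.1,
# alpha=1.0, beta=0.9 and no decay gives m_t = 0.01, a multiplier of
# 1 + sign(0.1) * sign(0.01) = 2.0, and params_t = 1.0 - 0.1 * 2.0 * 0.1 = 0.98.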
class AddSignTest(test.TestCase):
def _testDense(self,
use_resource=False,
learning_rate=0.1,
sign_decay_fn=None,
py_sign_decay_fn=None,
alpha=1.0,
beta=0.9):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, m1 = 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
global_step = resource_variable_ops.ResourceVariable(
0, trainable=False)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
global_step = variables.Variable(
0, trainable=False)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = addsign.AddSignOptimizer(
learning_rate=learning_rate,
alpha=alpha,
beta=beta,
sign_decay_fn=sign_decay_fn,
)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
global_step=global_step)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 7 steps of AddSign
# first 4 steps with positive gradient
# last 3 steps with negative gradient (sign(gm) should be -1)
for t in range(1, 8):
if t < 5:
if not context.executing_eagerly():
self.evaluate(update)
elif t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
else:
if not context.executing_eagerly():
self.evaluate(neg_update)
elif t > 1:
opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
global_step=global_step)
var0_np, m0 = addsign_update_numpy(
var0_np,
grads0_np if t < 5 else -grads0_np,
m0,
learning_rate,
alpha=alpha,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
var1_np, m1 = addsign_update_numpy(
var1_np,
grads1_np if t < 5 else -grads1_np,
m1,
learning_rate,
alpha=alpha,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testDense(self):
decay_steps = 10
sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
py_sign_decay_fn = py_linear_decay_fn(decay_steps)
self._testDense(use_resource=False)
self._testDense(use_resource=False, learning_rate=0.01, alpha=0.1, beta=0.8)
self._testDense(use_resource=False,
sign_decay_fn=sign_decay_fn,
py_sign_decay_fn=py_sign_decay_fn)
self._testDense(use_resource=True)
self._testDense(use_resource=True, learning_rate=0.01, alpha=0.1, beta=0.8)
self._testDense(use_resource=True,
sign_decay_fn=sign_decay_fn,
py_sign_decay_fn=py_sign_decay_fn)
def _testSparse(self,
use_resource=False,
learning_rate=0.1,
sign_decay_fn=None,
py_sign_decay_fn=None,
alpha=1.0,
beta=0.9):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, m1 = 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
global_step = resource_variable_ops.ResourceVariable(
0, trainable=False)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
global_step = variables.Variable(
0, trainable=False)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = addsign.AddSignOptimizer(
learning_rate=learning_rate,
alpha=alpha,
beta=beta,
sign_decay_fn=sign_decay_fn,
)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
global_step=global_step)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 3 steps of AddSign (range(1, 4)), all with a positive gradient;
        # the negative-gradient branch below is only exercised in _testDense.
for t in range(1, 4):
if t < 5:
update.run()
else:
neg_update.run()
var0_np, m0 = addsign_update_numpy(
var0_np,
grads0_np,
m0,
learning_rate,
alpha=alpha,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
var1_np, m1 = addsign_update_numpy(
var1_np,
grads1_np,
m1,
learning_rate,
alpha=alpha,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSparse(self):
decay_steps = 10
sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
py_sign_decay_fn = py_linear_decay_fn(decay_steps)
self._testSparse(use_resource=False)
self._testSparse(use_resource=False,
learning_rate=0.01,
alpha=0.1,
beta=0.8)
self._testSparse(use_resource=False,
sign_decay_fn=sign_decay_fn,
py_sign_decay_fn=py_sign_decay_fn)
if __name__ == '__main__':
test.main()
|
|
#!/usr/bin/env python
"""
Construct a neural network model, support vector and decision trees regression models from the data
"""
import pickle
import lasagne
import numpy as np
import sklearn
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from nolearn.lasagne import NeuralNet
from scipy.stats import randint as sp_randint
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge, RidgeCV, BayesianRidge, Lasso
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.svm import LinearSVR
from sklearn.tree import DecisionTreeRegressor
__author__ = "Pearl Philip"
__credits__ = "David Beck"
__license__ = "BSD 3-Clause License"
__maintainer__ = "Pearl Philip"
__email__ = "pphilip@uw.edu"
__status__ = "Development"
def run_models(x_train, y_train, x_test, y_test, n_features):
"""
    Dispatches to the machine learning model chosen through user input.
:param x_train: features dataframe for model training
:param y_train: target dataframe for model training
:param x_test: features dataframe for model testing
:param y_test: target dataframe for model testing
    :param n_features: number of features, used to name the saved result files
    :return: None
"""
model_choice = int(input("Type your choice of model to be run:" + "\n" +
"1 for Linear Regression" + "\n" +
"2 for Neural Network" + "\n" +
"3 for Support Vector Machine" + "\n" +
"4 for Decision Tree" + "\n" +
"5 for Ridge Regression" + "\n" +
"6 for Bayesian Ridge Regression" + "\n" +
"7 for Lasso:" + "\n" +
"8 for Random Forest Regressor:" + "\n"
))
if model_choice == 1:
build_linear(x_train, y_train, x_test, y_test, n_features)
elif model_choice == 2:
build_nn(x_train, y_train, x_test, y_test, n_features)
elif model_choice == 3:
build_svm(x_train, y_train, x_test, y_test, n_features)
elif model_choice == 4:
build_tree(x_train, y_train, x_test, y_test, n_features)
elif model_choice == 5:
build_ridge(x_train, y_train, x_test, y_test, n_features)
elif model_choice == 6:
build_bayesian_rr(x_train, y_train, x_test, y_test, n_features)
elif model_choice == 7:
build_lasso(x_train, y_train, x_test, y_test, n_features)
elif model_choice == 8:
build_forest(x_train, y_train, x_test, y_test, n_features)
else:
print("Please choose from list of available models only")
return
def build_linear(x_train, y_train, x_test, y_test, n_features):
"""
    Constructing a linear regression model from input dataframe
:param x_train: features dataframe for model training
:param y_train: target dataframe for model training
:param x_test: features dataframe for model testing
:param y_test: target dataframe for model testing
:return: None
"""
clf = LinearRegression(n_jobs=-1)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
# Mean absolute error regression loss
mean_abs = sklearn.metrics.mean_absolute_error(y_test, y_pred)
# Mean squared error regression loss
mean_sq = sklearn.metrics.mean_squared_error(y_test, y_pred)
# Median absolute error regression loss
median_abs = sklearn.metrics.median_absolute_error(y_test, y_pred)
# R^2 (coefficient of determination) regression score function
r2 = sklearn.metrics.r2_score(y_test, y_pred)
# Explained variance regression score function
exp_var_score = sklearn.metrics.explained_variance_score(y_test, y_pred)
with open('../trained_networks/lr_%d_data.pkl' % n_features, 'wb') as results:
pickle.dump(clf, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_sq, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(median_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(r2, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(exp_var_score, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(y_pred, results, pickle.HIGHEST_PROTOCOL)
return
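# Hypothetical helper (illustrative only, not used by the functions in this
# module): every build_* function computes the same five scikit-learn
# regression metrics before pickling, which could be captured as:
def _regression_metrics(y_test, y_pred):
    """Return the five regression metrics used throughout this module."""
    return {
        'mean_abs': sklearn.metrics.mean_absolute_error(y_test, y_pred),
        'mean_sq': sklearn.metrics.mean_squared_error(y_test, y_pred),
        'median_abs': sklearn.metrics.median_absolute_error(y_test, y_pred),
        'r2': sklearn.metrics.r2_score(y_test, y_pred),
        'exp_var_score': sklearn.metrics.explained_variance_score(y_test, y_pred),
    }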
def build_nn(x_train, y_train, x_test, y_test, n_features):
"""
Constructing a regression neural network model from input dataframe
:param x_train: features dataframe for model training
:param y_train: target dataframe for model training
:param x_test: features dataframe for model testing
:param y_test: target dataframe for model testing
:return: None
"""
net = NeuralNet(layers=[('input', InputLayer),
('hidden0', DenseLayer),
('hidden1', DenseLayer),
('output', DenseLayer)],
input_shape=(None, x_train.shape[1]), # Number of i/p nodes = number of columns in x
hidden0_num_units=15,
hidden0_nonlinearity=lasagne.nonlinearities.softmax,
hidden1_num_units=17,
hidden1_nonlinearity=lasagne.nonlinearities.softmax,
output_num_units=1, # Number of o/p nodes = number of columns in y
output_nonlinearity=lasagne.nonlinearities.softmax,
max_epochs=100,
update_learning_rate=0.01,
regression=True,
verbose=0)
# Finding the optimal set of params for each variable in the training of the neural network
param_dist = {'hidden0_num_units':sp_randint(3, 30), 'hidden1_num_units':sp_randint(3, 30)}
clf = RandomizedSearchCV(estimator=net, param_distributions=param_dist,
n_iter=15, n_jobs=-1)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
# Mean absolute error regression loss
mean_abs = sklearn.metrics.mean_absolute_error(y_test, y_pred)
# Mean squared error regression loss
mean_sq = sklearn.metrics.mean_squared_error(y_test, y_pred)
# Median absolute error regression loss
median_abs = sklearn.metrics.median_absolute_error(y_test, y_pred)
# R^2 (coefficient of determination) regression score function
r2 = sklearn.metrics.r2_score(y_test, y_pred)
# Explained variance regression score function
exp_var_score = sklearn.metrics.explained_variance_score(y_test, y_pred)
with open('../trained_networks/nn_%d_data.pkl' % n_features, 'wb') as results:
pickle.dump(clf, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(net, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_sq, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(median_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(r2, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(exp_var_score, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(y_pred, results, pickle.HIGHEST_PROTOCOL)
return
def build_svm(x_train, y_train, x_test, y_test, n_features):
"""
Constructing a support vector regression model from input dataframe
:param x_train: features dataframe for model training
:param y_train: target dataframe for model training
:param x_test: features dataframe for model testing
:param y_test: target dataframe for model testing
:return: None
"""
clf = LinearSVR(random_state=1, dual=False, epsilon=0,
loss='squared_epsilon_insensitive')
# Random state has int value for non-random sampling
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
# Mean absolute error regression loss
mean_abs = sklearn.metrics.mean_absolute_error(y_test, y_pred)
# Mean squared error regression loss
mean_sq = sklearn.metrics.mean_squared_error(y_test, y_pred)
# Median absolute error regression loss
median_abs = sklearn.metrics.median_absolute_error(y_test, y_pred)
# R^2 (coefficient of determination) regression score function
r2 = sklearn.metrics.r2_score(y_test, y_pred)
# Explained variance regression score function
exp_var_score = sklearn.metrics.explained_variance_score(y_test, y_pred)
with open('../trained_networks/svm_%d_data.pkl' % n_features, 'wb') as results:
pickle.dump(clf, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_sq, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(median_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(r2, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(exp_var_score, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(y_pred, results, pickle.HIGHEST_PROTOCOL)
return
def build_tree(x_train, y_train, x_test, y_test, n_features):
"""
Constructing a decision trees regression model from input dataframe
:param x_train: features dataframe for model training
:param y_train: target dataframe for model training
:param x_test: features dataframe for model testing
:param y_test: target dataframe for model testing
    :param n_features: number of features; used to name the output pickle file
    :return: None
"""
model = DecisionTreeRegressor()
param_dist = {'max_depth': sp_randint(1, 15),
'min_samples_split': sp_randint(2, 15)}
clf = RandomizedSearchCV(estimator=model, param_distributions=param_dist,
n_iter=15, n_jobs=-1)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
print(clf.best_params_, clf.best_score_)
# Mean absolute error regression loss
mean_abs = sklearn.metrics.mean_absolute_error(y_test, y_pred)
# Mean squared error regression loss
mean_sq = sklearn.metrics.mean_squared_error(y_test, y_pred)
# Median absolute error regression loss
median_abs = sklearn.metrics.median_absolute_error(y_test, y_pred)
# R^2 (coefficient of determination) regression score function
r2 = sklearn.metrics.r2_score(y_test, y_pred)
# Explained variance regression score function
exp_var_score = sklearn.metrics.explained_variance_score(y_test, y_pred)
with open('../trained_networks/dt_%d_data.pkl' % n_features, 'wb') as results:
pickle.dump(clf, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_sq, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(median_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(r2, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(exp_var_score, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(y_pred, results, pickle.HIGHEST_PROTOCOL)
return
def build_ridge(x_train, y_train, x_test, y_test, n_features):
"""
Constructing a ridge regression model from input dataframe
:param x_train: features dataframe for model training
:param y_train: target dataframe for model training
:param x_test: features dataframe for model testing
:param y_test: target dataframe for model testing
    :param n_features: number of features; used to name the output pickle file
    :return: None
"""
clf = Ridge()
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
# Mean absolute error regression loss
mean_abs = sklearn.metrics.mean_absolute_error(y_test, y_pred)
# Mean squared error regression loss
mean_sq = sklearn.metrics.mean_squared_error(y_test, y_pred)
# Median absolute error regression loss
median_abs = sklearn.metrics.median_absolute_error(y_test, y_pred)
# R^2 (coefficient of determination) regression score function
r2 = sklearn.metrics.r2_score(y_test, y_pred)
# Explained variance regression score function
exp_var_score = sklearn.metrics.explained_variance_score(y_test, y_pred)
    # Regularisation strength actually used (plain Ridge has no CV-selected alpha_ attribute)
    ridge_alpha = clf.alpha
with open('../trained_networks/rr_%d_data.pkl' % n_features, 'wb') as results:
pickle.dump(clf, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_sq, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(median_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(r2, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(exp_var_score, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(y_pred, results, pickle.HIGHEST_PROTOCOL)
return
def build_bayesian_rr(x_train, y_train, x_test, y_test, n_features):
"""
Constructing a Bayesian ridge regression model from input dataframe
:param x_train: features dataframe for model training
:param y_train: target dataframe for model training
:param x_test: features dataframe for model testing
:param y_test: target dataframe for model testing
    :param n_features: number of features; used to name the output pickle file
    :return: None
"""
clf = BayesianRidge()
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
# Mean absolute error regression loss
mean_abs = sklearn.metrics.mean_absolute_error(y_test, y_pred)
# Mean squared error regression loss
mean_sq = sklearn.metrics.mean_squared_error(y_test, y_pred)
# Median absolute error regression loss
median_abs = sklearn.metrics.median_absolute_error(y_test, y_pred)
# R^2 (coefficient of determination) regression score function
r2 = sklearn.metrics.r2_score(y_test, y_pred)
# Explained variance regression score function
exp_var_score = sklearn.metrics.explained_variance_score(y_test, y_pred)
    # Estimated precision of the noise (alpha_) from the fitted Bayesian model
ridge_alpha = clf.alpha_
with open('../trained_networks/brr_%d_data.pkl' % n_features, 'wb') as results:
pickle.dump(clf, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_sq, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(median_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(r2, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(exp_var_score, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(y_pred, results, pickle.HIGHEST_PROTOCOL)
return
def build_lasso(x_train, y_train, x_test, y_test, n_features):
"""
Constructing a Lasso linear model with cross validation from input dataframe
:param x_train: features dataframe for model training
:param y_train: target dataframe for model training
:param x_test: features dataframe for model testing
:param y_test: target dataframe for model testing
    :param n_features: number of features; used to name the output pickle file
    :return: None
"""
    model = Lasso(random_state=1)
    # Fixed integer random_state for reproducible results
    param_dist = {'alpha': np.arange(0.0001, 1, 0.001).tolist()}
clf = RandomizedSearchCV(estimator=model, param_distributions=param_dist,
n_iter=15, n_jobs=-1)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
print(clf.best_params_, clf.best_score_)
# Mean absolute error regression loss
mean_abs = sklearn.metrics.mean_absolute_error(y_test, y_pred)
# Mean squared error regression loss
mean_sq = sklearn.metrics.mean_squared_error(y_test, y_pred)
# Median absolute error regression loss
median_abs = sklearn.metrics.median_absolute_error(y_test, y_pred)
# R^2 (coefficient of determination) regression score function
r2 = sklearn.metrics.r2_score(y_test, y_pred)
# Explained variance regression score function
exp_var_score = sklearn.metrics.explained_variance_score(y_test, y_pred)
with open('../trained_networks/lasso_%d_data.pkl' % n_features, 'wb') as results:
pickle.dump(clf, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_sq, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(median_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(r2, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(exp_var_score, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(y_pred, results, pickle.HIGHEST_PROTOCOL)
return
def build_forest(x_train, y_train, x_test, y_test, n_features):
"""
Constructing a random forest regression model from input dataframe
:param x_train: features dataframe for model training
:param y_train: target dataframe for model training
:param x_test: features dataframe for model testing
:param y_test: target dataframe for model testing
    :param n_features: number of features; used to name the output pickle file
    :return: None
"""
model = RandomForestRegressor()
param_dist = {'max_depth': sp_randint(1, 15),
'min_samples_split': sp_randint(2, 15)}
clf = RandomizedSearchCV(estimator=model, param_distributions=param_dist,
n_iter=15, n_jobs=-1)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
# Mean absolute error regression loss
mean_abs = sklearn.metrics.mean_absolute_error(y_test, y_pred)
# Mean squared error regression loss
mean_sq = sklearn.metrics.mean_squared_error(y_test, y_pred)
# Median absolute error regression loss
median_abs = sklearn.metrics.median_absolute_error(y_test, y_pred)
# R^2 (coefficient of determination) regression score function
r2 = sklearn.metrics.r2_score(y_test, y_pred)
# Explained variance regression score function
exp_var_score = sklearn.metrics.explained_variance_score(y_test, y_pred)
with open('../trained_networks/rfr_%d_data.pkl' % n_features, 'wb') as results:
pickle.dump(clf, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(mean_sq, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(median_abs, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(r2, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(exp_var_score, results, pickle.HIGHEST_PROTOCOL)
pickle.dump(y_pred, results, pickle.HIGHEST_PROTOCOL)
print(r2)
return
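# --- Hedged example (not part of the original script) -------------------------
# A hypothetical driver showing how the build_* functions above might be wired
# together; `features_df` / `target_df` and the 80/20 split are illustrative
# assumptions, not something the original module defines.
def run_all_models(features_df, target_df):
    from sklearn.model_selection import train_test_split
    x_train, x_test, y_train, y_test = train_test_split(
        features_df, target_df, test_size=0.2, random_state=1)
    n_features = x_train.shape[1]
    for builder in (build_nn, build_svm, build_tree, build_ridge,
                    build_bayesian_rr, build_lasso, build_forest):
        builder(x_train, y_train, x_test, y_test, n_features)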
|
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class BuildEnvironmentRest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
BuildEnvironmentRest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'name': 'str',
'description': 'str',
'system_image_repository_url': 'str',
'attributes': 'dict(str, str)',
'system_image_type': 'str',
'image_repository_url': 'str',
'image_id': 'str'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'description': 'description',
'system_image_repository_url': 'systemImageRepositoryUrl',
'attributes': 'attributes',
'system_image_type': 'systemImageType',
'image_repository_url': 'imageRepositoryUrl',
'image_id': 'imageId'
}
self._id = None
self._name = None
self._description = None
self._system_image_repository_url = None
self._attributes = None
self._system_image_type = None
self._image_repository_url = None
self._image_id = None
@property
def id(self):
"""
Gets the id of this BuildEnvironmentRest.
:return: The id of this BuildEnvironmentRest.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this BuildEnvironmentRest.
:param id: The id of this BuildEnvironmentRest.
:type: int
"""
self._id = id
@property
def name(self):
"""
Gets the name of this BuildEnvironmentRest.
:return: The name of this BuildEnvironmentRest.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this BuildEnvironmentRest.
:param name: The name of this BuildEnvironmentRest.
:type: str
"""
self._name = name
@property
def description(self):
"""
Gets the description of this BuildEnvironmentRest.
:return: The description of this BuildEnvironmentRest.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this BuildEnvironmentRest.
:param description: The description of this BuildEnvironmentRest.
:type: str
"""
self._description = description
@property
def system_image_repository_url(self):
"""
Gets the system_image_repository_url of this BuildEnvironmentRest.
:return: The system_image_repository_url of this BuildEnvironmentRest.
:rtype: str
"""
return self._system_image_repository_url
@system_image_repository_url.setter
def system_image_repository_url(self, system_image_repository_url):
"""
Sets the system_image_repository_url of this BuildEnvironmentRest.
:param system_image_repository_url: The system_image_repository_url of this BuildEnvironmentRest.
:type: str
"""
self._system_image_repository_url = system_image_repository_url
@property
def attributes(self):
"""
Gets the attributes of this BuildEnvironmentRest.
:return: The attributes of this BuildEnvironmentRest.
:rtype: dict(str, str)
"""
return self._attributes
@attributes.setter
def attributes(self, attributes):
"""
Sets the attributes of this BuildEnvironmentRest.
:param attributes: The attributes of this BuildEnvironmentRest.
:type: dict(str, str)
"""
self._attributes = attributes
@property
def system_image_type(self):
"""
Gets the system_image_type of this BuildEnvironmentRest.
:return: The system_image_type of this BuildEnvironmentRest.
:rtype: str
"""
return self._system_image_type
@system_image_type.setter
def system_image_type(self, system_image_type):
"""
Sets the system_image_type of this BuildEnvironmentRest.
:param system_image_type: The system_image_type of this BuildEnvironmentRest.
:type: str
"""
allowed_values = ["DOCKER_IMAGE", "VIRTUAL_MACHINE_RAW", "VIRTUAL_MACHINE_QCOW2", "LOCAL_WORKSPACE"]
if system_image_type not in allowed_values:
raise ValueError(
"Invalid value for `system_image_type`, must be one of {0}"
.format(allowed_values)
)
self._system_image_type = system_image_type
@property
def image_repository_url(self):
"""
Gets the image_repository_url of this BuildEnvironmentRest.
:return: The image_repository_url of this BuildEnvironmentRest.
:rtype: str
"""
return self._image_repository_url
@image_repository_url.setter
def image_repository_url(self, image_repository_url):
"""
Sets the image_repository_url of this BuildEnvironmentRest.
:param image_repository_url: The image_repository_url of this BuildEnvironmentRest.
:type: str
"""
self._image_repository_url = image_repository_url
@property
def image_id(self):
"""
Gets the image_id of this BuildEnvironmentRest.
:return: The image_id of this BuildEnvironmentRest.
:rtype: str
"""
return self._image_id
@image_id.setter
def image_id(self, image_id):
"""
Sets the image_id of this BuildEnvironmentRest.
:param image_id: The image_id of this BuildEnvironmentRest.
:type: str
"""
self._image_id = image_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, datetime):
result[attr] = str(value.date())
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
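# --- Hedged usage sketch (not produced by swagger-codegen) ---------------------
# Shows how the generated model is typically populated through its property
# setters and serialised; the field values below are made up for illustration.
def _example_build_environment():
    env = BuildEnvironmentRest()
    env.id = 1
    env.name = 'example-env'
    env.description = 'illustrative only'
    env.system_image_type = 'DOCKER_IMAGE'  # validated against allowed_values in the setter
    return env.to_dict()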
|
|
"""
MultipathDevices - command ``multipath -v4 -ll``
================================================
This module parses the output of the ``multipath -v4 -ll`` command and
stores the data for each multipath device found.
Examples:
>>> mpaths = shared[MultipathDevices]
>>> len(mpaths) # Can treat the object as a list to iterate through
2
>>> mpaths[0]['alias']
'mpathg'
>>> mpaths[0]['size']
'54T'
>>> mpaths[0]['dm_name']
'dm-2'
>>> mpaths[0]['wwid']
'36f01faf000da360b0000033c528fea6d'
>>> groups = mpaths[0]['path_group'] # List of path groups for this device
>>> groups[0]['status']
'active'
>>> len(groups[0]['path'])
4
>>> path0 = groups[0]['path'][0] # Each path group has an array of paths
>>> path0[1] # Paths are stored as a list of items
'sdc'
>>> path0[-1]
'running'
>>> mpaths.dms # List of device names found
['dm-2', 'dm-4', 'dm-5', 'dm-0', 'dm-0', 'dm-8', 'dm-19']
>>> mpaths.by_dm['dm-2']['alias'] # Access by device name
'mpathg'
>>> mpaths.aliases # Aliases found (again, in order)
['mpathg', 'mpathe', 'mpatha', 'mpathb']
>>> mpaths.by_alias['mpathg']['dm_name'] # Access by alias
'dm-2'
>>> mpaths.by_wwid['36f01faf000da360b0000033c528fea6d']['dm_name']
'dm-2'
"""
import re
import shlex
from insights import parser, Parser
from insights.specs import multipath__v4__ll
@parser(multipath__v4__ll)
class MultipathDevices(Parser):
"""
``multipath_-v4_ll`` command output
Example input::
===== paths list =====
uuid hcil dev dev_t pri dm_st chk_st vend/prod/rev dev_st
0:0:0:0 sda 8:0 -1 undef ready VMware,Virtual disk running
3:0:0:1 sdb 8:16 -1 undef ready IET,VIRTUAL-DISK running
4:0:0:1 sdc 8:32 -1 undef ready IET,VIRTUAL-DISK running
Oct 28 14:02:44 | *word = 0, len = 1
Oct 28 14:02:44 | *word = E, len = 1
Oct 28 14:02:44 | *word = 1, len = 1
Oct 28 14:02:44 | *word = 0, len = 1
Oct 28 14:02:44 | *word = A, len = 1
Oct 28 14:02:44 | *word = 0, len = 1
mpathg (36f01faf000da360b0000033c528fea6d) dm-2 DELL,MD36xxi
size=54T features='3 queue_if_no_path pg_init_retries 50' hwhandler='1 rdac' wp=rw
|-+- policy='round-robin 0' prio=0 status=active
| |- 12:0:0:1 sdc 8:32 active ready running
| |- 11:0:0:1 sdi 8:128 active ready running
| |- 15:0:0:1 sdo 8:224 active ready running
| `- 17:0:0:1 sdv 65:80 active ready running
`-+- policy='round-robin 0' prio=0 status=enabled
|- 13:0:0:1 sdf 8:80 active ready running
|- 14:0:0:1 sdl 8:176 active ready running
|- 16:0:0:1 sdr 65:16 active ready running
`- 18:0:0:1 sdx 65:112 active ready running
mpathe (36f01faf000da3761000004323aa6fbce) dm-4 DELL,MD36xxi
size=54T features='3 queue_if_no_path pg_init_retries 55' hwhandler='1 rdac' wp=rw
|-+- policy='round-robin 0' prio=0 status=active
| |- 13:0:0:2 sdg 8:96 active faulty running
| |- 14:0:0:2 sdm 8:192 active faulty running
| |- 16:0:0:2 sds 65:32 active faulty running
| `- 18:0:0:2 sdy 65:128 active faulty running
`-+- policy='round-robin 0' prio=0 status=enabled
|- 12:0:0:2 sdd 8:48 active faulty running
|- 11:0:0:2 sdj 8:144 active faulty running
|- 15:0:0:2 sdp 8:240 active faulty running
`- 17:0:0:2 sdw 65:96 active faulty running
36001405b1629f80d52a4c898f8856e43 dm-5 LIO-ORG ,block0_sdb
size=2.0G features='0' hwhandler='0' wp=rw
|-+- policy='service-time 0' prio=1 status=active
| `- 3:0:0:0 sdc 8:32 active ready running
`-+- policy='service-time 0' prio=1 status=enabled
`- 4:0:0:0 sdb 8:16 active ready running
Example data structure produced::
devices = [
{
"alias": "mpathg",
"wwid": "36f01faf000da360b0000033c528fea6d",
"dm_name": "dm-2",
"venprod": "DELL,MD36xxi",
"size": "54T",
"features": "3 queue_if_no_path pg_init_retries 50",
"hwhandler": "1 rdac",
"wp": "rw",
"path_group": [
{
"policy": "round-robin 0",
"prio": "0"
"status": "active"
"path": [
['12:0:0:1', 'sdc', '8:32', 'active', 'ready', 'running'],
['11:0:0:1', 'sdi', '8:128', 'active', 'ready', 'running'],
['15:0:0:1', 'sdo', '8:224', 'active', 'ready', 'running'],
['17:0:0:1', 'sdv', '65:80', 'active', 'ready', 'running']
]
}, {
"policy": "round-robin 0",
"prio": "0"
"status": "enabled"
"path": [
['13:0:0:1', 'sdf', '8:80', 'active', 'ready', 'running'],
['14:0:0:1', 'sdl', '8:176', 'active', 'ready', 'running'],
['16:0:0:1', 'sdr', '65:16', 'active', 'ready', 'running'],
                    ['18:0:0:1', 'sdx', '65:112', 'active', 'ready', 'running']
]
}
]
},...
]
Attributes:
devices (list): List of devices found, in order
dms (list): Device mapper names of each device, in order found
aliases (list): Alias of each device, in order found
        wwids (list): World Wide ID of each device, in order found
by_dm (dict): Access to each device by device mapper name
by_alias (dict): Access to each device by alias
by_wwid (dict): Access to each device by World Wide ID
"""
def parse_content(self, content):
self.devices = []
mpath_dev_all = []
mpath_dev = {}
path_info = []
path_group = []
path_group_attr = {}
MPATH_WWID_REG = re.compile(r'\(?([A-Za-z0-9_\s]+)\)?\s+dm-')
PROPERTY_SQBRKT_REG = re.compile(r"\[(?P<key>\w+)=(?P<value>[^\]]+)\]")
PATHGROUP_POLICY_STR = \
r"(?:policy=')?(?P<policy>(?:round-robin|queue-length|service-time) \d)" + \
r"(?:' | \[)prio=(?P<priority>\d+)(?:\]\[| status=)" + \
r"(?P<status>\w+)(?:\]|)"
PATHGROUP_POLICY_REG = re.compile(PATHGROUP_POLICY_STR)
HCTL_REG = re.compile(r'(?:[`|]-(?:\+-)?|\\_) (\d+:){3}\d+')
for line in content:
m = MPATH_WWID_REG.search(line)
if m:
# Save previous path group info if we have any:
# Now that we've got a valid path, append the group data if we
# haven't already
if path_info:
path_group_attr['path'] = path_info
path_group.append(path_group_attr)
# Must reset path group info to not carry on into new device
path_group_attr = {}
path_info = []
mpath_dev['path_group'] = path_group
mpath_dev_all.append(mpath_dev)
mpath_dev = {}
path_group = []
wwid = m.group(1)
no_alias = line.startswith(wwid)
(dm, venprod) = re.findall(r".*(dm-\S+)\s+(.*)", line)[0]
if not no_alias:
(dm, venprod) = re.findall(r"\w+\s+\(.*\)\s+(dm-\S+)\s+(.*)", line)[0]
mpath_dev['alias'] = line.split()[0]
mpath_dev['wwid'] = wwid
mpath_dev['dm_name'] = dm
mpath_dev['venprod'] = venprod
elif 'size=' in line:
if '][' in line:
# Old RHEL 5 format:
for (k, v) in PROPERTY_SQBRKT_REG.findall(line):
mpath_dev[k] = v
# Handle un-named write policy attribute on the end:
mpath_dev['wp'] = line[-3:-1]
else:
# Newer RHEL 6 format:
attr_line = shlex.split(line)
for item in attr_line:
(k, v) = item.split('=', 1)
mpath_dev[k] = v
elif PATHGROUP_POLICY_REG.search(line):
m = PATHGROUP_POLICY_REG.search(line)
# New path info - save the previous if we have one:
if path_info:
path_group_attr['path'] = path_info
path_group.append(path_group_attr)
path_group_attr = {}
path_info = []
path_group_attr['policy'] = m.group('policy')
path_group_attr['prio'] = m.group('priority')
path_group_attr['status'] = m.group('status')
elif HCTL_REG.search(line):
colon_index = line.index(":")
# Dodgy hack to convert RHEL 5 attributes in square brackets into
# spaced out words to combine into the list
line = line.replace('[', ' ').replace(']', ' ')
path_info.append(line[colon_index - 2:].split())
# final save of outstanding path and path group data:
if path_info:
path_group_attr['path'] = path_info
path_group.append(path_group_attr)
if path_group:
mpath_dev['path_group'] = path_group
mpath_dev_all.append(mpath_dev)
self.devices = mpath_dev_all
# Create some extra accessor properties
self.dms = [path['dm_name'] for path in self.devices if 'dm_name' in path]
self.by_dm = dict((path['dm_name'], path) for path in self.devices if 'dm_name' in path)
self.aliases = [path['alias'] for path in self.devices if 'alias' in path]
self.by_alias = dict((path['alias'], path) for path in self.devices if 'alias' in path)
self.wwids = [path['wwid'] for path in self.devices if 'wwid' in path]
self.by_wwid = dict((path['wwid'], path) for path in self.devices if 'wwid' in path)
def __len__(self):
"""
The length of the devices list
"""
return len(self.devices)
def __iter__(self):
"""
Iterate through the devices list
"""
for device in self.devices:
yield device
def __getitem__(self, idx):
"""
Fetch a device by index in devices list
"""
return self.devices[idx]
@parser(multipath__v4__ll)
def get_multipath_v4_ll(context):
return MultipathDevices(context).devices
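# --- Hedged usage sketch (not part of the parser module) -----------------------
# Feeding captured `multipath -v4 -ll` output straight into the parser. The
# `context_wrap` helper is assumed to come from insights' test utilities; adapt
# the import if your insights version lays this out differently.
def _example_lookup(multipath_output_text):
    from insights.tests import context_wrap  # assumption: standard insights test helper
    mpaths = MultipathDevices(context_wrap(multipath_output_text))
    # Look a device up by its device-mapper name, e.g. 'dm-2'
    return mpaths.by_dm.get('dm-2')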
|
|
__author__ = 'Elahe'
import sqlite3 as lite
import csv
import numpy as np
import ephem
import NightDataGenerator as ndg
''' Connect to the FBDE database '''
def DBreadNwrite(key, Date, Site, **keyword_parameters):
    '''Write the night's scheduling results into MafFBDE.db.
    :param key: 'w' writes the results; 'r' currently returns without action
    **keyword_parameters: optional session/proposal metadata, e.g. sessionID,
        sessionUser, sessionHost, sessionDate, version, runComment, propID,
        propConf, propName, objectID, objectHost, Session_sessionID
    '''
if key == 'w':
FBDEcon = lite.connect('MafFBDE.db')
FBDEcur = FBDEcon.cursor()
Watch = np.load("Output/Watch{}.npy".format(int(ephem.julian_date(Date))))
Schedule = np.load("Output/Schedule{}.npy".format(int(ephem.julian_date(Date))))
Schedule['ephemDate'] += 15019.5
Summary = np.load("Output/Summary{}.npy".format(int(ephem.julian_date(Date))))
Summary[0] += 15019.5; Summary[1] += 15019.5
Conf = np.load('NightDataInLIS/Config{}.npy'.format(int(ephem.julian_date(Date))))
# 3 by n_fields matrix of ID, RA, Dec
all_fields = np.loadtxt("NightDataInLIS/Constants/fieldID.lis", dtype = np.dtype([('id', 'i4'), ('ra', 'f8'), ('dec','f8')]))
N_visits = np.count_nonzero(Schedule['Field_id'])
sessionID = 0; sessionUser = 0; sessionHost = 0; sessionDate = 0; version = 0; runComment = 0
if('sessionID' in keyword_parameters):
sessionID = keyword_parameters['sessionID']
if('sessionUser' in keyword_parameters):
sessionUser = keyword_parameters['sessionUser']
if('sessionHost' in keyword_parameters):
sessionHost = keyword_parameters['sessionHost']
if('sessionDate' in keyword_parameters):
sessionDate = keyword_parameters['sessionDate']
if('version' in keyword_parameters):
version = keyword_parameters['version']
if('runComment' in keyword_parameters):
runComment = keyword_parameters['runComment']
FBDEcur.execute('CREATE TABLE IF NOT EXISTS Config ('
'configID INTEGER PRIMARY KEY, '
'moduleName TEXT, '
'paramIndex INTEGER, '
'paramName TEXT, '
'paramValue TEXT, '
'comment TEXT, '
'Session_sessionID INTEGER, '
'nonPropID INTEGER)')
with open('NightDataInLIS/Constants/conf.dmp','rb') as fin:
dr = csv.DictReader(fin) # comma is default delimiter
to_db = [(i['configID'], i['moduleName'], i['paramIndex'], i['paramName'], i['paramValue'], i['comment'], sessionID, i['nonPropID']) for i in dr]
FBDEcur.executemany("INSERT INTO Config (configID, moduleName, paramIndex, paramName, paramValue, comment, Session_sessionID, nonPropID) VALUES (?, ?, ?, ?, ?, ?, ?, ?);", to_db)
FBDEcon.commit()
FBDEcur.execute('CREATE TABLE IF NOT EXISTS Session ('
'sessionID INTEGER PRIMARY KEY, '
'sessionUser TEXT, '
'sessionHost TEXT, '
'sessionDate TEXT, '
'version TEXT, '
'runComment TEXT)')
try:
FBDEcur.execute('INSERT INTO Session VALUES (?, ?, ?, ?, ?, ?)',
(sessionID, sessionUser, sessionHost, sessionDate, version, runComment))
except:
pass
FBDEcur.execute('CREATE TABLE IF NOT EXISTS ObsHistory ('
'obsHistID INTEGER PRIMARY KEY, '
'Session_sessionID INTEGER, '
'filter TEXT, '
'expDate INTEGER, '
'expMJD REAL, '
'night INTEGER, '
'visitTime REAL, '
'visitExpTime REAL, '
'finRank REAL, '
'finSeeing REAL, '
'transparency REAL, '
'airmass REAL, '
'vSkyBright REAL, '
'filtSkyBrightness REAL, '
'rotSkyPos REAL, '
'lst REAL, '
'altitude REAL, '
'azimuth REAL, '
'dist2Moon REAL, '
'solarElong REAL, '
'moonRA REAL, '
'moonDec REAL, '
'moonAlt REAL, '
'moonAZ REAL, '
'moonPhase REAL, '
'sunAlt REAL, '
'sunAZ REAL, '
'phaseAngle REAL, '
'rScatter REAL, '
'mieScatter REAL, '
'moonIllum REAL, '
'moonBright REAL, '
'darkBright REAL, '
'rawSeeing REAL, '
'wind REAL, '
'humidity REAL, '
'fiveSigmaDepth REAL, '
'ditheredRA REAL, '
'ditheredDec REAL, '
'Field_fieldID INTEGER)')
obsHistID = 0; Session_sessionID = sessionID; filter = 0; expDate = 0; expMJD = 0; night = 0
visitTime = float(Conf['visitTime'])
visitExpTime = float(Conf['visitExpTime'])
finRank = 0; finSeeing = 0; transparency = 0; airmass = 0
vSkyBright = 0; filtSkyBrightness = 0
rotSkyPos = 0; lst = 0; altitude = 0; azimuth = 0; dist2Moon = 0; solarElong = 0; moonRA = 0
moonDec = 0; moonAlt = 0; moonAZ = 0; moonPhase = 0; sunAlt = 0; sunAZ = 0; phaseAngle = 0
rScatter = 0; mieScatter = 0; moonIllum = 0; moonBright = 0; darkBright = 0; rawSeeing = 0; wind = 0
humidity = 0; fiveSigmaDepth = 0; ditheredRA = 0; ditheredDec = 0; Field_fieldID = 0
FBDEcur.execute('CREATE TABLE IF NOT EXISTS SlewHistory ('
'slewID INTEGER PRIMARY KEY, '
'slewCount INTEGER, '
'startDate REAL, '
'endDate REAL, '
'slewTime REAL, '
'slewDist REAL, '
'ObsHistory_obsHistID INTEGER, '
'ObsHistory_Session_sessionID INTEGER)')
slewID = 0; slewCount = 0; startDate = 0; endDate = 0; slewTime = 0; slewDist = 0; ObsHistory_obsHistID = 0
ObsHistory_Session_sessionID = 0
try:
            FBDEcur.execute('SELECT * FROM SlewHistory ORDER BY slewID DESC LIMIT 1')
last_row_sch = FBDEcur.fetchone()
slewCount = last_row_sch[0]
except:
pass
try:
FBDEcur.execute('SELECT * FROM ObsHistory ORDER BY obsHistID DESC LIMIT 1')
last_row_sch = FBDEcur.fetchone()
obsHistID = last_row_sch[0]
except:
pass
try:
FBDEcur.execute('SELECT * FROM ObsHistory ORDER BY night DESC LIMIT 1')
last_row_ns = FBDEcur.fetchone()
night = last_row_ns[0]
except:
pass
night += 1
source = ephem.FixedBody()
prev_field = ephem.FixedBody()
moon = ephem.Moon()
sun = ephem.Sun()
for index in range(N_visits):
obsHistID += 1
slewCount += 1
Field_fieldID = Schedule[index]['Field_id']
expMJD = Schedule[index]['ephemDate'] - visitTime * ephem.second
expDate = (expMJD - 59560 + 15019.5)/ ephem.second
filter = Schedule[index]['Filter']
### Astro parameters
Site.date = expMJD - 15019.5
eq = ephem.Equatorial(np.radians(all_fields['ra'][Field_fieldID -1]), np.radians(all_fields['dec'][Field_fieldID -1]))
source._ra = eq.ra
source._dec = eq.dec
source._epoch = eq.epoch
source.compute(Site)
altitude = source.alt
azimuth = source.az
airmass = ndg.secz(altitude)
moon.compute(Site)
dist2Moon = ephem.separation(moon,source)
moonRA = moon.ra
moonDec = moon.dec
            moonAlt = moon.alt
moonAZ = moon.az
moonPhase = moon.phase
sun.compute(Site)
sunAlt = sun.alt
            sunAZ = sun.az
try:
                slewDist = ephem.separation(source, prev_field)
except:
slewDist = 0
prev_field = source
n_ton = Schedule[index]['n_ton']
n_last = Schedule[index]['n_last']
Cost = Schedule[index]['Cost']
t_since_v_ton = Schedule[index]['t_since_v_ton']
t_since_v_last= Schedule[index]['t_since_v_last']
Alt = Schedule[index]['Alt']
HA = Schedule[index]['HA']
t_to_invis = Schedule[index]['t_to_invis']
Sky_bri = Schedule[index]['Sky_bri']
Temp_coverage = Schedule[index]['Temp_coverage']
slewTime = Schedule[index]['Slew_t']
startDate = expMJD + visitTime * ephem.second
endDate = startDate + slewTime
FBDEcur.execute('INSERT INTO ObsHistory VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?,'
'?, ?, ?, ?, ?, ?, ?, ?, ?, ?,'
'?, ?, ?, ?, ?, ?, ?, ?, ?, ?,'
'?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
(obsHistID, Session_sessionID, filter, expDate, expMJD, night, visitTime, visitExpTime,
finRank, finSeeing, transparency, airmass, vSkyBright, filtSkyBrightness, rotSkyPos,
lst, altitude, azimuth, dist2Moon, solarElong, moonRA, moonDec, moonAlt, moonAZ,
moonPhase,sunAlt, sunAZ, phaseAngle, rScatter, mieScatter, moonIllum, moonBright,
darkBright, rawSeeing, wind, humidity, fiveSigmaDepth, ditheredRA, ditheredDec, Field_fieldID))
FBDEcur.execute('INSERT INTO SlewHistory VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
(slewCount, slewCount, startDate, endDate, slewTime, slewDist,
ObsHistory_obsHistID, ObsHistory_Session_sessionID))
FBDEcur.execute('CREATE TABLE Field ('
'fieldID INTEGER PRIMARY KEY, '
'fieldFov REAL, '
'fieldRA REAL, '
'fieldDec REAL, '
'fieldGL REAL, '
'fieldGB REAL, '
'fieldEL REAL, '
'fieldEB REAL)')
fieldID = all_fields['id']; fieldRA = all_fields['ra']; fieldDec = all_fields['dec']
fieldFov= 0; fieldGL = 0; fieldGB = 0; fieldEL = 0; fieldEB = 0
for id,ra,dec in zip(fieldID, fieldRA, fieldDec):
            # Values must follow the Field schema order: fieldID, fieldFov, fieldRA, fieldDec, fieldGL, fieldGB, fieldEL, fieldEB
            FBDEcur.execute('INSERT INTO Field VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
                            (int(id), fieldFov, ra, dec, fieldGL, fieldGB, fieldEL, fieldEB))
FBDEcur.execute('CREATE TABLE Proposal ('
'propID INTEGER PRIMARY KEY, '
'propConf TEXT, '
'propName TEXT, '
'objectID INTEGER, '
'objectHost TEXT, '
'Session_sessionID INTEGER)')
propID = 0; propConf = 0; propName = 0; objectID = 0; objectHost = 0; Session_sessionID =0;
if('propID' in keyword_parameters):
propID = keyword_parameters['propID']
if('propConf' in keyword_parameters):
propConf = keyword_parameters['propConf']
if('propName' in keyword_parameters):
propName = keyword_parameters['propName']
if('objectID' in keyword_parameters):
objectID = keyword_parameters['objectID']
if('objectHost' in keyword_parameters):
objectHost = keyword_parameters['objectHost']
if('Session_sessionID' in keyword_parameters):
Session_sessionID = keyword_parameters['Session_sessionID']
FBDEcur.execute('INSERT INTO Proposal VALUES (?, ?, ?, ?, ?, ?)',
(propID, propConf, propName, objectID, objectHost, Session_sessionID))
FBDEcur.execute('CREATE TABLE SeqHistory ('
'sequenceID INTEGER PRIMARY KEY, '
'startDate INTEGER, '
'expDate INTEGER, '
'seqnNum INTEGER, '
'completion REAL, '
'reqEvents INTEGER, '
'actualEvents INTEGER, '
'endStatus INTEGER, '
'parent_sequenceID INTEGER, '
'Field_fieldID INTEGER, '
'Session_sessionID INTEGER, '
'Proposal_propID INTEGER)')
sequenceID = 0; startDate =0; expDate = 0; seqnNum = 0; completion =0; reqEvents = 0; actualEvents =0
endStatus = 0; parent_sequenceID = 0; Field_fieldID = 0; Session_sessionID = Session_sessionID; Proposal_propID = 0
FBDEcur.execute('INSERT INTO SeqHistory VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
(sequenceID, startDate, expDate, seqnNum, completion, reqEvents, actualEvents,
endStatus, parent_sequenceID, Field_fieldID, Session_sessionID, Proposal_propID))
FBDEcur.execute('CREATE TABLE SlewActivities ('
'slewActivityID INTEGER PRIMARY KEY, '
'activity TEXT, '
'actDelay REAL, '
'inCriticalPath TEXT, '
'SlewHistory_slewID INTEGER)')
slewActivityID = 0; activity = 0; actDelay = 0; inCriticalPath = 0; SlewHistory_slewID = 0
FBDEcur.execute('INSERT INTO SlewActivities VALUES (?, ?, ?, ?, ?)',(slewActivityID, activity, actDelay, inCriticalPath, SlewHistory_slewID))
FBDEcur.execute('CREATE TABLE SlewState ('
'slewIniStatID INTEGER PRIMARY KEY, '
'slewStateDate REAL, '
'tra REAL, '
'tdec REAL, '
'tracking TEXT, '
'alt REAL, '
'az REAL, '
'pa REAL, '
'domAlt REAL, '
'domAz REAL, '
'telAlt REAL, '
'telAz REAL, '
'rotTelPos REAL, '
'filter TEXT, '
'state INTEGER, '
'SlewHistory_slewID INTEGER)')
FBDEcur.execute('CREATE TABLE SlewMaxSpeeds ('
'slewMaxSpeedID INTEGER PRIMARY KEY, '
'domAltSpd REAL, '
'domAzSpd REAL, '
'telAltSpd REAL, '
'telAzSpd REAL, '
'rotSpd REAL, '
'SlewHistory_slewID INTEGER)')
FBDEcur.execute('CREATE TABLE TimeHistory ('
'timeHistID INTEGER PRIMARY KEY, '
'date INTEGER, '
'mjd REAL, '
'night INTEGER, '
'event INTEGER, '
'Session_sessionID INTEGER)')
FBDEcur.execute('CREATE TABLE ObsHistory_Proposal ('
'obsHistory_propID INTEGER PRIMARY KEY, '
'Proposal_propID INTEGER, '
'propRank REAL, '
'ObsHistory_obsHistID INTEGER, '
'ObsHistory_Session_sessionID INTEGER)')
FBDEcur.execute('CREATE TABLE Cloud ('
'cloudID INTEGER PRIMARY KEY, '
'c_date INTEGER, '
'cloud REAL)')
FBDEcur.execute('CREATE TABLE Seeing ('
'seeingID INTEGER PRIMARY KEY, '
's_date INTEGER, '
'seeing REAL)')
FBDEcur.execute('CREATE TABLE Log ('
'logID INTEGER PRIMARY KEY, '
'log_name TEXT, '
'log_value TEXT, '
'Session_sessionID INTEGER)')
FBDEcur.execute('CREATE TABLE Config_File ('
'config_fileID INTEGER PRIMARY KEY, '
'filename TEXT, '
'data TEXT, '
'Session_sessionID INTEGER)')
FBDEcur.execute('CREATE TABLE Proposal_Field ('
'proposal_field_id INTEGER PRIMARY KEY, '
'Session_sessionID INTEGER, '
'Proposal_propID INTEGER, '
'Field_fieldID INTEGER)')
FBDEcur.execute('CREATE TABLE SeqHistory_ObsHistory ('
'seqhistory_obsHistID INTEGER PRIMARY KEY, '
'SeqHistory_sequenceID INTEGER, '
'ObsHistory_obsHistID INTEGER, '
'ObsHistory_Session_sessionID INTEGER)')
FBDEcur.execute('CREATE TABLE MissedHistory ('
'missedHistID INTEGER PRIMARY KEY, '
'Session_sessionID INTEGER, '
'filter TEXT, expDate INTEGER, '
'expMJD REAL, '
'night INTEGER, '
'lst REAL, '
'Field_fieldID INTEGER)')
FBDEcur.execute('CREATE TABLE SeqHistory_MissedHistory ('
'seqhistory_missedHistID INTEGER PRIMARY KEY, '
'SeqHistory_sequenceID INTEGER, '
'MissedHistory_missedHistID INTEGER, '
'MissedHistory_Session_sessionID INTEGER)')
FBDEcur.execute('CREATE TABLE Summary ('
'obsHistID INTEGER, '
'sessionID INTEGER, '
'propID INTEGER, '
'fieldID INTEGER, '
'fieldRA REAL, '
'fieldDec REAL, '
'filter TEXT, '
'expDate INTEGER, '
'expMJD REAL, '
'night INTEGER, '
'visitTime REAL, '
'visitExpTime REAL, '
'finRank REAL, '
'finSeeing REAL, '
'transparency REAL, '
'airmass REAL, '
'vSkyBright REAL, '
'filtSkyBrightness REAL, '
'rotSkyPos REAL, '
'lst REAL, '
'altitude REAL, '
'azimuth REAL, '
'dist2Moon REAL, '
'solarElong REAL, '
'moonRA REAL, '
'moonDec REAL, '
'moonAlt REAL, '
'moonAZ REAL, '
'moonPhase REAL, '
'sunAlt REAL, '
'sunAz REAL, '
'phaseAngle REAL, '
'rScatter REAL, '
'mieScatter REAL, '
'moonIllum REAL, '
'moonBright REAL, '
'darkBright REAL, '
'rawSeeing REAL, '
'wind REAL, '
'humidity REAL, '
'slewDist REAL, '
'slewTime REAL, '
'fiveSigmaDepth REAL, '
'ditheredRA REAL, '
'ditheredDec REAL)')
if key == 'r':
return
return
Site = ephem.Observer()
Site.lon = -1.2320792
Site.lat = -0.517781017
Site.elevation = 2650
Site.pressure = 0.
Site.horizon = 0.
Date = ephem.Date('2016/09/01 12:00:00.00') # times are in UT
DBreadNwrite('w', Date, Site, sessionID = 1, sessionUser= 'Elahe')
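# --- Hedged read-back sketch (not part of the original script) -----------------
# After the 'w' pass above has populated MafFBDE.db, the tables can be inspected
# with plain sqlite3 queries; the aggregate below is one illustrative example.
def summarize_night(db_path='MafFBDE.db'):
    con = lite.connect(db_path)
    cur = con.cursor()
    cur.execute('SELECT COUNT(*), MIN(expMJD), MAX(expMJD) FROM ObsHistory')
    n_visits, first_mjd, last_mjd = cur.fetchone()
    con.close()
    return n_visits, first_mjd, last_mjd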
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Facilities for creating multiple test combinations.
Here is an example of testing various optimizers in Eager and Graph mode:
class AdditionExample(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(mode=["graph", "eager"],
optimizer=[AdamOptimizer(),
GradientDescentOptimizer()]))
def testOptimizer(self, optimizer):
... f(optimizer)...
This will run `testOptimizer` 4 times with the specified optimizers: 2 in
Eager and 2 in Graph mode.
The test will be provided with arguments that match the arguments of combine
by name. It is necessary to request all arguments, except for `mode`, which is
optional.
The `combine()` function is available for creating a cross product of various
options, and the `times()` function for creating a product of N `combine()`-ed
results. See below.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import sys
import types
import unittest
from absl.testing import parameterized
import six
from tensorflow.contrib.cluster_resolver import TPUClusterResolver
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.distribute.python import one_device_strategy as one_device_lib
from tensorflow.contrib.distribute.python import tpu_strategy as tpu_lib
from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.contrib.optimizer_v2 import adam as adam_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import tf_inspect
GPU_TEST = "test_gpu" in sys.argv[0]
TPU_TEST = "test_tpu" in sys.argv[0]
def generate(combinations):
"""A decorator for generating test cases of a test method or a test class.
Args:
combinations: a list of dictionaries created using combine() and times().
Restrictions:
-- the "mode" argument can be either "eager" or "graph". It's "graph" by
default.
-- arguments of the test method must match by name to get the corresponding
value of the combination. Tests must accept all arguments except the
"mode", "required_tpu" and "required_gpus".
-- "distribution" argument is special and optional. It is meant for passing
       instances of DistributionStrategy. Each instance is to be passed via a
`NamedDistribution`. If using "distribution", "required_gpus" and
"required_tpu" should be specified via the NamedDistribution instance,
rather than as separate arguments.
-- "required_tpu" argument is special and optional. If not `None`, then the
test will be skipped if TPUs aren't available.
-- "required_gpus" argument is special and optional. If not `None`, then the
test will be skipped if the specified number of GPUs aren't available.
Returns:
a decorator that will cause the test method or the test class to be run
under the specified conditions.
Raises:
ValueError - if "mode" argument wasn't either "eager" or "graph" or if other
arguments were not accepted by the test method.
"""
def decorator(test_method_or_class):
"""The decorator to be returned."""
# Generate good test names that can be used with --test_filter.
named_combinations = []
for combination in combinations:
# We use OrderedDicts in `combine()` and `times()` to ensure stable
# order of keys in each dictionary.
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format(
"".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) + [("testcase_name",
"_test{}".format(name))]))
if isinstance(test_method_or_class, type):
class_object = test_method_or_class
class_object._test_method_ids = test_method_ids = {}
for name, test_method in six.iteritems(class_object.__dict__.copy()):
if (name.startswith(unittest.TestLoader.testMethodPrefix) and
isinstance(test_method, types.FunctionType)):
delattr(class_object, name)
methods = {}
parameterized._update_class_dict_for_param_test_case(
class_object.__name__, methods, test_method_ids, name,
parameterized._ParameterizedTestIter(
_augment_with_special_arguments(test_method),
named_combinations, parameterized._NAMED, name))
for method_name, method in six.iteritems(methods):
setattr(class_object, method_name, method)
return class_object
else:
test_method = _augment_with_special_arguments(test_method_or_class)
return parameterized.named_parameters(*named_combinations)(test_method)
return decorator
def _augment_with_special_arguments(test_method):
def decorated(self, **kwargs):
"""A wrapped test method that treats some arguments in a special way."""
mode = kwargs.pop("mode", "graph")
distribution = kwargs.get("distribution", None)
required_tpu = kwargs.pop("required_tpu", False)
required_gpus = kwargs.pop("required_gpus", None)
if distribution:
assert required_gpus is None, (
"Do not use `required_gpus` and `distribution` together.")
assert required_tpu is False, (
"Do not use `required_tpu` and `distribution` together.")
required_gpus = distribution.required_gpus
required_tpu = distribution.required_tpu
if required_tpu and not TPU_TEST:
self.skipTest("Test requires a TPU, but it's not available.")
if not required_tpu and TPU_TEST:
self.skipTest("Test that doesn't require a TPU.")
if not required_gpus:
if GPU_TEST:
self.skipTest("Test that doesn't require GPUs.")
elif context.num_gpus() < required_gpus:
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available".
format(required_gpus, context.num_gpus()))
# At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
# that the user might have specified. `kwargs` still has `mode`, which
# the test is allowed to accept or ignore.
requested_arguments = tf_inspect.getfullargspec(test_method).args
missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
set(requested_arguments + ["mode"]))
if missing_arguments:
raise ValueError("The test is missing arguments {} .".format(
missing_arguments))
kwargs_to_pass = {}
for arg in requested_arguments:
if arg == "self":
kwargs_to_pass[arg] = self
else:
kwargs_to_pass[arg] = kwargs[arg]
if mode == "eager":
with ops.Graph().as_default(), context.eager_mode():
if distribution:
kwargs_to_pass["distribution"] = distribution.strategy
test_method(**kwargs_to_pass)
elif mode == "graph":
with ops.Graph().as_default(), context.graph_mode():
if distribution:
kwargs_to_pass["distribution"] = distribution.strategy
test_method(**kwargs_to_pass)
else:
raise ValueError(
"'mode' has to be either 'eager' or 'graph' and not {}".format(
mode))
return decorated
def combine(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = combine(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
return [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
def times(*combined):
"""Generate a product of N sets of combinations.
times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4])
Args:
*combined: N lists of dictionaries that specify combinations.
Returns:
a list of dictionaries for each combination.
Raises:
ValueError: if some of the inputs have overlapping keys.
"""
assert combined
if len(combined) == 1:
return combined[0]
first = combined[0]
rest_combined = times(*combined[1:])
combined_results = []
for a in first:
for b in rest_combined:
if set(a.keys()).intersection(set(b.keys())):
raise ValueError("Keys need to not overlap: {} vs {}".format(
a.keys(), b.keys()))
combined_results.append(OrderedDict(list(a.items()) + list(b.items())))
return combined_results
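# Illustrative note (not in the original module): with the definitions above,
#   combine(mode=["graph", "eager"], optimizer=["sgd"])
# yields one OrderedDict per mode, with keys ordered by `sort_by_key`:
#   [OrderedDict([("mode", "graph"), ("optimizer", "sgd")]),
#    OrderedDict([("mode", "eager"), ("optimizer", "sgd")])]
# and times(combine(a=[1, 2]), combine(b=[3, 4])) produces the same four
# dictionaries as combine(a=[1, 2], b=[3, 4]).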
class NamedObject(object):
"""A class that translates an object into a good test name."""
def __init__(self, name, obj):
self._name = name
self._obj = obj
def __getattr__(self, name):
return getattr(self._obj, name)
def __call__(self, *args, **kwargs):
return self._obj(*args, **kwargs)
def __repr__(self):
return self._name
class NamedDistribution(object):
"""Translates DistributionStrategy and its data into a good name."""
def __init__(self, name, distribution_fn, required_gpus=None,
required_tpu=False):
self._distribution_fn = distribution_fn
self._name = name
self._required_gpus = required_gpus
self._required_tpu = required_tpu
def __repr__(self):
return self._name
@property
def strategy(self):
return self._distribution_fn()
@property
def required_gpus(self):
return self._required_gpus
@property
def required_tpu(self):
return self._required_tpu
# pylint: disable=g-long-lambda
default_strategy = NamedDistribution(
"Default",
distribution_strategy_context._get_default_distribution_strategy, # pylint: disable=protected-access
required_gpus=None)
one_device_strategy = NamedDistribution(
"OneDeviceCPU", lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
required_gpus=None)
tpu_strategy = NamedDistribution(
"TPU", lambda: tpu_lib.TPUStrategy(
TPUClusterResolver(""), steps_per_run=5),
required_tpu=True)
tpu_strategy_one_step = NamedDistribution(
"TPU", lambda: tpu_lib.TPUStrategy(
TPUClusterResolver(""), steps_per_run=1),
required_tpu=True)
# Note that we disable prefetching for testing since prefetching makes
# the input non-deterministic.
mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
"MirroredCPUAndGPU",
lambda: mirrored_lib.MirroredStrategy(
["/gpu:0", "/cpu:0"], prefetch_on_device=False),
required_gpus=1)
mirrored_strategy_with_two_gpus = NamedDistribution(
"Mirrored2GPUs",
lambda: mirrored_lib.MirroredStrategy(
["/gpu:0", "/gpu:1"], prefetch_on_device=False),
required_gpus=2)
adam_optimizer_v1_fn = NamedObject(
"AdamV1", lambda: adam.AdamOptimizer(0.001, epsilon=1))
gradient_descent_optimizer_v1_fn = NamedObject(
"GradientDescentV1", lambda: gradient_descent.GradientDescentOptimizer(0.2))
adagrad_optimizer_v1_fn = NamedObject(
"AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
optimizers_v1 = [adam_optimizer_v1_fn, gradient_descent_optimizer_v1_fn,
adagrad_optimizer_v1_fn]
adam_optimizer_v2_fn = NamedObject(
"AdamV2", lambda: adam_v2.AdamOptimizer(0.001, epsilon=1))
gradient_descent_optimizer_v2_fn = NamedObject(
"GradientDescentV2",
lambda: gradient_descent_v2.GradientDescentOptimizer(0.2))
adagrad_optimizer_v2_fn = NamedObject(
"AdagradV2", lambda: adagrad_v2.AdagradOptimizer(0.001))
optimizers_v2 = [adam_optimizer_v2_fn, gradient_descent_optimizer_v2_fn,
adagrad_optimizer_v2_fn]
graph_and_eager_modes = ["graph", "eager"]
def distributions_and_v1_optimizers():
"""A common set of combination with DistributionStrategies and Optimizers."""
return combine(
distribution=[
one_device_strategy, mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus
],
optimizer_fn=optimizers_v1)
def distributions_and_v2_optimizers():
"""DistributionStrategies and V2 Optimizers."""
return combine(
distribution=[
one_device_strategy, mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus
],
optimizer_fn=optimizers_v2)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from errno import EACCES, ENOENT, EPERM
from functools import reduce
from logging import getLogger
from os import listdir
from os.path import basename, dirname, join
from tarfile import ReadError
from conda._vendor.auxlib.decorators import memoizemethod
from .path_actions import CacheUrlAction, ExtractPackageAction
from .. import CondaError, CondaMultiError, conda_signal_handler
from .._vendor.auxlib.collection import first
from ..base.constants import CONDA_TARBALL_EXTENSION, PACKAGE_CACHE_MAGIC_FILE
from ..base.context import context
from ..common.compat import (JSONDecodeError, iteritems, itervalues, odict, string_types,
text_type, with_metaclass)
from ..common.constants import NULL
from ..common.io import ProgressBar, time_recorder
from ..common.path import expand, url_to_path
from ..common.signals import signal_handler
from ..common.url import path_to_url
from ..exceptions import NotWritableError
from ..gateways.disk.create import (create_package_cache_directory, extract_tarball,
write_as_json_to_file)
from ..gateways.disk.delete import rm_rf
from ..gateways.disk.read import (compute_md5sum, isdir, isfile, islink, read_index_json,
read_index_json_from_tarball, read_repodata_json)
from ..gateways.disk.test import file_path_is_writable
from ..models.dist import Dist
from ..models.match_spec import MatchSpec
from ..models.records import PackageCacheRecord, PackageRecord, PackageRef
from ..utils import human_bytes
try:
from cytoolz.itertoolz import concat, concatv, groupby
except ImportError: # pragma: no cover
from .._vendor.toolz.itertoolz import concat, concatv, groupby # NOQA
log = getLogger(__name__)
class PackageCacheType(type):
"""
This metaclass does basic caching of PackageCache instance objects.
"""
def __call__(cls, pkgs_dir):
if isinstance(pkgs_dir, PackageCacheData):
return pkgs_dir
elif pkgs_dir in PackageCacheData._cache_:
return PackageCacheData._cache_[pkgs_dir]
else:
package_cache_instance = super(PackageCacheType, cls).__call__(pkgs_dir)
PackageCacheData._cache_[pkgs_dir] = package_cache_instance
return package_cache_instance
@with_metaclass(PackageCacheType)
class PackageCacheData(object):
_cache_ = {}
def __init__(self, pkgs_dir):
self.pkgs_dir = pkgs_dir
self.__package_cache_records = None
self.__is_writable = None
self._urls_data = UrlsData(pkgs_dir)
def insert(self, package_cache_record):
meta = join(package_cache_record.extracted_package_dir, 'info', 'repodata_record.json')
write_as_json_to_file(meta, PackageRecord.from_objects(package_cache_record))
self._package_cache_records[package_cache_record] = package_cache_record
def load(self):
self.__package_cache_records = _package_cache_records = {}
self._check_writable() # called here to create the cache if it doesn't exist
if not isdir(self.pkgs_dir):
# no directory exists, and we didn't have permissions to create it
return
for base_name in self._dedupe_pkgs_dir_contents(listdir(self.pkgs_dir)):
full_path = join(self.pkgs_dir, base_name)
if islink(full_path):
continue
elif (isdir(full_path) and isfile(join(full_path, 'info', 'index.json'))
or isfile(full_path) and full_path.endswith(CONDA_TARBALL_EXTENSION)):
package_cache_record = self._make_single_record(base_name)
if package_cache_record:
_package_cache_records[package_cache_record] = package_cache_record
def reload(self):
self.load()
return self
def get(self, package_ref, default=NULL):
assert isinstance(package_ref, PackageRef)
try:
return self._package_cache_records[package_ref]
except KeyError:
if default is not NULL:
return default
else:
raise
def remove(self, package_ref, default=NULL):
if default is NULL:
return self._package_cache_records.pop(package_ref)
else:
return self._package_cache_records.pop(package_ref, default)
def query(self, package_ref_or_match_spec):
# returns a generator
param = package_ref_or_match_spec
if isinstance(param, string_types):
param = MatchSpec(param)
if isinstance(param, MatchSpec):
return (pcrec for pcrec in itervalues(self._package_cache_records)
if param.match(pcrec))
else:
assert isinstance(param, PackageRef)
return (pcrec for pcrec in itervalues(self._package_cache_records) if pcrec == param)
def iter_records(self):
return iter(self._package_cache_records)
@classmethod
def query_all(cls, package_ref_or_match_spec, pkgs_dirs=None):
if pkgs_dirs is None:
pkgs_dirs = context.pkgs_dirs
return concat(pcache.query(package_ref_or_match_spec) for pcache in concatv(
cls.writable_caches(pkgs_dirs),
cls.read_only_caches(pkgs_dirs),
))
# ##########################################################################################
# these class methods reach across all package cache directories (usually context.pkgs_dirs)
# ##########################################################################################
@classmethod
def first_writable(cls, pkgs_dirs=None):
return cls.writable_caches(pkgs_dirs)[0]
@classmethod
def writable_caches(cls, pkgs_dirs=None):
if pkgs_dirs is None:
pkgs_dirs = context.pkgs_dirs
writable_caches = tuple(filter(lambda c: c.is_writable,
(cls(pd) for pd in pkgs_dirs)))
if not writable_caches:
# TODO: raise NoWritablePackageCacheError()
raise CondaError("No writable package cache directories found in\n"
"%s" % text_type(pkgs_dirs))
return writable_caches
@classmethod
def read_only_caches(cls, pkgs_dirs=None):
if pkgs_dirs is None:
pkgs_dirs = context.pkgs_dirs
read_only_caches = tuple(filter(lambda c: not c.is_writable,
(cls(pd) for pd in pkgs_dirs)))
return read_only_caches
@classmethod
def get_all_extracted_entries(cls):
package_caches = (cls(pd) for pd in context.pkgs_dirs)
return tuple(pc_entry for pc_entry in concat(map(itervalues, package_caches))
if pc_entry.is_extracted)
@classmethod
def get_entry_to_link(cls, package_ref):
pc_entry = next((pcrec for pcrec in cls.query_all(package_ref)
if pcrec.is_extracted),
None)
if pc_entry is not None:
return pc_entry
# this can happen with `conda install path/to/package.tar.bz2`
# because dist has channel '<unknown>'
# if ProgressiveFetchExtract did its job correctly, what we're looking for
# should be the matching dist_name in the first writable package cache
# we'll search all caches for a match, but search writable caches first
caches = concatv(cls.writable_caches(), cls.read_only_caches())
dist_str = package_ref.dist_str().rsplit(':', 1)[-1]
pc_entry = next((cache._scan_for_dist_no_channel(dist_str)
for cache in caches if cache), None)
if pc_entry is not None:
return pc_entry
raise CondaError("No package '%s' found in cache directories." % Dist(package_ref))
@classmethod
def tarball_file_in_cache(cls, tarball_path, md5sum=None, exclude_caches=()):
tarball_full_path, md5sum = cls._clean_tarball_path_and_get_md5sum(tarball_path, md5sum)
pc_entry = first(cls(pkgs_dir).tarball_file_in_this_cache(tarball_full_path,
md5sum)
for pkgs_dir in context.pkgs_dirs
if pkgs_dir not in exclude_caches)
return pc_entry
@classmethod
def clear(cls):
cls._cache_.clear()
def tarball_file_in_this_cache(self, tarball_path, md5sum=None):
tarball_full_path, md5sum = self._clean_tarball_path_and_get_md5sum(tarball_path,
md5sum=md5sum)
tarball_basename = basename(tarball_full_path)
pc_entry = first((pc_entry for pc_entry in itervalues(self)),
key=lambda pce: pce.tarball_basename == tarball_basename
and pce.md5 == md5sum) # NOQA
return pc_entry
@property
def _package_cache_records(self):
# don't actually populate _package_cache_records until we need it
if self.__package_cache_records is None:
self.load()
return self.__package_cache_records
@property
def is_writable(self):
if self.__is_writable is None:
return self._check_writable()
return self.__is_writable
def _check_writable(self):
if isdir(self.pkgs_dir):
i_wri = file_path_is_writable(join(self.pkgs_dir, PACKAGE_CACHE_MAGIC_FILE))
else:
log.trace("package cache directory '%s' does not exist", self.pkgs_dir)
i_wri = create_package_cache_directory(self.pkgs_dir)
log.debug("package cache directory '%s' writable: %s", self.pkgs_dir, i_wri)
self.__is_writable = i_wri
return i_wri
def _ensure_exists(self):
if not isfile(join(self.pkgs_dir, PACKAGE_CACHE_MAGIC_FILE)):
create_package_cache_directory(self.pkgs_dir)
@staticmethod
def _clean_tarball_path_and_get_md5sum(tarball_path, md5sum=None):
if tarball_path.startswith('file:/'):
tarball_path = url_to_path(tarball_path)
tarball_full_path = expand(tarball_path)
if isfile(tarball_full_path) and md5sum is None:
md5sum = compute_md5sum(tarball_full_path)
return tarball_full_path, md5sum
def _scan_for_dist_no_channel(self, dist_str):
return next((pcrec for pcrec in self._package_cache_records
if pcrec.dist_str().rsplit(':', 1)[-1] == dist_str),
None)
def itervalues(self):
return iter(self.values())
def values(self):
return self._package_cache_records.values()
def __repr__(self):
args = ('%s=%r' % (key, getattr(self, key)) for key in ('pkgs_dir',))
return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
def _make_single_record(self, package_filename):
if not package_filename.endswith(CONDA_TARBALL_EXTENSION):
package_filename += CONDA_TARBALL_EXTENSION
package_tarball_full_path = join(self.pkgs_dir, package_filename)
log.trace("adding to package cache %s", package_tarball_full_path)
extracted_package_dir = package_tarball_full_path[:-len(CONDA_TARBALL_EXTENSION)]
# try reading info/repodata_record.json
try:
repodata_record = read_repodata_json(extracted_package_dir)
package_cache_record = PackageCacheRecord.from_objects(
repodata_record,
package_tarball_full_path=package_tarball_full_path,
extracted_package_dir=extracted_package_dir,
)
return package_cache_record
except (IOError, OSError, JSONDecodeError) as e:
            # IOError / OSError if info/repodata_record.json doesn't exist
# JsonDecodeError if info/repodata_record.json is partially extracted or corrupted
# python 2.7 raises ValueError instead of JsonDecodeError
# ValueError("No JSON object could be decoded")
log.debug("unable to read %s\n because %r",
join(extracted_package_dir, 'info', 'repodata_record.json'), e)
# try reading info/index.json
try:
index_json_record = read_index_json(extracted_package_dir)
except (IOError, OSError, JSONDecodeError) as e:
# IOError / OSError if info/index.json doesn't exist
# JsonDecodeError if info/index.json is partially extracted or corrupted
# python 2.7 raises ValueError instead of JsonDecodeError
# ValueError("No JSON object could be decoded")
log.debug("unable to read %s\n because",
join(extracted_package_dir, 'info', 'index.json'), e)
if isdir(extracted_package_dir) and not isfile(package_tarball_full_path):
# We have a directory that looks like a conda package, but without
# (1) info/repodata_record.json or info/index.json, and (2) a conda package
# tarball, there's not much we can do. We'll just ignore it.
return None
try:
if self.is_writable:
if isdir(extracted_package_dir):
# We have a partially unpacked conda package directory. Best thing
# to do is remove it and try extracting.
rm_rf(extracted_package_dir)
try:
extract_tarball(package_tarball_full_path, extracted_package_dir)
except EnvironmentError as e:
if e.errno == ENOENT:
# FileNotFoundError(2, 'No such file or directory')
# At this point, we can assume the package tarball is bad.
# Remove everything and move on.
# see https://github.com/conda/conda/issues/6707
rm_rf(package_tarball_full_path)
rm_rf(extracted_package_dir)
try:
index_json_record = read_index_json(extracted_package_dir)
except (IOError, OSError, JSONDecodeError):
# At this point, we can assume the package tarball is bad.
# Remove everything and move on.
rm_rf(package_tarball_full_path)
rm_rf(extracted_package_dir)
return None
else:
index_json_record = read_index_json_from_tarball(package_tarball_full_path)
except (EOFError, ReadError) as e:
# EOFError: Compressed file ended before the end-of-stream marker was reached
# tarfile.ReadError: file could not be opened successfully
# We have a corrupted tarball. Remove the tarball so it doesn't affect
# anything, and move on.
log.debug("unable to extract info/index.json from %s\n because %r",
package_tarball_full_path, e)
rm_rf(package_tarball_full_path)
return None
# we were able to read info/index.json, so let's continue
if isfile(package_tarball_full_path):
md5 = compute_md5sum(package_tarball_full_path)
else:
md5 = None
url = self._urls_data.get_url(package_filename)
package_cache_record = PackageCacheRecord.from_objects(
index_json_record,
url=url,
md5=md5,
package_tarball_full_path=package_tarball_full_path,
extracted_package_dir=extracted_package_dir,
)
# write the info/repodata_record.json file so we can short-circuit this next time
if self.is_writable:
repodata_record = PackageRecord.from_objects(package_cache_record)
repodata_record_path = join(extracted_package_dir, 'info', 'repodata_record.json')
try:
write_as_json_to_file(repodata_record_path, repodata_record)
except (IOError, OSError) as e:
if e.errno in (EACCES, EPERM) and isdir(dirname(repodata_record_path)):
raise NotWritableError(repodata_record_path, e.errno, caused_by=e)
else:
raise
return package_cache_record
@staticmethod
def _dedupe_pkgs_dir_contents(pkgs_dir_contents):
# if both 'six-1.10.0-py35_0/' and 'six-1.10.0-py35_0.tar.bz2' are in pkgs_dir,
# only 'six-1.10.0-py35_0.tar.bz2' will be in the return contents
if not pkgs_dir_contents:
return []
contents = []
def _process(x, y):
if x + CONDA_TARBALL_EXTENSION != y:
contents.append(x)
return y
last = reduce(_process, sorted(pkgs_dir_contents))
_process(last, contents and contents[-1] or '')
return contents
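# Illustrative sketch of _dedupe_pkgs_dir_contents() (values assumed, shown
# doctest-style but not executed): when both the extracted directory and its
# tarball are present, only the tarball name survives deduplication.
#
#     >>> PackageCacheData._dedupe_pkgs_dir_contents(
#     ...     ['six-1.10.0-py35_0', 'six-1.10.0-py35_0.tar.bz2'])
#     ['six-1.10.0-py35_0.tar.bz2']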
class UrlsData(object):
# this is a class to manage urls.txt
# it should basically be thought of as a sequence
# in this class I'm breaking the rule that all disk access goes through conda.gateways
def __init__(self, pkgs_dir):
self.pkgs_dir = pkgs_dir
self.urls_txt_path = urls_txt_path = join(pkgs_dir, 'urls.txt')
if isfile(urls_txt_path):
with open(urls_txt_path, 'r') as fh:
self._urls_data = [line.strip() for line in fh]
self._urls_data.reverse()
else:
self._urls_data = []
def __contains__(self, url):
return url in self._urls_data
def __iter__(self):
return iter(self._urls_data)
def add_url(self, url):
with open(self.urls_txt_path, 'a') as fh:
fh.write(url + '\n')
self._urls_data.insert(0, url)
@memoizemethod
def get_url(self, package_path):
# package path can be a full path or just a basename
# can be either an extracted directory or tarball
package_path = basename(package_path)
if not package_path.endswith(CONDA_TARBALL_EXTENSION):
package_path += CONDA_TARBALL_EXTENSION
return first(self, lambda url: basename(url) == package_path)
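# Hedged usage sketch for UrlsData (the path and URL below are made up for
# illustration; get_url() matches on the tarball basename recorded in
# urls.txt):
#
#     urls = UrlsData('/opt/conda/pkgs')
#     urls.add_url('https://repo.example.org/noarch/foo-1.0-0.tar.bz2')
#     'https://repo.example.org/noarch/foo-1.0-0.tar.bz2' in urls  # True
#     urls.get_url('foo-1.0-0')  # -> the URL added above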
# ##############################
# downloading
# ##############################
class ProgressiveFetchExtract(object):
@staticmethod
def make_actions_for_record(pref_or_spec):
assert pref_or_spec is not None
# returns a cache_action and extract_action
# if the pref or spec has an md5 value
# look in all caches for package cache record that is
# (1) already extracted, and
# (2) matches the md5
# If one exists, no actions are needed.
md5 = pref_or_spec.get('md5')
if md5:
extracted_pcrec = next((
pcrec for pcrec in concat(PackageCacheData(pkgs_dir).query(pref_or_spec)
for pkgs_dir in context.pkgs_dirs)
if pcrec.is_extracted
), None)
if extracted_pcrec:
return None, None
# there is no extracted dist that can work, so now we look for tarballs that
# aren't extracted
# first we look in all writable caches, and if we find a match, we extract in place
# otherwise, if we find a match in a non-writable cache, we link it to the first writable
# cache, and then extract
first_writable_cache = PackageCacheData.first_writable()
first_writable_cache._ensure_exists()
pcrec_from_writable_cache = next((
pcrec for pcrec in concat(pcache.query(pref_or_spec)
for pcache in PackageCacheData.writable_caches())
if pcrec.is_fetched
), None)
if pcrec_from_writable_cache:
# extract in place
extract_axn = ExtractPackageAction(
source_full_path=pcrec_from_writable_cache.package_tarball_full_path,
target_pkgs_dir=dirname(pcrec_from_writable_cache.package_tarball_full_path),
target_extracted_dirname=basename(pcrec_from_writable_cache.extracted_package_dir),
record_or_spec=pcrec_from_writable_cache,
md5sum=pcrec_from_writable_cache.md5,
)
return None, extract_axn
pcrec_from_read_only_cache = next((
pcrec for pcrec in concat(pcache.query(pref_or_spec)
for pcache in PackageCacheData.read_only_caches())
if pcrec.is_fetched
), None)
if pcrec_from_read_only_cache:
# we found a tarball, but it's in a read-only package cache
# we need to link the tarball into the first writable package cache,
# and then extract
try:
expected_size_in_bytes = pref_or_spec.size
except AttributeError:
expected_size_in_bytes = None
cache_axn = CacheUrlAction(
url=path_to_url(pcrec_from_read_only_cache.package_tarball_full_path),
target_pkgs_dir=first_writable_cache.pkgs_dir,
target_package_basename=pcrec_from_read_only_cache.fn,
md5sum=md5,
expected_size_in_bytes=expected_size_in_bytes,
)
trgt_extracted_dirname = pcrec_from_read_only_cache.fn[:-len(CONDA_TARBALL_EXTENSION)]
extract_axn = ExtractPackageAction(
source_full_path=cache_axn.target_full_path,
target_pkgs_dir=first_writable_cache.pkgs_dir,
target_extracted_dirname=trgt_extracted_dirname,
record_or_spec=pcrec_from_read_only_cache,
md5sum=pcrec_from_read_only_cache.md5,
)
return cache_axn, extract_axn
# if we got here, we couldn't find a matching package in the caches
# we'll have to download one; fetch and extract
url = pref_or_spec.get('url')
assert url
try:
expected_size_in_bytes = pref_or_spec.size
except AttributeError:
expected_size_in_bytes = None
cache_axn = CacheUrlAction(
url=url,
target_pkgs_dir=first_writable_cache.pkgs_dir,
target_package_basename=pref_or_spec.fn,
md5sum=md5,
expected_size_in_bytes=expected_size_in_bytes,
)
extract_axn = ExtractPackageAction(
source_full_path=cache_axn.target_full_path,
target_pkgs_dir=first_writable_cache.pkgs_dir,
target_extracted_dirname=pref_or_spec.fn[:-len(CONDA_TARBALL_EXTENSION)],
record_or_spec=pref_or_spec,
md5sum=md5,
)
return cache_axn, extract_axn
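    # Decision summary for make_actions_for_record(), paraphrasing the logic
    # above (not an authoritative spec):
    #   * extracted record matching the md5 in any cache -> (None, None)
    #   * fetched tarball in a writable cache            -> (None, ExtractPackageAction)
    #   * fetched tarball only in a read-only cache      -> (CacheUrlAction from file://, ExtractPackageAction)
    #   * nothing usable in any cache                    -> (CacheUrlAction from the record's url, ExtractPackageAction)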
def __init__(self, link_prefs):
"""
Args:
link_prefs (Tuple[PackageRef]):
A sequence of :class:`PackageRef`s to ensure available in a known
package cache, typically for a follow-on :class:`UnlinkLinkTransaction`.
Here, "available" means the package tarball is both downloaded and extracted
to a package directory.
"""
self.link_precs = link_prefs
log.debug("instantiating ProgressiveFetchExtract with\n"
" %s\n", '\n '.join(pkg_rec.dist_str() for pkg_rec in link_prefs))
self.paired_actions = odict() # Map[pref, Tuple(CacheUrlAction, ExtractPackageAction)]
self._prepared = False
self._executed = False
@time_recorder("fetch_extract_prepare")
def prepare(self):
if self._prepared:
return
self.paired_actions.update((prec, self.make_actions_for_record(prec))
for prec in self.link_precs)
self._prepared = True
@property
def cache_actions(self):
return tuple(axns[0] for axns in itervalues(self.paired_actions) if axns[0])
@property
def extract_actions(self):
return tuple(axns[1] for axns in itervalues(self.paired_actions) if axns[1])
def execute(self):
if self._executed:
return
if not self._prepared:
self.prepare()
assert not context.dry_run
if not self.cache_actions or not self.extract_actions:
return
if not context.verbosity and not context.quiet and not context.json:
# TODO: use logger
print("\nDownloading and Extracting Packages")
else:
log.debug("prepared package cache actions:\n"
" cache_actions:\n"
" %s\n"
" extract_actions:\n"
" %s\n",
'\n '.join(text_type(ca) for ca in self.cache_actions),
'\n '.join(text_type(ea) for ea in self.extract_actions))
exceptions = []
with signal_handler(conda_signal_handler), time_recorder("fetch_extract_execute"):
for prec_or_spec, prec_actions in iteritems(self.paired_actions):
exc = self._execute_actions(prec_or_spec, prec_actions)
if exc:
log.debug('%r', exc, exc_info=True)
exceptions.append(exc)
if exceptions:
raise CondaMultiError(exceptions)
self._executed = True
@staticmethod
def _execute_actions(prec_or_spec, actions):
cache_axn, extract_axn = actions
if cache_axn is None and extract_axn is None:
return
desc = ''
if prec_or_spec.name and prec_or_spec.version:
desc = "%s-%s" % (prec_or_spec.name or '', prec_or_spec.version or '')
size = getattr(prec_or_spec, 'size', None)
size_str = size and human_bytes(size) or ''
if len(desc) > 0:
desc = "%-20.20s | " % (desc)
if len(size_str) > 0:
desc += "%-9s | " % (size_str)
progress_bar = ProgressBar(desc, not context.verbosity and not context.quiet, context.json)
download_total = 0.75 # fraction of progress for download; the rest goes to extract
try:
if cache_axn:
cache_axn.verify()
if not cache_axn.url.startswith('file:/'):
def progress_update_cache_axn(pct_completed):
progress_bar.update_to(pct_completed * download_total)
else:
download_total = 0
progress_update_cache_axn = None
cache_axn.execute(progress_update_cache_axn)
if extract_axn:
extract_axn.verify()
def progress_update_extract_axn(pct_completed):
progress_bar.update_to((1 - download_total) * pct_completed + download_total)
extract_axn.execute(progress_update_extract_axn)
except Exception as e:
if extract_axn:
extract_axn.reverse()
if cache_axn:
cache_axn.reverse()
return e
else:
if cache_axn:
cache_axn.cleanup()
if extract_axn:
extract_axn.cleanup()
progress_bar.finish()
finally:
progress_bar.close()
def __hash__(self):
return hash(self.link_precs)
def __eq__(self, other):
return hash(self) == hash(other)
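# Hedged usage sketch (link_precs would come from a solved transaction
# elsewhere; the variable name is an assumption for illustration):
#
#     pfe = ProgressiveFetchExtract(link_precs)
#     pfe.prepare()   # builds the paired (CacheUrlAction, ExtractPackageAction) map
#     pfe.execute()   # downloads/extracts with progress; raises CondaMultiError on failure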
# ##############################
# backward compatibility
# ##############################
def rm_fetched(dist):
"""
Checks to see if the requested package is in the cache; and if so, it removes both
the package itself and its extracted contents.
"""
# in conda/exports.py and conda_build/conda_interface.py, but not actually
# used in conda-build
raise NotImplementedError()
def download(url, dst_path, session=None, md5=None, urlstxt=False, retries=3):
from ..gateways.connection.download import download as gateway_download
gateway_download(url, dst_path, md5)
class package_cache(object):
def __contains__(self, dist):
return bool(PackageCacheData.first_writable().get(Dist(dist).to_package_ref(), None))
def keys(self):
return (Dist(v) for v in itervalues(PackageCacheData.first_writable()))
def __delitem__(self, dist):
PackageCacheData.first_writable().remove(Dist(dist).to_package_ref())
|
|
"""
========
numpydoc
========
Sphinx extension that handles docstrings in the Numpy standard format. [1]
It will:
- Convert Parameters etc. sections to field lists.
- Convert See Also section to a See also entry.
- Renumber references.
- Extract the signature from the docstring, if it can't be determined
otherwise.
.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
"""
from __future__ import division, absolute_import, print_function
import sys
import re
import pydoc
import sphinx
import inspect
import collections
if sphinx.__version__ < '1.0.1':
raise RuntimeError("Sphinx 1.0.1 or newer is required")
from .docscrape_sphinx import get_doc_object, SphinxDocString
from . import __version__
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
def rename_references(app, what, name, obj, options, lines,
reference_offset=[0]):
# replace reference numbers so that there are no duplicates
references = set()
for line in lines:
line = line.strip()
m = re.match(sixu('^.. \\[(%s)\\]') % app.config.numpydoc_citation_re,
line, re.I)
if m:
references.add(m.group(1))
if references:
for r in references:
if r.isdigit():
new_r = sixu("R%d") % (reference_offset[0] + int(r))
else:
new_r = sixu("%s%d") % (r, reference_offset[0])
for i, line in enumerate(lines):
lines[i] = lines[i].replace(sixu('[%s]_') % r,
sixu('[%s]_') % new_r)
lines[i] = lines[i].replace(sixu('.. [%s]') % r,
sixu('.. [%s]') % new_r)
reference_offset[0] += len(references)
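# Worked example of the renumbering above: in the first docstring processed
# (reference_offset[0] == 0), ``.. [1]`` / ``[1]_`` become ``.. [R1]`` /
# ``[R1]_``; the offset then advances by the number of references seen, so a
# ``[1]`` in the next docstring becomes ``[R2]``, keeping citation labels
# unique across the build. Non-numeric labels get the offset appended
# instead (e.g. ``[foo]`` -> ``[foo0]``).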
DEDUPLICATION_TAG = ' !! processed by numpydoc !!'
def mangle_docstrings(app, what, name, obj, options, lines):
if DEDUPLICATION_TAG in lines:
return
cfg = {'use_plots': app.config.numpydoc_use_plots,
'use_blockquotes': app.config.numpydoc_use_blockquotes,
'show_class_members': app.config.numpydoc_show_class_members,
'show_inherited_class_members':
app.config.numpydoc_show_inherited_class_members,
'class_members_toctree': app.config.numpydoc_class_members_toctree,
'attributes_as_param_list':
app.config.numpydoc_attributes_as_param_list}
u_NL = sixu('\n')
if what == 'module':
# Strip top title
pattern = '^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'
title_re = re.compile(sixu(pattern), re.I | re.S)
lines[:] = title_re.sub(sixu(''), u_NL.join(lines)).split(u_NL)
else:
doc = get_doc_object(obj, what, u_NL.join(lines), config=cfg,
builder=app.builder)
if sys.version_info[0] >= 3:
doc = str(doc)
else:
doc = unicode(doc)
lines[:] = doc.split(u_NL)
if (app.config.numpydoc_edit_link and hasattr(obj, '__name__') and
obj.__name__):
if hasattr(obj, '__module__'):
v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__))
else:
v = dict(full_name=obj.__name__)
lines += [sixu(''), sixu('.. htmlonly::'), sixu('')]
lines += [sixu(' %s') % x for x in
(app.config.numpydoc_edit_link % v).split("\n")]
# call function to replace reference numbers so that there are no
# duplicates
rename_references(app, what, name, obj, options, lines)
lines += ['..', DEDUPLICATION_TAG]
def mangle_signature(app, what, name, obj, options, sig, retann):
# Do not try to inspect classes that don't define `__init__`
if (inspect.isclass(obj) and
(not hasattr(obj, '__init__') or
'initializes x; see ' in pydoc.getdoc(obj.__init__))):
return '', ''
if not (isinstance(obj, collections.Callable) or
hasattr(obj, '__argspec_is_invalid_')):
return
if not hasattr(obj, '__doc__'):
return
doc = SphinxDocString(pydoc.getdoc(obj))
sig = doc['Signature'] or getattr(obj, '__text_signature__', None)
if sig:
sig = re.sub(sixu("^[^(]*"), sixu(""), sig)
return sig, sixu('')
def setup(app, get_doc_object_=get_doc_object):
if not hasattr(app, 'add_config_value'):
return # probably called by nose, better bail out
global get_doc_object
get_doc_object = get_doc_object_
app.connect('autodoc-process-docstring', mangle_docstrings)
app.connect('autodoc-process-signature', mangle_signature)
app.add_config_value('numpydoc_edit_link', None, False)
app.add_config_value('numpydoc_use_plots', None, False)
app.add_config_value('numpydoc_use_blockquotes', None, False)
app.add_config_value('numpydoc_show_class_members', True, True)
app.add_config_value('numpydoc_show_inherited_class_members', True, True)
app.add_config_value('numpydoc_class_members_toctree', True, True)
app.add_config_value('numpydoc_citation_re', '[a-z0-9_.-]+', True)
app.add_config_value('numpydoc_attributes_as_param_list', True, True)
# Extra mangling domains
app.add_domain(NumpyPythonDomain)
app.add_domain(NumpyCDomain)
app.setup_extension('sphinx.ext.autosummary')
metadata = {'version': __version__,
'parallel_read_safe': True}
return metadata
# ------------------------------------------------------------------------------
# Docstring-mangling domains
# ------------------------------------------------------------------------------
from docutils.statemachine import ViewList
from sphinx.domains.c import CDomain
from sphinx.domains.python import PythonDomain
class ManglingDomainBase(object):
directive_mangling_map = {}
def __init__(self, *a, **kw):
super(ManglingDomainBase, self).__init__(*a, **kw)
self.wrap_mangling_directives()
def wrap_mangling_directives(self):
for name, objtype in list(self.directive_mangling_map.items()):
self.directives[name] = wrap_mangling_directive(
self.directives[name], objtype)
class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
name = 'np'
directive_mangling_map = {
'function': 'function',
'class': 'class',
'exception': 'class',
'method': 'function',
'classmethod': 'function',
'staticmethod': 'function',
'attribute': 'attribute',
}
indices = []
class NumpyCDomain(ManglingDomainBase, CDomain):
name = 'np-c'
directive_mangling_map = {
'function': 'function',
'member': 'attribute',
'macro': 'function',
'type': 'class',
'var': 'object',
}
def match_items(lines, content_old):
"""Create items for mangled lines.
This function tries to match the lines in ``lines`` with the items (source
file references and line numbers) in ``content_old``. The
``mangle_docstrings`` function changes the actual docstrings, but doesn't
    keep track of where each line came from. The mangling does many operations
on the original lines, which are hard to track afterwards.
Many of the line changes come from deleting or inserting blank lines. This
function tries to match lines by ignoring blank lines. All other changes
(such as inserting figures or changes in the references) are completely
ignored, so the generated line numbers will be off if ``mangle_docstrings``
does anything non-trivial.
This is a best-effort function and the real fix would be to make
``mangle_docstrings`` actually keep track of the ``items`` together with
the ``lines``.
Examples
--------
>>> lines = ['', 'A', '', 'B', ' ', '', 'C', 'D']
>>> lines_old = ['a', '', '', 'b', '', 'c']
>>> items_old = [('file1.py', 0), ('file1.py', 1), ('file1.py', 2),
... ('file2.py', 0), ('file2.py', 1), ('file2.py', 2)]
>>> content_old = ViewList(lines_old, items=items_old)
>>> match_items(lines, content_old) # doctest: +NORMALIZE_WHITESPACE
[('file1.py', 0), ('file1.py', 0), ('file2.py', 0), ('file2.py', 0),
('file2.py', 2), ('file2.py', 2), ('file2.py', 2), ('file2.py', 2)]
>>> # first 2 ``lines`` are matched to 'a', second 2 to 'b', rest to 'c'
>>> # actual content is completely ignored.
Notes
-----
The algorithm tries to match any line in ``lines`` with one in
``lines_old``. It skips over all empty lines in ``lines_old`` and assigns
this line number to all lines in ``lines``, unless a non-empty line is
found in ``lines`` in which case it goes to the next line in ``lines_old``.
"""
items_new = []
lines_old = content_old.data
items_old = content_old.items
j = 0
for i, line in enumerate(lines):
# go to next non-empty line in old:
# line.strip() checks whether the string is all whitespace
while j < len(lines_old) - 1 and not lines_old[j].strip():
j += 1
items_new.append(items_old[j])
if line.strip() and j < len(lines_old) - 1:
j += 1
assert(len(items_new) == len(lines))
return items_new
def wrap_mangling_directive(base_directive, objtype):
class directive(base_directive):
def run(self):
env = self.state.document.settings.env
name = None
if self.arguments:
m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
name = m.group(2).strip()
if not name:
name = self.arguments[0]
lines = list(self.content)
mangle_docstrings(env.app, objtype, name, None, None, lines)
if self.content:
items = match_items(lines, self.content)
self.content = ViewList(lines, items=items,
parent=self.content.parent)
return base_directive.run(self)
return directive
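# Minimal conf.py sketch for enabling this extension (a sketch, assuming a
# standard Sphinx project; the option names are the ones registered in
# setup() above, and the values shown are examples, not recommendations):
#
#     extensions = ['sphinx.ext.autodoc', 'numpydoc']
#     numpydoc_show_class_members = True
#     numpydoc_class_members_toctree = True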
|
|
#
# Copyright (c) 2013-2016 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
from time import sleep
import gridfs
from pymongo import MongoClient
from common.oopatterns import Singleton
from irma.common.exceptions import IrmaDatabaseError
log = logging.getLogger(__name__)
def retry_connect(func):
"""Decorator for NoSQLDatabase to retry connecting automatically
"""
def wrapper(instance, *args, **kwargs):
if isinstance(instance, NoSQLDatabase):
if not instance._is_connected():
try:
instance._connect()
return func(instance, *args, **kwargs)
except IrmaDatabaseError as e:
raise e
return func(instance, *args, **kwargs)
else:
raise NotImplementedError()
return wrapper
# TODO: Create an abstract class so we can use multiple databases,
# not only mongodb
class NoSQLDatabase(Singleton):
"""Internal database.
This class handles the creation of the internal database and provides some
functions for interacting with it.
"""
# ==================================
# Constructor and Destructor stuff
# ==================================
def __init__(self, db_name, db_uri):
# TODO: Get defaults from configuration file
self._db_name = db_name
self._db_uri = db_uri
self._db_conn = None
self._db_cache = dict()
self._coll_cache = dict()
self._connect()
def __del__(self):
if self._db_conn:
self._disconnect()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.__del__()
# =================
# Private methods
# =================
def _connect(self):
if self._db_conn:
log.warn("Already connected to database")
try:
self._db_conn = MongoClient(self._db_uri)
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
def _disconnect(self):
if not self._db_conn:
return
try:
self._db_conn.close()
self._db_conn = None
self._db_cache = dict()
self._coll_cache = dict()
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
def _database(self, db_name):
if db_name not in self._db_cache:
try:
self._db_cache[db_name] = self._db_conn[db_name]
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
return self._db_cache[db_name]
def _table(self, db_name, coll_name):
database = self._database(db_name)
# TODO: Fix collision if two tables
# from different databases have the same name
if coll_name not in self._coll_cache:
try:
self._coll_cache[coll_name] = database[coll_name]
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
return self._coll_cache[coll_name]
def _is_connected(self):
return self._db_conn is not None
# ================
# Public methods
# ================
def db_instance(self):
return self._db_conn
@retry_connect
def load(self, db_name, collection_name, _id):
""" load entry _id in collection"""
collection = self._table(db_name, collection_name)
try:
res = collection.find_one({'_id': _id})
return res
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
@retry_connect
def exists(self, db_name, collection_name, _id):
""" check if entry with _id is in collection"""
collection = self._table(db_name, collection_name)
try:
res = collection.find_one({'_id': _id})
return res is not None
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
@retry_connect
def save(self, db_name, collection_name, dict_object):
""" save entry in collection"""
collection = self._table(db_name, collection_name)
try:
_id = collection.save(dict_object)
return _id
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
@retry_connect
def update(self, db_name, collection_name, _id, update_dict):
"""
Update entries in collection according to
        the dictionary specified
"""
collection = self._table(db_name, collection_name)
try:
collection.update({"_id": _id}, {"$set": update_dict})
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
@retry_connect
def remove(self, db_name, collection_name, _id):
""" Delete entry in collection according to the given id"""
collection = self._table(db_name, collection_name)
try:
collection.remove({'_id': _id})
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
@retry_connect
def find(self, db_name, collection_name, *args, **kwargs):
""" Returns elements from the collection according to the given query
:param db_name: The database
:param collection_name: The name of the collection
:param *args **kwargs: see
http://api.mongodb.org/python/current/api/pymongo/collection.html#\
pymongo.collection.Collection.find
and http://docs.mongodb.org/manual/tutorial/query-documents/
:rtype: cursor, see http://api.mongodb.org/python/current/api/pymongo/\
cursor.html#pymongo.cursor.Cursor
and http://docs.mongodb.org/manual/core/cursors/
:return: the result of the query
"""
collection = self._table(db_name, collection_name)
try:
return collection.find(*args, **kwargs)
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
@retry_connect
def put_file(self, db_name, collection_name, data, name):
""" put data into gridfs """
fsdbh = gridfs.GridFS(self._database(db_name),
collection=collection_name)
# create a new record
try:
file_oid = fsdbh.put(data, filename=name)
return file_oid
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
@retry_connect
def get_file(self, db_name, collection_name, file_oid):
""" get data from gridfs by file object-id """
fsdbh = gridfs.GridFS(self._database(db_name),
collection=collection_name)
try:
return fsdbh.get(file_oid)
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
@retry_connect
def delete_file(self, db_name, collection_name, file_oid):
""" delete from gridfs by file object-id """
fsdbh = gridfs.GridFS(self._database(db_name),
collection=collection_name)
try:
return fsdbh.delete(file_oid)
except Exception as e:
raise IrmaDatabaseError("{0}".format(e))
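# Hedged usage sketch (database name, URI, collections and payloads are made
# up; NoSQLDatabase derives from Singleton, so it is meant to be shared):
#
#     dbh = NoSQLDatabase('irma', 'mongodb://localhost:27017/')
#     _id = dbh.save('irma', 'scans', {'status': 'launched'})
#     dbh.update('irma', 'scans', _id, {'status': 'finished'})
#     oid = dbh.put_file('irma', 'files', b'\x00\x01', 'sample.bin')
#     data = dbh.get_file('irma', 'files', oid).read()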
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import time
import tempfile
import warnings
import tables
from tables import Group, Leaf, Table, Array
from tables import StringCol, IntCol, Int16Col, FloatCol, Float32Col
from tables.tests import common
from tables.tests.common import unittest
from tables.tests.common import PyTablesTestCase as TestCase
from six.moves import range
# Test Record class
class Record(tables.IsDescription):
var1 = StringCol(itemsize=4) # 4-character String
var2 = IntCol() # integer
var3 = Int16Col() # short integer
var4 = FloatCol() # double (double-precision)
var5 = Float32Col() # float (single-precision)
class TreeTestCase(common.TempFileMixin, TestCase):
open_mode = "w"
title = "This is the table title"
expectedrows = 10
appendrows = 5
def setUp(self):
super(TreeTestCase, self).setUp()
# Create an instance of HDF5 Table
self.populateFile()
self.h5file.close()
def populateFile(self):
group = self.h5file.root
maxshort = 1 << 15
# maxint = 2147483647 # (2 ** 31 - 1)
for j in range(3):
# Create a table
table = self.h5file.create_table(group, 'table'+str(j), Record,
title=self.title,
filters=None,
expectedrows=self.expectedrows)
# Get the record object associated with the new table
d = table.row
# Fill the table
for i in range(self.expectedrows):
d['var1'] = '%04d' % (self.expectedrows - i)
d['var2'] = i
d['var3'] = i % maxshort
d['var4'] = float(i)
d['var5'] = float(i)
d.append() # This injects the Record values
# Flush the buffer for this table
table.flush()
# Create a couple of arrays in each group
var1List = [x['var1'] for x in table.iterrows()]
var4List = [x['var4'] for x in table.iterrows()]
self.h5file.create_array(group, 'var1', var1List, "1")
self.h5file.create_array(group, 'var4', var4List, "4")
# Create a new group (descendant of group)
group2 = self.h5file.create_group(group, 'group'+str(j))
# Iterate over this new group (group2)
group = group2
def test00_getNode(self):
"""Checking the File.get_node() with string node names"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test00_getNode..." % self.__class__.__name__)
self.h5file = tables.open_file(self.h5fname, "r")
nodelist = ['/', '/table0', '/group0/var1', '/group0/group1/var4']
nodenames = []
for node in nodelist:
object = self.h5file.get_node(node)
nodenames.append(object._v_pathname)
self.assertEqual(nodenames, nodelist)
if common.verbose:
print("get_node(pathname) test passed")
nodegroups = [
'/', '/group0', '/group0/group1', '/group0/group1/group2']
nodenames = ['var1', 'var4']
nodepaths = []
for group in nodegroups:
for name in nodenames:
try:
object = self.h5file.get_node(group, name)
except LookupError:
pass
else:
nodepaths.append(object._v_pathname)
self.assertEqual(nodepaths,
['/var1', '/var4',
'/group0/var1', '/group0/var4',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("get_node(groupname, name) test passed")
nodelist = ['/', '/group0', '/group0/group1', '/group0/group1/group2',
'/table0']
nodenames = []
groupobjects = []
# warnings.filterwarnings("error", category=UserWarning)
for node in nodelist:
try:
object = self.h5file.get_node(node, classname='Group')
except LookupError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print("\nGreat!, the next LookupError was catched!")
print(value)
else:
nodenames.append(object._v_pathname)
groupobjects.append(object)
self.assertEqual(nodenames,
['/', '/group0', '/group0/group1',
'/group0/group1/group2'])
if common.verbose:
print("get_node(groupname, classname='Group') test passed")
# Reset the warning
# warnings.filterwarnings("default", category=UserWarning)
nodenames = ['var1', 'var4']
nodearrays = []
for group in groupobjects:
for name in nodenames:
try:
object = self.h5file.get_node(group, name, 'Array')
except:
pass
else:
nodearrays.append(object._v_pathname)
self.assertEqual(nodearrays,
['/var1', '/var4',
'/group0/var1', '/group0/var4',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("get_node(groupobject, name, classname='Array') test passed")
def test01_getNodeClass(self):
"""Checking the File.get_node() with instances"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01_getNodeClass..." %
self.__class__.__name__)
self.h5file = tables.open_file(self.h5fname, "r")
        # These three ways of using get_node should return a table instance
table = self.h5file.get_node("/group0/table1")
self.assertTrue(isinstance(table, Table))
table = self.h5file.get_node("/group0", "table1")
self.assertTrue(isinstance(table, Table))
table = self.h5file.get_node(self.h5file.root.group0, "table1")
self.assertTrue(isinstance(table, Table))
# This should return an array instance
arr = self.h5file.get_node("/group0/var1")
self.assertTrue(isinstance(arr, Array))
self.assertTrue(isinstance(arr, Leaf))
# And this a Group
group = self.h5file.get_node("/group0", "group1", "Group")
self.assertTrue(isinstance(group, Group))
def test02_listNodes(self):
"""Checking the File.list_nodes() method"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test02_listNodes..." % self.__class__.__name__)
        # Make the warnings raise an error
# warnings.filterwarnings("error", category=UserWarning)
self.h5file = tables.open_file(self.h5fname, "r")
self.assertRaises(TypeError,
self.h5file.list_nodes, '/', 'NoSuchClass')
nodelist = ['/', '/group0', '/group0/table1', '/group0/group1/group2',
'/var1']
nodenames = []
objects = []
for node in nodelist:
try:
objectlist = self.h5file.list_nodes(node)
except:
pass
else:
objects.extend(objectlist)
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0', '/table0', '/var1', '/var4',
'/group0/group1', '/group0/table1',
'/group0/var1', '/group0/var4'])
if common.verbose:
print("list_nodes(pathname) test passed")
nodenames = []
for node in objects:
try:
objectlist = self.h5file.list_nodes(node)
except:
pass
else:
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0/group1', '/group0/table1',
'/group0/var1', '/group0/var4',
'/group0/group1/group2', '/group0/group1/table2',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("list_nodes(groupobject) test passed")
nodenames = []
for node in objects:
try:
objectlist = self.h5file.list_nodes(node, 'Leaf')
except TypeError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print("\nGreat!, the next TypeError was catched!")
print(value)
else:
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0/table1',
'/group0/var1', '/group0/var4',
'/group0/group1/table2',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("list_nodes(groupobject, classname = 'Leaf') test passed")
nodenames = []
for node in objects:
try:
objectlist = self.h5file.list_nodes(node, 'Table')
except TypeError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print("\nGreat!, the next TypeError was catched!")
print(value)
else:
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0/table1', '/group0/group1/table2'])
if common.verbose:
print("list_nodes(groupobject, classname = 'Table') test passed")
# Reset the warning
# warnings.filterwarnings("default", category=UserWarning)
def test02b_iterNodes(self):
"""Checking the File.iter_nodes() method"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test02b_iterNodes..." % self.__class__.__name__)
self.h5file = tables.open_file(self.h5fname, "r")
self.assertRaises(TypeError,
self.h5file.list_nodes, '/', 'NoSuchClass')
nodelist = ['/', '/group0', '/group0/table1', '/group0/group1/group2',
'/var1']
nodenames = []
objects = []
for node in nodelist:
try:
objectlist = [o for o in self.h5file.iter_nodes(node)]
except:
pass
else:
objects.extend(objectlist)
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0', '/table0', '/var1', '/var4',
'/group0/group1', '/group0/table1',
'/group0/var1', '/group0/var4'])
if common.verbose:
print("iter_nodes(pathname) test passed")
nodenames = []
for node in objects:
try:
objectlist = [o for o in self.h5file.iter_nodes(node)]
except:
pass
else:
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0/group1', '/group0/table1',
'/group0/var1', '/group0/var4',
'/group0/group1/group2', '/group0/group1/table2',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("iter_nodes(groupobject) test passed")
nodenames = []
for node in objects:
try:
objectlist = [o for o in self.h5file.iter_nodes(node, 'Leaf')]
except TypeError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print("\nGreat!, the next TypeError was catched!")
print(value)
else:
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0/table1',
'/group0/var1', '/group0/var4',
'/group0/group1/table2',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("iter_nodes(groupobject, classname = 'Leaf') test passed")
nodenames = []
for node in objects:
try:
objectlist = [o for o in self.h5file.iter_nodes(node, 'Table')]
except TypeError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print("\nGreat!, the next TypeError was catched!")
print(value)
else:
for object in objectlist:
nodenames.append(object._v_pathname)
self.assertEqual(nodenames,
['/group0/table1', '/group0/group1/table2'])
if common.verbose:
print("iter_nodes(groupobject, classname = 'Table') test passed")
# Reset the warning
# warnings.filterwarnings("default", category=UserWarning)
def test03_TraverseTree(self):
"""Checking the File.walk_groups() method"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test03_TraverseTree..." %
self.__class__.__name__)
self.h5file = tables.open_file(self.h5fname, "r")
groups = []
tables_ = []
arrays = []
for group in self.h5file.walk_groups():
groups.append(group._v_pathname)
for table in self.h5file.list_nodes(group, 'Table'):
tables_.append(table._v_pathname)
for arr in self.h5file.list_nodes(group, 'Array'):
arrays.append(arr._v_pathname)
self.assertEqual(groups,
["/", "/group0", "/group0/group1",
"/group0/group1/group2"])
self.assertEqual(
tables_,
["/table0", "/group0/table1", "/group0/group1/table2"])
self.assertEqual(arrays,
['/var1', '/var4',
'/group0/var1', '/group0/var4',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("walk_groups() test passed")
groups = []
tables_ = []
arrays = []
for group in self.h5file.walk_groups("/group0/group1"):
groups.append(group._v_pathname)
for table in self.h5file.list_nodes(group, 'Table'):
tables_.append(table._v_pathname)
for arr in self.h5file.list_nodes(group, 'Array'):
arrays.append(arr._v_pathname)
self.assertEqual(groups,
["/group0/group1", "/group0/group1/group2"])
self.assertEqual(tables_, ["/group0/group1/table2"])
self.assertEqual(arrays, [
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("walk_groups(pathname) test passed")
def test04_walkNodes(self):
"""Checking File.walk_nodes"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test04_walkNodes..." % self.__class__.__name__)
self.h5file = tables.open_file(self.h5fname, "r")
self.assertRaises(TypeError, next,
self.h5file.walk_nodes('/', 'NoSuchClass'))
groups = []
tables1 = []
tables2 = []
arrays = []
for group in self.h5file.walk_nodes(classname="Group"):
groups.append(group._v_pathname)
for table in group._f_iter_nodes(classname='Table'):
tables1.append(table._v_pathname)
# Test the recursivity
for table in self.h5file.root._f_walknodes('Table'):
tables2.append(table._v_pathname)
for arr in self.h5file.walk_nodes(classname='Array'):
arrays.append(arr._v_pathname)
self.assertEqual(groups,
["/", "/group0", "/group0/group1",
"/group0/group1/group2"])
self.assertEqual(tables1,
["/table0", "/group0/table1",
"/group0/group1/table2"])
self.assertEqual(tables2,
["/table0", "/group0/table1",
"/group0/group1/table2"])
self.assertEqual(arrays,
['/var1', '/var4',
'/group0/var1', '/group0/var4',
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("File.__iter__() and Group.__iter__ test passed")
groups = []
tables_ = []
arrays = []
for group in self.h5file.walk_nodes("/group0/group1",
classname="Group"):
groups.append(group._v_pathname)
for table in group._f_walknodes('Table'):
tables_.append(table._v_pathname)
for arr in self.h5file.walk_nodes(group, 'Array'):
arrays.append(arr._v_pathname)
self.assertEqual(groups,
["/group0/group1", "/group0/group1/group2"])
self.assertEqual(tables_, ["/group0/group1/table2"])
self.assertEqual(arrays, [
'/group0/group1/var1', '/group0/group1/var4'])
if common.verbose:
print("walk_nodes(pathname, classname) test passed")
class DeepTreeTestCase(common.TempFileMixin, TestCase):
"""Checks for deep hierarchy levels in PyTables trees."""
def setUp(self):
super(DeepTreeTestCase, self).setUp()
# Here we put a more conservative limit to deal with more platforms
# With maxdepth = 64 this test would take less than 40 MB
# of main memory to run, which is quite reasonable nowadays.
# With maxdepth = 1024 this test will take around 300 MB.
if common.heavy:
self.maxdepth = 256 # Takes around 60 MB of memory!
else:
self.maxdepth = 64 # This should be safe for most machines
if common.verbose:
print("Maximum depth tested :", self.maxdepth)
# Open a new empty HDF5 file
group = self.h5file.root
if common.verbose:
print("Depth writing progress: ", end=' ')
# Iterate until maxdepth
for depth in range(self.maxdepth):
# Save it on the HDF5 file
if common.verbose:
print("%3d," % (depth), end=' ')
# Create a couple of arrays here
self.h5file.create_array(
group, 'array', [1, 1], "depth: %d" % depth)
self.h5file.create_array(
group, 'array2', [1, 1], "depth: %d" % depth)
# And also a group
self.h5file.create_group(group, 'group2_' + str(depth))
# Finally, iterate over a new group
group = self.h5file.create_group(group, 'group' + str(depth))
# Close the file
self.h5file.close()
def _check_tree(self, filename):
# Open the previous HDF5 file in read-only mode
with tables.open_file(filename, mode="r") as h5file:
group = h5file.root
if common.verbose:
print("\nDepth reading progress: ", end=' ')
            # Get the metadata on the previously saved arrays
for depth in range(self.maxdepth):
if common.verbose:
print("%3d," % (depth), end=' ')
# Check the contents
self.assertEqual(group.array[:], [1, 1])
self.assertTrue("array2" in group)
self.assertTrue("group2_"+str(depth) in group)
# Iterate over the next group
group = h5file.get_node(group, 'group' + str(depth))
if common.verbose:
                print()  # This flushes the stdout buffer
def test00_deepTree(self):
"""Creation of a large depth object tree."""
self._check_tree(self.h5fname)
def test01a_copyDeepTree(self):
"""Copy of a large depth object tree."""
self.h5file = tables.open_file(self.h5fname, mode="r")
h5fname2 = tempfile.mktemp(".h5")
try:
with tables.open_file(h5fname2, mode="w") as h5file2:
if common.verbose:
print("\nCopying deep tree...")
self.h5file.copy_node(self.h5file.root, h5file2.root,
recursive=True)
self.h5file.close()
self._check_tree(h5fname2)
finally:
if os.path.exists(h5fname2):
os.remove(h5fname2)
def test01b_copyDeepTree(self):
"""Copy of a large depth object tree with small node cache."""
self.h5file = tables.open_file(self.h5fname, mode="r",
node_cache_slots=10)
h5fname2 = tempfile.mktemp(".h5")
try:
with tables.open_file(h5fname2, mode="w",
node_cache_slots=10) as h5file2:
if common.verbose:
print("\nCopying deep tree...")
self.h5file.copy_node(self.h5file.root, h5file2.root,
recursive=True)
self.h5file.close()
self._check_tree(h5fname2)
finally:
if os.path.exists(h5fname2):
os.remove(h5fname2)
def test01c_copyDeepTree(self):
"""Copy of a large depth object tree with no node cache."""
self.h5file = tables.open_file(self.h5fname, mode="r",
node_cache_slots=0)
h5fname2 = tempfile.mktemp(".h5")
try:
with tables.open_file(h5fname2, mode="w",
node_cache_slots=0) as h5file2:
if common.verbose:
print("\nCopying deep tree...")
self.h5file.copy_node(self.h5file.root, h5file2.root,
recursive=True)
self.h5file.close()
self._check_tree(h5fname2)
finally:
if os.path.exists(h5fname2):
os.remove(h5fname2)
@unittest.skipUnless(common.heavy, 'only in heavy mode')
def test01d_copyDeepTree(self):
"""Copy of a large depth object tree with static node cache."""
self.h5file = tables.open_file(self.h5fname, mode="r",
node_cache_slots=-256)
h5fname2 = tempfile.mktemp(".h5")
try:
with tables.open_file(h5fname2, mode="w",
node_cache_slots=-256) as h5file2:
if common.verbose:
print("\nCopying deep tree...")
self.h5file.copy_node(self.h5file.root, h5file2.root,
recursive=True)
self.h5file.close()
self._check_tree(h5fname2)
finally:
if os.path.exists(h5fname2):
os.remove(h5fname2)
class WideTreeTestCase(common.TempFileMixin, TestCase):
"""Checks for maximum number of children for a Group."""
def test00_Leafs(self):
"""Checking creation of large number of leafs (1024) per group.
        Variable 'maxchildren' controls this check. PyTables supports up
        to 4096 children per group, but this would take too much memory
        (up to 64 MB) for testing purposes (maybe we can add a test for
big platforms). A 1024 children run takes up to 30 MB. A 512
children test takes around 25 MB.
"""
if common.heavy:
maxchildren = 4096
else:
maxchildren = 256
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test00_wideTree..." %
self.__class__.__name__)
print("Maximum number of children tested :", maxchildren)
a = [1, 1]
if common.verbose:
print("Children writing progress: ", end=' ')
for child in range(maxchildren):
if common.verbose:
print("%3d," % (child), end=' ')
self.h5file.create_array(self.h5file.root, 'array' + str(child),
a, "child: %d" % child)
if common.verbose:
print()
t1 = time.time()
a = [1, 1]
# Open the previous HDF5 file in read-only mode
self._reopen()
if common.verbose:
print("\nTime spent opening a file with %d arrays: %s s" %
(maxchildren, time.time()-t1))
print("\nChildren reading progress: ", end=' ')
        # Get the metadata on the previously saved arrays
for child in range(maxchildren):
if common.verbose:
print("%3d," % (child), end=' ')
# Create an array for later comparison
# Get the actual array
array_ = getattr(self.h5file.root, 'array' + str(child))
b = array_.read()
# Arrays a and b must be equal
self.assertEqual(a, b)
if common.verbose:
            print()  # This flushes the stdout buffer
def test01_wideTree(self):
"""Checking creation of large number of groups (1024) per group.
        Variable 'maxchildren' controls this check. PyTables supports up
        to 4096 children per group, but this would take too much memory
        (up to 64 MB) for testing purposes (maybe we can add a test for
big platforms). A 1024 children run takes up to 30 MB. A 512
children test takes around 25 MB.
"""
if common.heavy:
# for big platforms!
maxchildren = 4096
else:
# for standard platforms
maxchildren = 256
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test00_wideTree..." %
self.__class__.__name__)
print("Maximum number of children tested :", maxchildren)
if common.verbose:
print("Children writing progress: ", end=' ')
for child in range(maxchildren):
if common.verbose:
print("%3d," % (child), end=' ')
self.h5file.create_group(self.h5file.root, 'group' + str(child),
"child: %d" % child)
if common.verbose:
print()
t1 = time.time()
# Open the previous HDF5 file in read-only mode
self._reopen()
if common.verbose:
print("\nTime spent opening a file with %d groups: %s s" %
(maxchildren, time.time()-t1))
print("\nChildren reading progress: ", end=' ')
        # Get the metadata on the previously saved arrays
for child in range(maxchildren):
if common.verbose:
print("%3d," % (child), end=' ')
# Get the actual group
group = getattr(self.h5file.root, 'group' + str(child))
            # The group title must match the value written above
self.assertEqual(group._v_title, "child: %d" % child)
if common.verbose:
            print()  # This flushes the stdout buffer
class HiddenTreeTestCase(common.TempFileMixin, TestCase):
"""Check for hidden groups, leaves and hierarchies."""
def setUp(self):
super(HiddenTreeTestCase, self).setUp()
self.visible = [] # list of visible object paths
self.hidden = [] # list of hidden object paths
# Create some visible nodes: a, g, g/a1, g/a2, g/g, g/g/a.
h5f = self.h5file
h5f.create_array('/', 'a', [0])
g = h5f.create_group('/', 'g')
h5f.create_array(g, 'a1', [0])
h5f.create_array(g, 'a2', [0])
g_g = h5f.create_group(g, 'g')
h5f.create_array(g_g, 'a', [0])
self.visible.extend(['/a', '/g', '/g/a1', '/g/a2', '/g/g', '/g/g/a'])
# Create some hidden nodes: _p_a, _p_g, _p_g/a, _p_g/_p_a, g/_p_a.
h5f.create_array('/', '_p_a', [0])
hg = h5f.create_group('/', '_p_g')
h5f.create_array(hg, 'a', [0])
h5f.create_array(hg, '_p_a', [0])
h5f.create_array(g, '_p_a', [0])
self.hidden.extend(
['/_p_a', '/_p_g', '/_p_g/a', '/_p_g/_p_a', '/g/_p_a'])
    # The test below is commented out because the .objects dictionary
# has been removed (as well as .leaves and .groups)
def _test00_objects(self):
"""Absence of hidden nodes in `File.objects`."""
objects = self.h5file.objects
warnings.filterwarnings('ignore', category=DeprecationWarning)
for vpath in self.visible:
self.assertTrue(
vpath in objects,
"Missing visible node ``%s`` from ``File.objects``." % vpath)
for hpath in self.hidden:
self.assertTrue(
hpath not in objects,
"Found hidden node ``%s`` in ``File.objects``." % hpath)
warnings.filterwarnings('default', category=DeprecationWarning)
    # The test below is commented out because the .objects dictionary
# has been removed (as well as .leaves and .groups)
def _test00b_objects(self):
"""Object dictionaries conformance with ``walk_nodes()``."""
def dictCheck(dictName, classname):
file_ = self.h5file
objects = getattr(file_, dictName)
walkPaths = [node._v_pathname
for node in file_.walk_nodes('/', classname)]
dictPaths = [path for path in objects]
walkPaths.sort()
dictPaths.sort()
self.assertEqual(
walkPaths, dictPaths,
"nodes in ``%s`` do not match those from ``walk_nodes()``"
% dictName)
self.assertEqual(
len(walkPaths), len(objects),
"length of ``%s`` differs from that of ``walk_nodes()``"
% dictName)
warnings.filterwarnings('ignore', category=DeprecationWarning)
dictCheck('objects', None)
dictCheck('groups', 'Group')
dictCheck('leaves', 'Leaf')
warnings.filterwarnings('default', category=DeprecationWarning)
def test01_getNode(self):
"""Node availability via `File.get_node()`."""
h5f = self.h5file
for vpath in self.visible:
h5f.get_node(vpath)
for hpath in self.hidden:
h5f.get_node(hpath)
def test02_walkGroups(self):
"""Hidden group absence in `File.walk_groups()`."""
hidden = self.hidden
for group in self.h5file.walk_groups('/'):
pathname = group._v_pathname
self.assertTrue(pathname not in hidden,
"Walked across hidden group ``%s``." % pathname)
def test03_walkNodes(self):
"""Hidden node absence in `File.walk_nodes()`."""
hidden = self.hidden
for node in self.h5file.walk_nodes('/'):
pathname = node._v_pathname
self.assertTrue(pathname not in hidden,
"Walked across hidden node ``%s``." % pathname)
def test04_listNodesVisible(self):
"""Listing visible nodes under a visible group (list_nodes)."""
hidden = self.hidden
for node in self.h5file.list_nodes('/g'):
pathname = node._v_pathname
self.assertTrue(pathname not in hidden,
"Listed hidden node ``%s``." % pathname)
def test04b_listNodesVisible(self):
"""Listing visible nodes under a visible group (iter_nodes)."""
hidden = self.hidden
for node in self.h5file.iter_nodes('/g'):
pathname = node._v_pathname
self.assertTrue(pathname not in hidden,
"Listed hidden node ``%s``." % pathname)
def test05_listNodesHidden(self):
"""Listing visible nodes under a hidden group (list_nodes)."""
hidden = self.hidden
node_to_find = '/_p_g/a'
found_node = False
for node in self.h5file.list_nodes('/_p_g'):
pathname = node._v_pathname
if pathname == node_to_find:
found_node = True
self.assertTrue(pathname in hidden,
"Listed hidden node ``%s``." % pathname)
self.assertTrue(found_node,
"Hidden node ``%s`` was not listed." % node_to_find)
def test05b_iterNodesHidden(self):
"""Listing visible nodes under a hidden group (iter_nodes)."""
hidden = self.hidden
node_to_find = '/_p_g/a'
found_node = False
for node in self.h5file.iter_nodes('/_p_g'):
pathname = node._v_pathname
if pathname == node_to_find:
found_node = True
self.assertTrue(pathname in hidden,
"Listed hidden node ``%s``." % pathname)
self.assertTrue(found_node,
"Hidden node ``%s`` was not listed." % node_to_find)
    # The test below is commented out because the .objects dictionary
# has been removed (as well as .leaves and .groups)
def _test06_reopen(self):
"""Reopening a file with hidden nodes."""
self.h5file.close()
self.h5file = tables.open_file(self.h5fname)
self.test00_objects()
def test07_move(self):
"""Moving a node between hidden and visible groups."""
is_visible_node = self.h5file.is_visible_node
self.assertFalse(is_visible_node('/_p_g/a'))
self.h5file.move_node('/_p_g/a', '/g', 'a')
self.assertTrue(is_visible_node('/g/a'))
self.h5file.move_node('/g/a', '/_p_g', 'a')
self.assertFalse(is_visible_node('/_p_g/a'))
def test08_remove(self):
"""Removing a visible group with hidden children."""
self.assertTrue('/g/_p_a' in self.h5file)
self.h5file.root.g._f_remove(recursive=True)
self.assertFalse('/g/_p_a' in self.h5file)
class CreateParentsTestCase(common.TempFileMixin, TestCase):
"""Test the ``createparents`` flag.
These are mainly for the user interface. More thorough tests on the
workings of the flag can be found in the ``test_do_undo.py`` module.
"""
filters = tables.Filters(complevel=4) # simply non-default
def setUp(self):
super(CreateParentsTestCase, self).setUp()
self.h5file.create_array('/', 'array', [1])
self.h5file.create_group('/', 'group', filters=self.filters)
def test00_parentType(self):
"""Using the right type of parent node argument."""
h5file, root = self.h5file, self.h5file.root
self.assertRaises(TypeError, h5file.create_array,
root.group, 'arr', [1], createparents=True)
self.assertRaises(TypeError, h5file.copy_node,
'/array', root.group, createparents=True)
self.assertRaises(TypeError, h5file.move_node,
'/array', root.group, createparents=True)
self.assertRaises(TypeError, h5file.copy_children,
'/group', root, createparents=True)
def test01_inside(self):
"""Placing a node inside a nonexistent child of itself."""
self.assertRaises(tables.NodeError, self.h5file.move_node,
'/group', '/group/foo/bar',
createparents=True)
self.assertFalse('/group/foo' in self.h5file)
self.assertRaises(tables.NodeError, self.h5file.copy_node,
'/group', '/group/foo/bar',
recursive=True, createparents=True)
        self.assertFalse('/group/foo' in self.h5file)
def test02_filters(self):
"""Propagating the filters of created parent groups."""
self.h5file.create_group('/group/foo/bar', 'baz', createparents=True)
self.assertTrue('/group/foo/bar/baz' in self.h5file)
for group in self.h5file.walk_groups('/group'):
self.assertEqual(self.filters, group._v_filters)
def suite():
theSuite = unittest.TestSuite()
# This counter is useful when detecting memory leaks
niter = 1
for i in range(niter):
theSuite.addTest(unittest.makeSuite(TreeTestCase))
theSuite.addTest(unittest.makeSuite(DeepTreeTestCase))
theSuite.addTest(unittest.makeSuite(WideTreeTestCase))
theSuite.addTest(unittest.makeSuite(HiddenTreeTestCase))
theSuite.addTest(unittest.makeSuite(CreateParentsTestCase))
return theSuite
if __name__ == '__main__':
common.parse_argv(sys.argv)
common.print_versions()
unittest.main(defaultTest='suite')
|
|
# -*- coding: utf-8 -*-
'''Test cases for the ``ldap`` state module
This code is gross. I started out trying to remove some of the
duplicate code in the test cases, and before I knew it the test code
was an ugly second implementation.
I'm leaving it for now, but this should really be gutted and replaced
with something sensible.
'''
from __future__ import absolute_import
import copy
import salt.ext.six as six
import salt.states.ldap
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
patch,
)
ensure_in_syspath('../../')
# emulates the LDAP database. each key is the DN of an entry and it
# maps to a dict which maps attribute names to sets of values.
db = {}
def _init_db(newdb=None):
if newdb is None:
newdb = {}
global db
db = newdb
def _complex_db():
return {
'dnfoo': {
'attrfoo1': set((
'valfoo1.1',
'valfoo1.2',
)),
'attrfoo2': set((
'valfoo2.1',
)),
},
'dnbar': {
'attrbar1': set((
'valbar1.1',
'valbar1.2',
)),
'attrbar2': set((
'valbar2.1',
)),
},
}
class _dummy_ctx(object):
def __init__(self):
pass
def __enter__(self):
return self
def __exit__(self, *exc):
pass
def _dummy_connect(connect_spec):
return _dummy_ctx()
def _dummy_search(connect_spec, base, scope):
if base not in db:
return {}
return {base: dict(((attr, sorted(db[base][attr]))
for attr in db[base]
if len(db[base][attr])))}
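# Illustrative sketch (not used by the tests themselves): with the
# _complex_db() fixture loaded, a base search returns the entry with each
# attribute's values sorted into lists, e.g.
#   _init_db(_complex_db())
#   _dummy_search(None, 'dnfoo', 'base')
#   # -> {'dnfoo': {'attrfoo1': ['valfoo1.1', 'valfoo1.2'],
#   #               'attrfoo2': ['valfoo2.1']}}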
def _dummy_add(connect_spec, dn, attributes):
assert dn not in db
assert len(attributes)
db[dn] = {}
for attr, vals in six.iteritems(attributes):
assert len(vals)
db[dn][attr] = set(vals)
return True
def _dummy_delete(connect_spec, dn):
assert dn in db
del db[dn]
return True
def _dummy_change(connect_spec, dn, before, after):
assert before != after
assert len(before)
assert len(after)
assert dn in db
e = db[dn]
assert e == before
all_attrs = set()
all_attrs.update(before)
all_attrs.update(after)
directives = []
for attr in all_attrs:
if attr not in before:
assert attr in after
assert len(after[attr])
directives.append(('add', attr, after[attr]))
elif attr not in after:
assert attr in before
assert len(before[attr])
directives.append(('delete', attr, ()))
else:
assert len(before[attr])
assert len(after[attr])
to_del = before[attr] - after[attr]
if len(to_del):
directives.append(('delete', attr, to_del))
to_add = after[attr] - before[attr]
if len(to_add):
directives.append(('add', attr, to_add))
return _dummy_modify(connect_spec, dn, directives)
def _dummy_modify(connect_spec, dn, directives):
assert dn in db
e = db[dn]
for op, attr, vals in directives:
if op == 'add':
assert len(vals)
existing_vals = e.setdefault(attr, set())
for val in vals:
assert val not in existing_vals
existing_vals.add(val)
elif op == 'delete':
assert attr in e
existing_vals = e[attr]
assert len(existing_vals)
if not len(vals):
del e[attr]
continue
for val in vals:
assert val in existing_vals
existing_vals.remove(val)
if not len(existing_vals):
del e[attr]
elif op == 'replace':
e.pop(attr, None)
e[attr] = set(vals)
else:
raise ValueError('unsupported modify op: {0}'.format(op))
return True
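# Example (illustrative only): starting from a single entry, the directives
# below add one value and delete a whole attribute in one call:
#   _init_db({'dn1': {'a': set(['1']), 'b': set(['2'])}})
#   _dummy_modify(None, 'dn1', [('add', 'a', ['3']), ('delete', 'b', ())])
#   # db is now {'dn1': {'a': set(['1', '3'])}}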
def _dump_db(d=None):
if d is None:
d = db
return dict(((dn, dict(((attr, sorted(d[dn][attr]))
for attr in d[dn])))
for dn in d))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class LDAPTestCase(TestCase):
def setUp(self):
__opts = getattr(salt.states.ldap, '__opts__', {})
salt.states.ldap.__opts__ = __opts
__salt = getattr(salt.states.ldap, '__salt__', {})
salt.states.ldap.__salt__ = __salt
self.patchers = [
patch.dict('salt.states.ldap.__opts__', {'test': False}),
]
for f in ('connect', 'search', 'add', 'delete', 'change', 'modify'):
self.patchers.append(
patch.dict('salt.states.ldap.__salt__',
{'ldap3.' + f: globals()['_dummy_' + f]}))
for p in self.patchers:
p.start()
self.maxDiff = None
def tearDown(self):
for p in reversed(self.patchers):
p.stop()
def _test_helper(self, init_db, expected_ret, replace,
delete_others=False):
_init_db(copy.deepcopy(init_db))
old = _dump_db()
new = _dump_db()
expected_db = copy.deepcopy(init_db)
for dn, attrs in six.iteritems(replace):
for attr, vals in six.iteritems(attrs):
if len(vals):
new.setdefault(dn, {})[attr] = sorted(set(vals))
expected_db.setdefault(dn, {})[attr] = set(vals)
elif dn in expected_db:
new[dn].pop(attr, None)
expected_db[dn].pop(attr, None)
if not len(expected_db.get(dn, {})):
new.pop(dn, None)
expected_db.pop(dn, None)
if delete_others:
dn_to_delete = set()
for dn, attrs in six.iteritems(expected_db):
if dn in replace:
to_delete = set()
for attr, vals in six.iteritems(attrs):
if attr not in replace[dn]:
to_delete.add(attr)
for attr in to_delete:
del attrs[attr]
del new[dn][attr]
if not len(attrs):
dn_to_delete.add(dn)
for dn in dn_to_delete:
del new[dn]
del expected_db[dn]
name = 'ldapi:///'
expected_ret['name'] = name
expected_ret.setdefault('result', True)
expected_ret.setdefault('comment', 'Successfully updated LDAP entries')
expected_ret.setdefault('changes', dict(
((dn, {'old': dict((attr, vals)
for attr, vals in six.iteritems(old[dn])
if vals != new.get(dn, {}).get(attr, ()))
if dn in old else None,
'new': dict((attr, vals)
for attr, vals in six.iteritems(new[dn])
if vals != old.get(dn, {}).get(attr, ()))
if dn in new else None})
for dn in replace
if old.get(dn, {}) != new.get(dn, {}))))
entries = [{dn: [{'replace': attrs},
{'delete_others': delete_others}]}
for dn, attrs in six.iteritems(replace)]
actual = salt.states.ldap.managed(name, entries)
self.assertDictEqual(expected_ret, actual)
self.assertDictEqual(expected_db, db)
def _test_helper_success(self, init_db, replace, delete_others=False):
self._test_helper(init_db, {}, replace, delete_others)
def _test_helper_nochange(self, init_db, replace, delete_others=False):
expected = {
'changes': {},
'comment': 'LDAP entries already set',
}
self._test_helper(init_db, expected, replace, delete_others)
def test_managed_empty(self):
_init_db()
name = 'ldapi:///'
expected = {
'name': name,
'changes': {},
'result': True,
'comment': 'LDAP entries already set',
}
actual = salt.states.ldap.managed(name, {})
self.assertDictEqual(expected, actual)
def test_managed_add_entry(self):
self._test_helper_success(
{},
{'dummydn': {'foo': ['bar', 'baz']}})
def test_managed_add_attr(self):
self._test_helper_success(
_complex_db(),
{'dnfoo': {'attrfoo3': ['valfoo3.1']}})
def test_managed_simplereplace(self):
self._test_helper_success(
_complex_db(),
{'dnfoo': {'attrfoo1': ['valfoo1.3']}})
def test_managed_deleteattr(self):
self._test_helper_success(
_complex_db(),
{'dnfoo': {'attrfoo1': []}})
def test_managed_deletenonexistattr(self):
self._test_helper_nochange(
_complex_db(),
{'dnfoo': {'dummyattr': []}})
def test_managed_deleteentry(self):
self._test_helper_success(
_complex_db(),
{'dnfoo': {}},
True)
def test_managed_deletenonexistentry(self):
self._test_helper_nochange(
_complex_db(),
{'dummydn': {}},
True)
def test_managed_deletenonexistattrinnonexistentry(self):
self._test_helper_nochange(
_complex_db(),
{'dummydn': {'dummyattr': []}})
def test_managed_add_attr_delete_others(self):
self._test_helper_success(
_complex_db(),
{'dnfoo': {'dummyattr': ['dummyval']}},
True)
def test_managed_no_net_change(self):
self._test_helper_nochange(
_complex_db(),
{'dnfoo': {'attrfoo1': ['valfoo1.2', 'valfoo1.1']}})
def test_managed_repeated_values(self):
self._test_helper_success(
{},
{'dummydn': {'dummyattr': ['dummyval', 'dummyval']}})
if __name__ == '__main__':
from integration import run_tests
run_tests(LDAPTestCase, needs_daemon=False)
|
|
# Ryu Izawa
# Written 2017-10-15
# Last updated 2017-11-05
import csv
import math
import json
import string
import random
import os.path
import numpy as np
import pandas as pd
import urllib, urllib2
start_latitude = 40.363377
start_longitude = -74.013535
start_latitude = None
start_longitude = None
# Google Maps API Keys
key = "&key=" + 'AIzaSyA_phpapSMniXBO2AfGjxp3ZfAD64wyh1s' # Verdi
#key = "&key=" + 'AIzaSyBXOIFAVM65wlaVsRjD-q6YnWQ4V0HpgZQ' # Dreamfish
step_size = 0.0001
search_radius = 0.0100
locations_limit = 100000
trail = []
forks = []
def get_nearest_pano(coord):
base = "https://maps.googleapis.com/maps/api/streetview/metadata?"
arg_location = 'location=' + coord
arg_heading = '&heading=' + '0'
full_URL = base + arg_location + arg_heading + key
req = urllib2.urlopen(full_URL)
reply = req.read()
json_parsed = json.loads(reply)
json_status = json_parsed['status']
if (json_status=='OK'):
json_date = json_parsed['date']
json_pano_id = json_parsed['pano_id']
json_latitude = json_parsed['location']['lat']
json_longitude = json_parsed['location']['lng']
return (json_date, json_pano_id, json_latitude, json_longitude, 0.0)
else:
return None
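# Illustrative example (requires a valid API key and network access; the
# returned values are hypothetical):
#   get_nearest_pano('40.363377,-74.013535')
#   # -> ('2017-06', 'SoMePaNoId', 40.3634, -74.0135, 0.0)
#   # or None when the Street View metadata API reports no nearby pano.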
def there_exists_a_new_pano(some_location):
global trail
visited_panos = [pano[1] for pano in trail]
return not some_location[1] in visited_panos
def distance_between_panos(first_pano, second_pano):
if second_pano is not None and first_pano is not None:
scalar_distance = ((second_pano[2]-first_pano[2])**2 \
+ (second_pano[3]-first_pano[3])**2) \
** (1.0/2.0)
else:
scalar_distance = 1
return scalar_distance
def some_point_to_relative_bearing(observer_position, observer_track, relative_bearing):
# Given an observer's position, the observer's track and a relative bearing,
# return a pano found by stepping outward in that direction (up to three
# steps of step_size away). Return None if no new pano is found there.
steps = 0
new_point = None
absolute_bearing = observer_track + relative_bearing
lat_increment = math.sin(absolute_bearing)
lon_increment = math.cos(absolute_bearing)
while new_point is None:
steps += 1
if steps > 3:
break
latitude_of_the_new_point = observer_position[2] + (lat_increment * step_size * steps)
longitude_of_the_new_point = observer_position[3] + (lon_increment * step_size * steps)
coordinates_of_the_new_point = ('{},{}'.format(latitude_of_the_new_point, longitude_of_the_new_point))
nearest_pano = get_nearest_pano(coordinates_of_the_new_point)
# Record the direction of travel.
if nearest_pano is not None:
new_point = (nearest_pano[0], nearest_pano[1], nearest_pano[2], nearest_pano[3], math.degrees(observer_track))
if distance_between_panos(observer_position, new_point) < step_size/2.0:
new_point = None
return new_point
def next_step(current_point, current_track):
global forks
paths = set()
next_step = None
for relative_bearing in [math.pi * 2 * (3.0/4.0),
math.pi * 2 * (2.0/4.0),
math.pi * 2 * (1.0/4.0),
math.pi * 2 * (0.0/4.0)]:
potential_next_step = some_point_to_relative_bearing(current_point, current_track, relative_bearing)
if potential_next_step:
paths.add(potential_next_step)
for path in paths:
if there_exists_a_new_pano(path):
forks.append(path)
forks.append(current_point)
if forks:
forks.pop()
return forks.pop()
else:
return None
def travel_along_path(prior_point, current_point):
# In conventional Cartesian trig, '0' points horizontally to the right and
# angles increase counter-clockwise. We're using an absolute lat/lon grid,
# so '0' is geographic north and angles increase clockwise.
lat_track = current_point[2] - prior_point[2]
lon_track = current_point[3] - prior_point[3]
current_track = (math.atan2(lon_track,lat_track)+2*math.pi)%(2*math.pi)
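# Example (illustrative): a step due east has lat_track == 0 and
# lon_track > 0, so atan2(lon_track, lat_track) == pi/2 and current_track
# works out to 90 degrees clockwise from geographic north, matching the
# convention described above.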
way_ahead = next_step(current_point, current_track)
if distance_between_panos(start_point, way_ahead) < search_radius:
new_prior_point = current_point
new_current_point = way_ahead
else:
new_prior_point = forks.pop()
new_current_point = forks.pop()
return new_prior_point, new_current_point
def venture_forth(lat=None, lon=None):
# Starting at a given location,
# move outward along paths of existing GSV panoramas.
global trail
global start_point
if os.path.isfile('./venture.csv'):
trail = pd.read_csv('./venture.csv').values.tolist()
else:
df = pd.DataFrame(trail, columns=['date', 'pano_id', 'latitude', 'longitude', 'comment'])
df.to_csv('venture.csv', index=False)
if lat is None and lon is None:
start_point = trail[1]
next_point = trail[-1]
last_point = trail[-2]
this_point = trail[-1]
else:
coordinates = ('{},{}'.format(lat, lon))
start_point = last_point = get_nearest_pano(coordinates)
next_point = this_point = next_step(start_point, 0.0)
trail.append(start_point)
sp = pd.DataFrame(list(start_point)).T
sp.to_csv('venture.csv', mode='a', header=False, index=False)
trail.append(next_point)
next_df = pd.DataFrame(list(next_point)).T
next_df.to_csv('venture.csv', mode='a', header=False, index=False)
while len(trail) <= locations_limit:
last_point, this_point = travel_along_path(last_point, this_point)
trail.append(this_point)
df = pd.DataFrame(list(this_point)).T
df.to_csv('venture.csv', mode='a', header=False, index=False)
print('{}: {:.4f} ; {:.4f} heading {:3.0f} {}'.format(len(trail), this_point[2], this_point[3], this_point[4], this_point[1]))
print('*** DONE VENTURING ***')
venture_forth(start_latitude, start_longitude)
|
|
"""
DataTypes used by this provider
"""
import inspect
import ipaddress
import logging
import os
import re
try:
from urllib.parse import urlparse
from urllib.parse import urljoin
except ImportError: # python 2
from urlparse import urlparse
from urlparse import urljoin
from keystoneclient.v3.regions import Region
import novaclient.exceptions as novaex
import swiftclient
from swiftclient.service import SwiftService
from swiftclient.service import SwiftUploadObject
from swiftclient.utils import generate_temp_url
from cloudbridge.base.resources import BaseAttachmentInfo
from cloudbridge.base.resources import BaseBucket
from cloudbridge.base.resources import BaseBucketObject
from cloudbridge.base.resources import BaseDnsRecord
from cloudbridge.base.resources import BaseDnsZone
from cloudbridge.base.resources import BaseFloatingIP
from cloudbridge.base.resources import BaseInstance
from cloudbridge.base.resources import BaseInternetGateway
from cloudbridge.base.resources import BaseKeyPair
from cloudbridge.base.resources import BaseMachineImage
from cloudbridge.base.resources import BaseNetwork
from cloudbridge.base.resources import BasePlacementZone
from cloudbridge.base.resources import BaseRegion
from cloudbridge.base.resources import BaseRouter
from cloudbridge.base.resources import BaseSnapshot
from cloudbridge.base.resources import BaseSubnet
from cloudbridge.base.resources import BaseVMFirewall
from cloudbridge.base.resources import BaseVMFirewallRule
from cloudbridge.base.resources import BaseVMType
from cloudbridge.base.resources import BaseVolume
from cloudbridge.interfaces.resources import GatewayState
from cloudbridge.interfaces.resources import InstanceState
from cloudbridge.interfaces.resources import MachineImageState
from cloudbridge.interfaces.resources import NetworkState
from cloudbridge.interfaces.resources import RouterState
from cloudbridge.interfaces.resources import SnapshotState
from cloudbridge.interfaces.resources import SubnetState
from cloudbridge.interfaces.resources import TrafficDirection
from cloudbridge.interfaces.resources import VolumeState
from .subservices import OpenStackBucketObjectSubService
from .subservices import OpenStackDnsRecordSubService
from .subservices import OpenStackFloatingIPSubService
from .subservices import OpenStackGatewaySubService
from .subservices import OpenStackSubnetSubService
from .subservices import OpenStackVMFirewallRuleSubService
ONE_GIG = 1048576000 # in bytes
FIVE_GIG = ONE_GIG * 5 # in bytes
log = logging.getLogger(__name__)
class OpenStackMachineImage(BaseMachineImage):
# ref: http://docs.openstack.org/developer/glance/statuses.html
IMAGE_STATE_MAP = {
'queued': MachineImageState.PENDING,
'saving': MachineImageState.PENDING,
'active': MachineImageState.AVAILABLE,
'killed': MachineImageState.ERROR,
'deleted': MachineImageState.UNKNOWN,
'pending_delete': MachineImageState.PENDING,
'deactivated': MachineImageState.ERROR
}
def __init__(self, provider, os_image):
super(OpenStackMachineImage, self).__init__(provider)
if isinstance(os_image, OpenStackMachineImage):
# pylint:disable=protected-access
self._os_image = os_image._os_image
else:
self._os_image = os_image
@property
def id(self):
"""
Get the image identifier.
"""
return self._os_image.id
@property
def name(self):
"""
Get the image identifier.
"""
return self._os_image.id
@property
def label(self):
"""
Get the image label.
"""
return self._os_image.name
@label.setter
# pylint:disable=arguments-differ
def label(self, value):
"""
Set the image label.
"""
self.assert_valid_resource_label(value)
self._provider.os_conn.image.update_image(
self._os_image, name=value or "")
@property
def description(self):
"""
Get the image description.
"""
return None
@property
def min_disk(self):
"""
Returns the minimum size of the disk that's required to
boot this image (in GB)
:rtype: ``int``
:return: The minimum disk size needed by this image
"""
return self._os_image.min_disk
def delete(self):
"""
Delete this image
"""
self._os_image.delete(self._provider.os_conn.image)
@property
def state(self):
return OpenStackMachineImage.IMAGE_STATE_MAP.get(
self._os_image.status, MachineImageState.UNKNOWN)
def refresh(self):
"""
Refreshes the state of this instance by re-querying the cloud provider
for its latest state.
"""
log.debug("Refreshing OpenStack Machine Image")
image = self._provider.compute.images.get(self.id)
if image:
# pylint:disable=protected-access
self._os_image = image._os_image
else:
# The image no longer exists and cannot be refreshed.
# set the status to unknown
self._os_image.status = 'unknown'
class OpenStackPlacementZone(BasePlacementZone):
def __init__(self, provider, zone, region):
super(OpenStackPlacementZone, self).__init__(provider)
if isinstance(zone, OpenStackPlacementZone):
# pylint:disable=protected-access
self._os_zone = zone._os_zone
# pylint:disable=protected-access
self._os_region = zone._os_region
else:
self._os_zone = zone
self._os_region = region
@property
def id(self):
"""
Get the zone id
:rtype: ``str``
:return: ID for this zone as returned by the cloud middleware.
"""
return self._os_zone
@property
def name(self):
"""
Get the zone name.
:rtype: ``str``
:return: Name for this zone as returned by the cloud middleware.
"""
# return self._os_zone.zoneName
return self._os_zone
@property
def region_name(self):
"""
Get the region that this zone belongs to.
:rtype: ``str``
:return: Name of this zone's region as returned by the cloud middleware
"""
return self._os_region
class OpenStackVMType(BaseVMType):
def __init__(self, provider, os_flavor):
super(OpenStackVMType, self).__init__(provider)
self._os_flavor = os_flavor
@property
def id(self):
return self._os_flavor.id
@property
def name(self):
return self._os_flavor.name
@property
def family(self):
# TODO: This may not be standardised across OpenStack
# but NeCTAR is using it this way
return self.extra_data.get('flavor_class:name')
@property
def vcpus(self):
return self._os_flavor.vcpus
@property
def ram(self):
return int(self._os_flavor.ram) / 1024
@property
def size_root_disk(self):
return self._os_flavor.disk
@property
def size_ephemeral_disks(self):
return 0 if self._os_flavor.ephemeral == 'N/A' else \
self._os_flavor.ephemeral
@property
def num_ephemeral_disks(self):
return 0 if self._os_flavor.ephemeral == 'N/A' else \
self._os_flavor.ephemeral
@property
def extra_data(self):
extras = self._os_flavor.get_keys()
extras['rxtx_factor'] = self._os_flavor.rxtx_factor
extras['swap'] = self._os_flavor.swap
extras['is_public'] = self._os_flavor.is_public
return extras
class OpenStackInstance(BaseInstance):
# ref: http://docs.openstack.org/developer/nova/v2/2.0_server_concepts.html
# and http://developer.openstack.org/api-ref-compute-v2.html
INSTANCE_STATE_MAP = {
'ACTIVE': InstanceState.RUNNING,
'BUILD': InstanceState.PENDING,
'DELETED': InstanceState.DELETED,
'ERROR': InstanceState.ERROR,
'HARD_REBOOT': InstanceState.REBOOTING,
'PASSWORD': InstanceState.PENDING,
'PAUSED': InstanceState.STOPPED,
'REBOOT': InstanceState.REBOOTING,
'REBUILD': InstanceState.CONFIGURING,
'RESCUE': InstanceState.CONFIGURING,
'RESIZE': InstanceState.CONFIGURING,
'REVERT_RESIZE': InstanceState.CONFIGURING,
'SOFT_DELETED': InstanceState.STOPPED,
'STOPPED': InstanceState.STOPPED,
'SUSPENDED': InstanceState.STOPPED,
'SHUTOFF': InstanceState.STOPPED,
'UNKNOWN': InstanceState.UNKNOWN,
'VERIFY_RESIZE': InstanceState.CONFIGURING
}
def __init__(self, provider, os_instance):
super(OpenStackInstance, self).__init__(provider)
self._os_instance = os_instance
@property
def id(self):
"""
Get the instance identifier.
"""
return self._os_instance.id
@property
def name(self):
"""
Get the instance identifier.
"""
return self.id
@property
# pylint:disable=arguments-differ
def label(self):
"""
Get the instance label.
"""
return self._os_instance.name
@label.setter
# pylint:disable=arguments-differ
def label(self, value):
"""
Set the instance label.
"""
self.assert_valid_resource_label(value)
self._os_instance.name = value
self._os_instance.update(name=value or "cb-inst")
@property
def public_ips(self):
"""
Get all the public IP addresses for this instance.
"""
# OpenStack doesn't provide an easy way to figure out whether an IP is
# public or private, since the returned IPs are grouped by an arbitrary
# network label. Therefore, it's necessary to parse the address and
# determine whether it's public or private
return [address
for _, addresses in self._os_instance.networks.items()
for address in addresses
if not ipaddress.ip_address(address).is_private]
@property
def private_ips(self):
"""
Get all the private IP addresses for this instance.
"""
return [address
for _, addresses in self._os_instance.networks.items()
for address in addresses
if ipaddress.ip_address(address).is_private]
@property
def vm_type_id(self):
"""
Get the VM type name.
"""
return self._os_instance.flavor.get('id')
@property
def vm_type(self):
"""
Get the VM type object.
"""
flavor = self._provider.nova.flavors.get(
self._os_instance.flavor.get('id'))
return OpenStackVMType(self._provider, flavor)
def reboot(self):
"""
Reboot this instance (using the cloud middleware API).
"""
self._os_instance.reboot()
@property
def image_id(self):
"""
Get the image ID for this instance.
"""
# In OpenStack, the Machine Image of a running instance may
# be deleted, so make sure the image exists before attempting to
# retrieve its id
return (self._os_instance.image.get("id")
if self._os_instance.image else "")
@property
def zone_id(self):
"""
Get the placement zone where this instance is running.
"""
return getattr(self._os_instance, 'OS-EXT-AZ:availability_zone', None)
@property
def subnet_id(self):
"""
Extract (one) subnet id associated with this instance.
In OpenStack, instances are associated with ports instead of
subnets so we need to dig through several connections to retrieve
the subnet_id. Further, there can potentially be several ports each
connected to different subnets. This implementation retrieves one
subnet, the one corresponding to port associated with the first
private IP associated with the instance.
"""
# MAC address can be used to identify a port, so extract the MAC
# address corresponding to the first private (fixed) IP associated with
# the instance.
port = None
addr = None
addresses = self._os_instance.to_dict().get('addresses', {})
for net in addresses:
for iface in addresses[net]:
if iface.get('OS-EXT-IPS:type') == 'fixed':
port = iface.get('OS-EXT-IPS-MAC:mac_addr')
addr = iface.get('addr')
break
if port:
break
# Now get a handle to a port with the given MAC address and get the
# subnet to which the private IP is connected as the desired id.
for prt in self._provider.neutron.list_ports().get('ports'):
if prt.get('mac_address') == port:
for ip in prt.get('fixed_ips'):
if ip.get('ip_address') == addr:
return ip.get('subnet_id')
@property
def vm_firewalls(self):
return [
self._provider.security.vm_firewalls.get(group.id)
for group in self._os_instance.list_security_group()
]
@property
def vm_firewall_ids(self):
"""
Get the VM firewall IDs associated with this instance.
"""
return [fw.id for fw in self.vm_firewalls]
@property
def key_pair_id(self):
"""
Get the id of the key pair associated with this instance.
"""
return self._os_instance.key_name
def create_image(self, label):
"""
Create a new image based on this instance.
"""
log.debug("Creating OpenStack Image with the label %s", label)
self.assert_valid_resource_label(label)
image_id = self._os_instance.create_image(label)
img = OpenStackMachineImage(
self._provider, self._provider.compute.images.get(image_id))
return img
def _get_fip(self, floating_ip):
"""Get a floating IP object based on the supplied ID."""
return self._provider.networking._floating_ips.get(None, floating_ip)
def add_floating_ip(self, floating_ip):
"""
Add a floating IP address to this instance.
"""
log.debug("Adding floating IP adress: %s", floating_ip)
fip = (floating_ip if isinstance(floating_ip, OpenStackFloatingIP)
else self._get_fip(floating_ip))
self._provider.os_conn.compute.add_floating_ip_to_server(
self.id, fip.public_ip)
def remove_floating_ip(self, floating_ip):
"""
Remove a floating IP address from this instance.
"""
log.debug("Removing floating IP adress: %s", floating_ip)
fip = (floating_ip if isinstance(floating_ip, OpenStackFloatingIP)
else self._get_fip(floating_ip))
self._provider.os_conn.compute.remove_floating_ip_from_server(
self.id, fip.public_ip)
def add_vm_firewall(self, firewall):
"""
Add a VM firewall to this instance
"""
log.debug("Adding firewall: %s", firewall)
self._os_instance.add_security_group(firewall.id)
def remove_vm_firewall(self, firewall):
"""
Remove a VM firewall from this instance
"""
log.debug("Removing firewall: %s", firewall)
self._os_instance.remove_security_group(firewall.id)
@property
def state(self):
return OpenStackInstance.INSTANCE_STATE_MAP.get(
self._os_instance.status, InstanceState.UNKNOWN)
def refresh(self):
"""
Refreshes the state of this instance by re-querying the cloud provider
for its latest state.
"""
instance = self._provider.compute.instances.get(
self.id)
if instance:
# pylint:disable=protected-access
self._os_instance = instance._os_instance
else:
# The instance no longer exists and cannot be refreshed.
# set the status to unknown
self._os_instance.status = 'unknown'
class OpenStackRegion(BaseRegion):
def __init__(self, provider, os_region):
super(OpenStackRegion, self).__init__(provider)
self._os_region = os_region
@property
def id(self):
return (self._os_region.id if isinstance(self._os_region, Region)
else self._os_region)
@property
def name(self):
return self.id
@property
def zones(self):
# ``detailed`` param must be set to ``False`` because the (default)
# ``True`` value requires Admin privileges
if self.name == self._provider.region_name: # optimisation
zones = self._provider.nova.availability_zones.list(detailed=False)
else:
try:
# pylint:disable=protected-access
region_nova = self._provider._connect_nova_region(self.name)
zones = region_nova.availability_zones.list(detailed=False)
except novaex.EndpointNotFound:
# This region may not have a compute endpoint. If so just
# return an empty list
zones = []
return [OpenStackPlacementZone(self._provider, z.zoneName, self.name)
for z in zones]
class OpenStackVolume(BaseVolume):
# Ref: http://developer.openstack.org/api-ref-blockstorage-v2.html
VOLUME_STATE_MAP = {
'creating': VolumeState.CREATING,
'available': VolumeState.AVAILABLE,
'attaching': VolumeState.CONFIGURING,
'in-use': VolumeState.IN_USE,
'deleting': VolumeState.CONFIGURING,
'error': VolumeState.ERROR,
'error_deleting': VolumeState.ERROR,
'backing-up': VolumeState.CONFIGURING,
'restoring-backup': VolumeState.CONFIGURING,
'error_restoring': VolumeState.ERROR,
'error_extending': VolumeState.ERROR
}
def __init__(self, provider, volume):
super(OpenStackVolume, self).__init__(provider)
self._volume = volume
@property
def id(self):
return self._volume.id
@property
def name(self):
return self.id
@property
# pylint:disable=arguments-differ
def label(self):
"""
Get the volume label.
"""
return self._volume.name
@label.setter
# pylint:disable=arguments-differ
def label(self, value):
"""
Set the volume label.
"""
self.assert_valid_resource_label(value)
self._volume.name = value
self._volume.commit(self._provider.os_conn.block_storage)
@property
def description(self):
return self._volume.description
@description.setter
def description(self, value):
self._volume.description = value
self._volume.commit(self._provider.os_conn.block_storage)
@property
def size(self):
return self._volume.size
@property
def create_time(self):
return self._volume.created_at
@property
def zone_id(self):
return self._volume.availability_zone
@property
def source(self):
if self._volume.snapshot_id:
return self._provider.storage.snapshots.get(
self._volume.snapshot_id)
return None
@property
def attachments(self):
if self._volume.attachments:
return BaseAttachmentInfo(
self,
self._volume.attachments[0].get('server_id'),
self._volume.attachments[0].get('device'))
else:
return None
def attach(self, instance, device):
"""
Attach this volume to an instance.
"""
log.debug("Attaching %s to %s instance", device, instance)
instance_id = instance.id if isinstance(
instance,
OpenStackInstance) else instance
self._provider.os_conn.compute.create_volume_attachment(
server=instance_id, volume_id=self.id, device=device)
def detach(self, force=False):
"""
Detach this volume from an instance.
"""
for attachment in self._volume.attachments:
self._provider.os_conn.compute.delete_volume_attachment(
attachment['id'], attachment['server_id'])
def create_snapshot(self, label, description=None):
"""
Create a snapshot of this Volume.
"""
log.debug("Creating snapchat of volume: %s with the "
"description: %s", label, description)
return self._provider.storage.snapshots.create(
label, self, description=description)
@property
def state(self):
return OpenStackVolume.VOLUME_STATE_MAP.get(
self._volume.status, VolumeState.UNKNOWN)
def refresh(self):
"""
Refreshes the state of this volume by re-querying the cloud provider
for its latest state.
"""
vol = self._provider.storage.volumes.get(
self.id)
if vol:
# pylint:disable=protected-access
self._volume = vol._volume # pylint:disable=protected-access
else:
# The volume no longer exists and cannot be refreshed.
# set the status to unknown
self._volume.status = 'unknown'
class OpenStackSnapshot(BaseSnapshot):
# Ref: http://developer.openstack.org/api-ref-blockstorage-v2.html
SNAPSHOT_STATE_MAP = {
'creating': SnapshotState.PENDING,
'available': SnapshotState.AVAILABLE,
'deleting': SnapshotState.CONFIGURING,
'error': SnapshotState.ERROR,
'error_deleting': SnapshotState.ERROR
}
def __init__(self, provider, snapshot):
super(OpenStackSnapshot, self).__init__(provider)
self._snapshot = snapshot
@property
def id(self):
return self._snapshot.id
@property
def name(self):
return self.id
@property
# pylint:disable=arguments-differ
def label(self):
"""
Get the snapshot label.
"""
return self._snapshot.name
@label.setter
# pylint:disable=arguments-differ
def label(self, value):
"""
Set the snapshot label.
"""
self.assert_valid_resource_label(value)
self._snapshot.name = value
self._snapshot.commit(self._provider.os_conn.block_storage)
@property
def description(self):
return self._snapshot.description
@description.setter
def description(self, value):
self._snapshot.description = value
self._snapshot.commit(self._provider.os_conn.block_storage)
@property
def size(self):
return self._snapshot.size
@property
def volume_id(self):
return self._snapshot.volume_id
@property
def create_time(self):
return self._snapshot.created_at
@property
def state(self):
return OpenStackSnapshot.SNAPSHOT_STATE_MAP.get(
self._snapshot.status, SnapshotState.UNKNOWN)
def refresh(self):
"""
Refreshes the state of this snapshot by re-querying the cloud provider
for its latest state.
"""
snap = self._provider.storage.snapshots.get(
self.id)
if snap:
# pylint:disable=protected-access
self._snapshot = snap._snapshot
else:
# The snapshot no longer exists and cannot be refreshed.
# set the status to unknown
self._snapshot.status = 'unknown'
def create_volume(self, size=None, volume_type=None, iops=None):
"""
Create a new Volume from this Snapshot.
"""
vol_label = "from-snap-{0}".format(self.label or self.id)
self.assert_valid_resource_label(vol_label)
size = size if size else self._snapshot.size
os_vol = self._provider.os_conn.block_storage.create_volume(
size=size, name=vol_label, snapshot_id=self._snapshot.id,
availability_zone=self._provider.zone_name)
cb_vol = OpenStackVolume(self._provider, os_vol)
return cb_vol
class OpenStackNetwork(BaseNetwork):
# Ref: https://github.com/openstack/neutron/blob/master/neutron/plugins/
# common/constants.py
_NETWORK_STATE_MAP = {
'PENDING_CREATE': NetworkState.PENDING,
'PENDING_UPDATE': NetworkState.PENDING,
'PENDING_DELETE': NetworkState.PENDING,
'CREATED': NetworkState.PENDING,
'INACTIVE': NetworkState.PENDING,
'DOWN': NetworkState.DOWN,
'ERROR': NetworkState.ERROR,
'ACTIVE': NetworkState.AVAILABLE
}
def __init__(self, provider, network):
super(OpenStackNetwork, self).__init__(provider)
self._network = network
self._gateway_service = OpenStackGatewaySubService(provider, self)
self._subnet_svc = OpenStackSubnetSubService(provider, self)
@property
def id(self):
return self._network.get('id', None)
@property
def name(self):
return self.id
@property
def label(self):
return self._network.get('name', None)
@label.setter
def label(self, value):
"""
Set the network label.
"""
self.assert_valid_resource_label(value)
self._provider.neutron.update_network(
self.id, {'network': {'name': value or ""}})
self.refresh()
@property
def external(self):
return self._network.get('router:external', False)
@property
def state(self):
self.refresh()
return OpenStackNetwork._NETWORK_STATE_MAP.get(
self._network.get('status', None),
NetworkState.UNKNOWN)
@property
def cidr_block(self):
# OpenStack does not define a CIDR block for networks
return ''
@property
def subnets(self):
return self._subnet_svc
def refresh(self):
"""Refresh the state of this network by re-querying the provider."""
network = self._provider.networking.networks.get(self.id)
if network:
# pylint:disable=protected-access
self._network = network._network
else:
# Network no longer exists
self._network = {}
@property
def gateways(self):
return self._gateway_service
class OpenStackSubnet(BaseSubnet):
def __init__(self, provider, subnet):
super(OpenStackSubnet, self).__init__(provider)
self._subnet = subnet
self._state = None
@property
def id(self):
return self._subnet.get('id', None)
@property
def name(self):
return self.id
@property
def label(self):
return self._subnet.get('name', None)
@label.setter
def label(self, value): # pylint:disable=arguments-differ
"""
Set the subnet label.
"""
self.assert_valid_resource_label(value)
self._provider.neutron.update_subnet(
self.id, {'subnet': {'name': value or ""}})
self._subnet['name'] = value
@property
def cidr_block(self):
return self._subnet.get('cidr', None)
@property
def network_id(self):
return self._subnet.get('network_id', None)
@property
def zone(self):
"""
OpenStack does not have a notion of placement zone for subnets.
Default to None.
"""
return None
@property
def state(self):
return SubnetState.UNKNOWN if self._state == SubnetState.UNKNOWN \
else SubnetState.AVAILABLE
def refresh(self):
subnet = self._provider.networking.subnets.get(self.id)
if subnet:
# pylint:disable=protected-access
self._subnet = subnet._subnet
self._state = SubnetState.AVAILABLE
else:
# subnet no longer exists
self._state = SubnetState.UNKNOWN
class OpenStackFloatingIP(BaseFloatingIP):
def __init__(self, provider, floating_ip):
super(OpenStackFloatingIP, self).__init__(provider)
self._ip = floating_ip
@property
def id(self):
return self._ip.id
@property
def public_ip(self):
return self._ip.floating_ip_address
@property
def private_ip(self):
return self._ip.fixed_ip_address
@property
def in_use(self):
return bool(self._ip.port_id)
def refresh(self):
net = self._provider.networking.networks.get(
self._ip.floating_network_id)
gw = net.gateways.get_or_create()
fip = gw.floating_ips.get(self.id)
# pylint:disable=protected-access
self._ip = fip._ip
@property
def _gateway_id(self):
return self._ip.floating_network_id
class OpenStackRouter(BaseRouter):
def __init__(self, provider, router):
super(OpenStackRouter, self).__init__(provider)
self._router = router
@property
def id(self):
return getattr(self._router, 'id', None)
@property
def name(self):
return self.id
@property
def label(self):
return self._router.name
@label.setter
def label(self, value): # pylint:disable=arguments-differ
"""
Set the router label.
"""
self.assert_valid_resource_label(value)
self._router = self._provider.os_conn.update_router(self.id, value)
def refresh(self):
self._router = self._provider.os_conn.get_router(self.id)
@property
def state(self):
if self._router.external_gateway_info:
return RouterState.ATTACHED
return RouterState.DETACHED
@property
def network_id(self):
ports = self._provider.os_conn.list_ports(
filters={'device_id': self.id})
if ports:
return ports[0].network_id
return None
def attach_subnet(self, subnet):
ret = self._provider.os_conn.add_router_interface(
self._router.toDict(), subnet.id)
if subnet.id in ret.get('subnet_ids', ""):
return True
return False
def detach_subnet(self, subnet):
ret = self._provider.os_conn.remove_router_interface(
self._router.toDict(), subnet.id)
if not ret or subnet.id not in ret.get('subnet_ids', ""):
return True
return False
@property
def subnets(self):
# A router and a subnet are linked via a port, so traverse ports
# associated with the current router to find a list of subnets
# associated with it.
subnets = []
for port in self._provider.os_conn.list_ports(
filters={'device_id': self.id}):
for fixed_ip in port.fixed_ips:
subnets.append(self._provider.networking.subnets.get(
fixed_ip.get('subnet_id')))
return subnets
def attach_gateway(self, gateway):
self._provider.os_conn.update_router(
self.id, ext_gateway_net_id=gateway.id)
def detach_gateway(self, gateway):
# TODO: OpenStack SDK Connection object doesn't appear to have a method
# for detaching/clearing the external gateway.
self._provider.neutron.remove_gateway_router(self.id)
class OpenStackInternetGateway(BaseInternetGateway):
GATEWAY_STATE_MAP = {
NetworkState.AVAILABLE: GatewayState.AVAILABLE,
NetworkState.DOWN: GatewayState.ERROR,
NetworkState.ERROR: GatewayState.ERROR,
NetworkState.PENDING: GatewayState.CONFIGURING,
NetworkState.UNKNOWN: GatewayState.UNKNOWN
}
def __init__(self, provider, gateway_net):
super(OpenStackInternetGateway, self).__init__(provider)
if isinstance(gateway_net, OpenStackNetwork):
# pylint:disable=protected-access
gateway_net = gateway_net._network
self._gateway_net = gateway_net
self._fips_container = OpenStackFloatingIPSubService(provider, self)
@property
def id(self):
return self._gateway_net.get('id', None)
@property
def name(self):
return self._gateway_net.get('name', None)
@property
def network_id(self):
return self._gateway_net.get('id')
def refresh(self):
"""Refresh the state of this network by re-querying the provider."""
network = self._provider.networking.networks.get(self.id)
if network:
# pylint:disable=protected-access
self._gateway_net = network._network
else:
# subnet no longer exists
self._gateway_net.state = NetworkState.UNKNOWN
@property
def state(self):
return self.GATEWAY_STATE_MAP.get(
self._gateway_net.state, GatewayState.UNKNOWN)
@property
def floating_ips(self):
return self._fips_container
class OpenStackKeyPair(BaseKeyPair):
def __init__(self, provider, key_pair):
super(OpenStackKeyPair, self).__init__(provider, key_pair)
class OpenStackVMFirewall(BaseVMFirewall):
_network_id_tag = "CB-auto-associated-network-id: "
def __init__(self, provider, vm_firewall):
super(OpenStackVMFirewall, self).__init__(provider, vm_firewall)
self._rule_svc = OpenStackVMFirewallRuleSubService(provider, self)
@property
def network_id(self):
"""
OpenStack does not associate a fw with a network so extract from desc.
:return: The network ID supplied when this firewall was created or
`None` if ID cannot be identified.
"""
# Extracting networking ID from description
exp = ".*\\[" + self._network_id_tag + "([^\\]]*)\\].*"
matches = re.match(exp, self._description)
if matches:
return matches.group(1)
# We generally simulate a network being associated with a firewall;
# however, because of some networking specificity in Nectar, we must
# allow `None` return value as well in case an ID was not discovered.
else:
return None
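# Example (illustrative): a firewall whose description contains
#   "[CB-auto-associated-network-id: example-net-id]"
# yields network_id == 'example-net-id'; without the bracketed tag the
# property returns None.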
@property
def _description(self):
return self._vm_firewall.description or ""
@property
def description(self):
desc_fragment = " [{}{}]".format(self._network_id_tag,
self.network_id)
desc = self._description
if desc:
return desc.replace(desc_fragment, "")
else:
return None
@description.setter
def description(self, value):
if not value:
value = ""
value += " [{}{}]".format(self._network_id_tag,
self.network_id)
self._provider.os_conn.network.update_security_group(
self.id, description=value)
self.refresh()
@property
def name(self):
"""
Return the name of this VM firewall.
"""
return self.id
@property
def label(self):
return self._vm_firewall.name
@label.setter
# pylint:disable=arguments-differ
def label(self, value):
self.assert_valid_resource_label(value)
self._provider.os_conn.network.update_security_group(
self.id, name=value or "")
self.refresh()
@property
def rules(self):
return self._rule_svc
def refresh(self):
self._vm_firewall = self._provider.os_conn.network.get_security_group(
self.id)
def to_json(self):
attr = inspect.getmembers(self, lambda a: not(inspect.isroutine(a)))
js = {k: v for(k, v) in attr if not k.startswith('_')}
json_rules = [r.to_json() for r in self.rules]
js['rules'] = json_rules
return js
class OpenStackVMFirewallRule(BaseVMFirewallRule):
def __init__(self, parent_fw, rule):
super(OpenStackVMFirewallRule, self).__init__(parent_fw, rule)
@property
def id(self):
return self._rule.get('id')
@property
def direction(self):
direction = self._rule.get('direction')
if direction == 'ingress':
return TrafficDirection.INBOUND
elif direction == 'egress':
return TrafficDirection.OUTBOUND
else:
return None
@property
def protocol(self):
return self._rule.get('protocol')
@property
def from_port(self):
return self._rule.get('port_range_min')
@property
def to_port(self):
return self._rule.get('port_range_max')
@property
def cidr(self):
return self._rule.get('remote_ip_prefix')
@property
def src_dest_fw_id(self):
fw = self.src_dest_fw
if fw:
return fw.id
return None
@property
def src_dest_fw(self):
fw_id = self._rule.get('remote_group_id')
if fw_id:
return self._provider.security.vm_firewalls.get(fw_id)
return None
class OpenStackBucketObject(BaseBucketObject):
def __init__(self, provider, cbcontainer, obj):
super(OpenStackBucketObject, self).__init__(provider)
self.cbcontainer = cbcontainer
self._obj = obj
@property
def id(self):
return self._obj.get("name")
@property
def name(self):
"""Get this object's name."""
return self.id
@property
def size(self):
return self._obj.get("bytes")
@property
def last_modified(self):
return self._obj.get("last_modified")
def iter_content(self):
"""Returns this object's content as an iterable."""
_, content = self._provider.swift.get_object(
self.cbcontainer.name, self.name, resp_chunk_size=65536)
return content
def upload(self, data):
"""
Set the contents of this object to the data read from the source
string.
.. warning:: Will fail if the data is larger than 5 Gig.
"""
self._provider.swift.put_object(self.cbcontainer.name, self.name,
data)
def upload_from_file(self, path):
"""
Stores the contents of the file pointed by the ``path`` variable.
If the file is bigger than 5 Gig, it will be broken into segments.
:type path: ``str``
:param path: Absolute path to the file to be uploaded to Swift.
:rtype: ``bool``
:return: ``True`` if successful, ``False`` if not.
.. note::
* The size of the segments chosen (or any of the other upload
options) is not under user control.
* If called this method will remap the
``swiftclient.service.get_conn`` factory method to
``self._provider._connect_swift``
.. seealso:: https://github.com/CloudVE/cloudbridge/issues/35#issuecomment-297629661 # noqa
"""
upload_options = {}
if 'segment_size' not in upload_options:
if os.path.getsize(path) >= FIVE_GIG:
upload_options['segment_size'] = FIVE_GIG
# remap the swift service's connection factory method
# pylint:disable=protected-access
swiftclient.service.get_conn = self._provider._connect_swift
result = True
with SwiftService() as swift:
upload_object = SwiftUploadObject(path, object_name=self.name)
for up_res in swift.upload(self.cbcontainer.name,
[upload_object, ],
options=upload_options):
result = result and up_res['success']
return result
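# Usage sketch (illustrative only; the bucket/object names and the local
# path are hypothetical):
#   bucket = provider.storage.buckets.get('my-bucket')
#   obj = bucket.objects.create('backup.tar.gz')
#   obj.upload_from_file('/tmp/backup.tar.gz')
# Files of FIVE_GIG bytes or larger are uploaded in FIVE_GIG-sized segments.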
def delete(self):
"""
Delete this object.
:rtype: ``bool``
:return: True if successful
.. note:: If called this method will remap the
``swiftclient.service.get_conn`` factory method to
``self._provider._connect_swift``
"""
# remap the swift service's connection factory method
# pylint:disable=protected-access
swiftclient.service.get_conn = self._provider._connect_swift
result = True
with SwiftService() as swift:
for del_res in swift.delete(self.cbcontainer.name, [self.name, ]):
result = result and del_res['success']
return result
def generate_url(self, expires_in):
# Set a temp url key on the object (http://bit.ly/2NBiXGD)
temp_url_key = "cloudbridge-tmp-url-key"
self._provider.swift.post_account(
headers={"x-account-meta-temp-url-key": temp_url_key})
base_url = urlparse(self._provider.swift.get_service_auth()[0])
access_point = "{0}://{1}".format(base_url.scheme, base_url.netloc)
url_path = "/".join([base_url.path, self.cbcontainer.name, self.name])
return urljoin(access_point, generate_temp_url(url_path, expires_in,
temp_url_key, 'GET'))
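# Illustrative call (URL shape only; host and signature are hypothetical):
#   obj.generate_url(3600)
#   # -> 'https://swift.example.org/v1/AUTH_acct/<bucket>/<object>'
#   #    '?temp_url_sig=...&temp_url_expires=...'
# i.e. a pre-signed GET link that expires after the given number of seconds.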
def refresh(self):
self._obj = self.cbcontainer.objects.get(self.id)._obj
class OpenStackBucket(BaseBucket):
def __init__(self, provider, bucket):
super(OpenStackBucket, self).__init__(provider)
self._bucket = bucket
self._object_container = OpenStackBucketObjectSubService(provider,
self)
@property
def id(self):
return self._bucket.get("name")
@property
def name(self):
return self.id
@property
def objects(self):
return self._object_container
class OpenStackDnsZone(BaseDnsZone):
def __init__(self, provider, dns_zone):
super(OpenStackDnsZone, self).__init__(provider)
self._dns_zone = dns_zone
self._dns_record_container = OpenStackDnsRecordSubService(
provider, self)
@property
def id(self):
return self._dns_zone.id
@property
def name(self):
return self._dns_zone.name
@property
def admin_email(self):
return self._dns_zone.email
@property
def records(self):
return self._dns_record_container
class OpenStackDnsRecord(BaseDnsRecord):
def __init__(self, provider, dns_zone, dns_record):
super(OpenStackDnsRecord, self).__init__(provider)
self._dns_zone = dns_zone
self._dns_rec = dns_record
@property
def id(self):
return self._dns_rec.id
@property
def name(self):
return self._dns_rec.name
@property
def zone_id(self):
return self._dns_zone.id
@property
def type(self):
return self._dns_rec.type
@property
def data(self):
return self._dns_rec.records
@property
def ttl(self):
return self._dns_rec.ttl
def delete(self):
# pylint:disable=protected-access
return self._provider.dns._records.delete(self._dns_zone, self)
|
|
#!/usr/bin/python
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Module to manage IDL files."""
import copy
import pickle
import logging
import os
import os.path
import shutil
import idlnode
import idlparser
import idlrenderer
_logger = logging.getLogger('database')
class Database(object):
"""The Database class manages a collection of IDL files stored
inside a directory.
Each IDL file describes a single interface. The IDL files are written in the
FremontCut syntax, which is derived from the Web IDL syntax and includes
annotations.
Database operations include adding, updating and removing IDL files.
"""
def __init__(self, root_dir):
"""Initializes a Database over a given directory.
Args:
root_dir -- a directory. If directory does not exist, it will
be created.
"""
self._root_dir = root_dir
if not os.path.exists(root_dir):
_logger.debug('creating root directory %s' % root_dir)
os.makedirs(root_dir)
self._all_interfaces = {}
self._interfaces_to_delete = []
self._idlparser = idlparser.IDLParser(idlparser.FREMONTCUT_SYNTAX)
def Clone(self):
new_database = Database(self._root_dir)
new_database._all_interfaces = copy.deepcopy(self._all_interfaces)
new_database._interfaces_to_delete = copy.deepcopy(
self._interfaces_to_delete)
return new_database
def Delete(self):
"""Deletes the database by deleting its directory"""
if os.path.exists(self._root_dir):
shutil.rmtree(self._root_dir)
# reset in-memory constructs
self._all_interfaces = {}
def _ScanForInterfaces(self):
"""Iteratores over the database files and lists all interface names.
Return:
A list of interface names.
"""
res = []
def Visitor(_, dirname, names):
for name in names:
if os.path.isfile(os.path.join(dirname, name)):
root, ext = os.path.splitext(name)
if ext == '.idl':
res.append(root)
os.path.walk(self._root_dir, Visitor, None)
return res
def _FilePath(self, interface_name):
"""Calculates the file path that a given interface should
be saved to.
Args:
interface_name -- the name of the interface.
"""
return os.path.join(self._root_dir, '%s.idl' % interface_name)
def _LoadInterfaceFile(self, interface_name):
"""Loads an interface from the database.
Returns:
An IDLInterface instance or None if the interface is not found.
Args:
interface_name -- the name of the interface.
"""
file_name = self._FilePath(interface_name)
_logger.info('loading %s' % file_name)
if not os.path.exists(file_name):
return None
f = open(file_name, 'r')
content = f.read()
f.close()
# Parse file:
idl_file = idlnode.IDLFile(self._idlparser.parse(content), file_name)
if not idl_file.interfaces:
raise RuntimeError('No interface found in %s' % file_name)
elif len(idl_file.interfaces) > 1:
raise RuntimeError('Expected one interface in %s' % file_name)
interface = idl_file.interfaces[0]
self._all_interfaces[interface_name] = interface
return interface
def Load(self):
"""Loads all interfaces into memory.
"""
# FIXME: Speed this up by multi-threading.
for interface_name in self._ScanForInterfaces():
self._LoadInterfaceFile(interface_name)
self.Cache()
def Cache(self):
"""Serialize the database using pickle for faster startup in the future
"""
output_file = open(os.path.join(self._root_dir, 'cache.pickle'), 'wb')
pickle.dump(self._all_interfaces, output_file)
pickle.dump(self._interfaces_to_delete, output_file)
output_file.close()
def LoadFromCache(self):
"""Deserialize the database using pickle for fast startup
"""
input_file_name = os.path.join(self._root_dir, 'cache.pickle')
if not os.path.isfile(input_file_name):
self.Load()
return
input_file = open(input_file_name, 'rb')
self._all_interfaces = pickle.load(input_file)
self._interfaces_to_delete = pickle.load(input_file)
input_file.close()
def Save(self):
"""Saves all in-memory interfaces into files."""
for interface in self._all_interfaces.values():
self._SaveInterfaceFile(interface)
for interface_name in self._interfaces_to_delete:
self._DeleteInterfaceFile(interface_name)
def _SaveInterfaceFile(self, interface):
"""Saves an interface into the database.
Args:
interface -- an IDLInterface instance.
"""
interface_name = interface.id
# Actual saving
file_path = self._FilePath(interface_name)
_logger.debug('writing %s' % file_path)
dir_name = os.path.dirname(file_path)
if not os.path.exists(dir_name):
_logger.debug('creating directory %s' % dir_name)
os.mkdir(dir_name)
# Render the IDLInterface object into text.
text = idlrenderer.render(interface)
f = open(file_path, 'w')
f.write(text)
f.close()
def HasInterface(self, interface_name):
"""Returns True if the interface is in memory"""
return interface_name in self._all_interfaces
def GetInterface(self, interface_name):
"""Returns an IDLInterface corresponding to the interface_name
from memory.
Args:
interface_name -- the name of the interface.
"""
if interface_name not in self._all_interfaces:
raise RuntimeError('Interface %s is not loaded' % interface_name)
return self._all_interfaces[interface_name]
def AddInterface(self, interface):
"""Returns an IDLInterface corresponding to the interface_name
from memory.
Args:
interface -- the name of the interface.
"""
interface_name = interface.id
if interface_name in self._all_interfaces:
raise RuntimeError('Interface %s already exists' % interface_name)
self._all_interfaces[interface_name] = interface
def GetInterfaces(self):
"""Returns a list of all loaded interfaces."""
res = []
for _, interface in sorted(self._all_interfaces.items()):
res.append(interface)
return res
def DeleteInterface(self, interface_name):
"""Deletes an interface from the database. File is deleted when
Save() is called.
Args:
interface_name -- the name of the interface.
"""
if interface_name not in self._all_interfaces:
raise RuntimeError('Interface %s not found' % interface_name)
self._interfaces_to_delete.append(interface_name)
del self._all_interfaces[interface_name]
def _DeleteInterfaceFile(self, interface_name):
"""Actual file deletion"""
file_path = self._FilePath(interface_name)
if os.path.exists(file_path):
_logger.debug('deleting %s' % file_path)
os.remove(file_path)
def Hierarchy(self, interface):
yield interface
for parent in interface.parents:
parent_name = parent.type.id
if not self.HasInterface(parent_name):
continue
for parent_interface in self.Hierarchy(self.GetInterface(parent_name)):
yield parent_interface
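# Usage sketch (illustrative only; the directory path is hypothetical):
#   db = Database('./idl-database')
#   db.LoadFromCache()              # falls back to Load() if no pickle exists
#   for interface in db.GetInterfaces():
#       print interface.id          # Python 2 print, as used in this module
#   db.Save()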
|
|
# stdlib
from types import ListType
import time
# 3p
from mock import Mock
from nose.plugins.attrib import attr
import pymongo
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest, load_check
PORT1 = 37017
PORT2 = 37018
MAX_WAIT = 150
GAUGE = AgentCheck.gauge
RATE = AgentCheck.rate
class TestMongoUnit(AgentCheckTest):
"""
Unit tests for MongoDB AgentCheck.
"""
CHECK_NAME = 'mongo'
MONGODB_CONFIG = {
'server': "mongodb://localhost:%s/test" % PORT1
}
def test_build_metric_list(self):
"""
Build the metric list according to the user configuration.
Print a warning when an option has no match.
"""
# Initialize check
config = {
'instances': [self.MONGODB_CONFIG]
}
self.load_check(config)
setattr(self.check, "log", Mock())
build_metric_list = self.check._build_metric_list_to_collect
# Default metric list
DEFAULT_METRICS = {
m_name: m_type for d in [
self.check.BASE_METRICS, self.check.DURABILITY_METRICS,
self.check.LOCKS_METRICS, self.check.WIREDTIGER_METRICS, ]
for m_name, m_type in d.iteritems()
}
# No option
no_additional_metrics = build_metric_list([])
self.assertEquals(len(no_additional_metrics), len(DEFAULT_METRICS))
# Deprecated option, i.e. collected by default
default_metrics = build_metric_list(['wiredtiger'])
self.assertEquals(len(default_metrics), len(DEFAULT_METRICS))
self.assertEquals(self.check.log.warning.call_count, 1)
# One correct option
default_and_tcmalloc_metrics = build_metric_list(['tcmalloc'])
self.assertEquals(
len(default_and_tcmalloc_metrics),
len(DEFAULT_METRICS) + len(self.check.TCMALLOC_METRICS)
)
# One wrong and correct option
default_and_tcmalloc_metrics = build_metric_list(['foobar', 'top'])
self.assertEquals(
len(default_and_tcmalloc_metrics),
len(DEFAULT_METRICS) + len(self.check.TOP_METRICS)
)
self.assertEquals(self.check.log.warning.call_count, 2)
def test_metric_resolution(self):
"""
Resolve metric names and types.
"""
# Initialize check and tests
config = {
'instances': [self.MONGODB_CONFIG]
}
metrics_to_collect = {
'foobar': (GAUGE, 'barfoo'),
'foo.bar': (RATE, 'bar.foo'),
'fOoBaR': GAUGE,
'fOo.baR': RATE,
}
self.load_check(config)
resolve_metric = self.check._resolve_metric
# Assert
# Priority to aliases when defined
self.assertEquals((GAUGE, 'mongodb.barfoo'), resolve_metric('foobar', metrics_to_collect))
self.assertEquals((RATE, 'mongodb.bar.foops'), resolve_metric('foo.bar', metrics_to_collect)) # noqa
self.assertEquals((GAUGE, 'mongodb.qux.barfoo'), resolve_metric('foobar', metrics_to_collect, prefix="qux")) # noqa
# Resolve an alias when not defined
self.assertEquals((GAUGE, 'mongodb.foobar'), resolve_metric('fOoBaR', metrics_to_collect))
self.assertEquals((RATE, 'mongodb.foo.barps'), resolve_metric('fOo.baR', metrics_to_collect)) # noqa
self.assertEquals((GAUGE, 'mongodb.qux.foobar'), resolve_metric('fOoBaR', metrics_to_collect, prefix="qux")) # noqa
def test_metric_normalization(self):
"""
Metric names suffixed with `.R`, `.r`, `.W`, `.w` are renamed.
"""
# Initialize check and tests
config = {
'instances': [self.MONGODB_CONFIG]
}
metrics_to_collect = {
'foo.bar': GAUGE,
'foobar.r': GAUGE,
'foobar.R': RATE,
'foobar.w': RATE,
'foobar.W': GAUGE,
}
self.load_check(config)
resolve_metric = self.check._resolve_metric
# Assert
self.assertEquals((GAUGE, 'mongodb.foo.bar'), resolve_metric('foo.bar', metrics_to_collect)) # noqa
self.assertEquals((RATE, 'mongodb.foobar.sharedps'), resolve_metric('foobar.R', metrics_to_collect)) # noqa
self.assertEquals((GAUGE, 'mongodb.foobar.intent_shared'), resolve_metric('foobar.r', metrics_to_collect)) # noqa
self.assertEquals((RATE, 'mongodb.foobar.intent_exclusiveps'), resolve_metric('foobar.w', metrics_to_collect)) # noqa
self.assertEquals((GAUGE, 'mongodb.foobar.exclusive'), resolve_metric('foobar.W', metrics_to_collect)) # noqa
def test_state_translation(self):
"""
Check that replset member state IDs resolve to the proper names and descriptions.
"""
# Initialize check
config = {
'instances': [self.MONGODB_CONFIG]
}
self.load_check(config)
self.assertEquals('STARTUP2', self.check.get_state_name(5))
self.assertEquals('PRIMARY', self.check.get_state_name(1))
self.assertEquals('Starting Up', self.check.get_state_description(0))
self.assertEquals('Recovering', self.check.get_state_description(3))
# Unknown states:
self.assertEquals('UNKNOWN', self.check.get_state_name(500))
unknown_desc = self.check.get_state_description(500)
        self.assertIn('500', unknown_desc)
@attr(requires='mongo')
class TestMongo(AgentCheckTest):
CHECK_NAME = 'mongo'
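    # setUp/tearDown seed and clean a couple of collections on the first
    # mongod instance so collection-level metrics have data to report.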
def setUp(self):
server = "mongodb://localhost:%s/test" % PORT1
cli = pymongo.mongo_client.MongoClient(
server,
socketTimeoutMS=30000,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,)
db = cli['test']
foo = db.foo
foo.insert_one({'1': []})
foo.insert_one({'1': []})
foo.insert_one({})
bar = db.bar
bar.insert_one({'1': []})
bar.insert_one({})
def tearDown(self):
server = "mongodb://localhost:%s/test" % PORT1
cli = pymongo.mongo_client.MongoClient(
server,
socketTimeoutMS=30000,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,)
db = cli['test']
db.drop_collection("foo")
db.drop_collection("bar")
def testMongoCheck(self):
self.agentConfig = {
'version': '0.1',
'api_key': 'toto'
}
self.config = {
'instances': [{
'server': "mongodb://localhost:%s/test" % PORT1
}, {
'server': "mongodb://localhost:%s/test" % PORT2
}]
}
# Test mongodb with checks.d
self.check = load_check('mongo', self.config, self.agentConfig)
# Run the check against our running server
self.check.check(self.config['instances'][0])
# Sleep for 1 second so the rate interval >=1
time.sleep(1)
# Run the check again so we get the rates
self.check.check(self.config['instances'][0])
# Metric assertions
metrics = self.check.get_metrics()
assert metrics
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
metric_val_checks = {
'mongodb.connections.current': lambda x: x >= 1,
'mongodb.connections.available': lambda x: x >= 1,
'mongodb.uptime': lambda x: x >= 0,
'mongodb.mem.resident': lambda x: x > 0,
'mongodb.mem.virtual': lambda x: x > 0,
'mongodb.collections.size': lambda x: x > 0
}
for m in metrics:
metric_name = m[0]
if metric_name in metric_val_checks:
self.assertTrue(metric_val_checks[metric_name](m[2]))
# Run the check against our running server
self.check.check(self.config['instances'][1])
# Sleep for 1 second so the rate interval >=1
time.sleep(1)
# Run the check again so we get the rates
self.check.check(self.config['instances'][1])
# Service checks
service_checks = self.check.get_service_checks()
print service_checks
service_checks_count = len(service_checks)
self.assertTrue(isinstance(service_checks, ListType))
self.assertTrue(service_checks_count > 0)
self.assertEquals(len([sc for sc in service_checks if sc['check'] == self.check.SERVICE_CHECK_NAME]), 4, service_checks)
# Assert that all service checks have the proper tags: host and port
self.assertEquals(len([sc for sc in service_checks if "host:localhost" in sc['tags']]), service_checks_count, service_checks)
self.assertEquals(len([sc for sc in service_checks if "port:%s" % PORT1 in sc['tags'] or "port:%s" % PORT2 in sc['tags']]), service_checks_count, service_checks)
self.assertEquals(len([sc for sc in service_checks if "db:test" in sc['tags']]), service_checks_count, service_checks)
# Metric assertions
metrics = self.check.get_metrics()
assert metrics
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
for m in metrics:
metric_name = m[0]
if metric_name in metric_val_checks:
self.assertTrue(metric_val_checks[metric_name](m[2]))
def testMongoOldConfig(self):
conf = {
'init_config': {},
'instances': [
{'server': "mongodb://localhost:%s/test" % PORT1},
{'server': "mongodb://localhost:%s/test" % PORT2},
]
}
# Test the first mongodb instance
self.check = load_check('mongo', conf, {})
# Run the check against our running server
self.check.check(conf['instances'][0])
# Sleep for 1 second so the rate interval >=1
time.sleep(1)
# Run the check again so we get the rates
self.check.check(conf['instances'][0])
# Metric assertions
metrics = self.check.get_metrics()
assert metrics
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
metric_val_checks = {
'mongodb.connections.current': lambda x: x >= 1,
'mongodb.connections.available': lambda x: x >= 1,
'mongodb.uptime': lambda x: x >= 0,
'mongodb.mem.resident': lambda x: x > 0,
'mongodb.mem.virtual': lambda x: x > 0
}
for m in metrics:
metric_name = m[0]
if metric_name in metric_val_checks:
self.assertTrue(metric_val_checks[metric_name](m[2]))
# Run the check against our running server
self.check.check(conf['instances'][1])
# Sleep for 1 second so the rate interval >=1
time.sleep(1)
# Run the check again so we get the rates
self.check.check(conf['instances'][1])
# Metric assertions
metrics = self.check.get_metrics()
assert metrics
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
for m in metrics:
metric_name = m[0]
if metric_name in metric_val_checks:
self.assertTrue(metric_val_checks[metric_name](m[2]))
def testMongoFsyncLock(self):
server = "mongodb://localhost:%s/test" % PORT1
cli = pymongo.mongo_client.MongoClient(
server,
socketTimeoutMS=30000,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,)
try:
cli.fsync(lock=True)
# Run the check
config = {
'instances': [
{'server': server}
]
}
self.run_check(config)
# Assert
self.assertMetric("mongodb.fsynclocked", 1, count=1)
finally:
cli.unlock()
|
|
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
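"""Tests for mock's patching APIs: patch, patch.object, patch.dict and patch.multiple."""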
import os
import sys
import six
import unittest2 as unittest
from mock.tests import support
from mock.tests.support import SomeClass, is_instance, callable
from mock import (
    NonCallableMock, CallableMixin, patch, sentinel,
    MagicMock, Mock, NonCallableMagicMock,
    DEFAULT, call
)
from mock.mock import _patch, _get_target
builtin_string = '__builtin__'
if six.PY3:
builtin_string = 'builtins'
unicode = str
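# The tests patch attributes of this very test module. PTModule is a direct
# reference to it, and MODNAME is the dotted target string used with patch().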
PTModule = sys.modules[__name__]
MODNAME = '%s.PTModule' % __name__
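# Build an attribute-forwarding proxy around ``obj``; with get_only=False it
# also forwards attribute assignment and deletion. Used by the proxy tests to
# exercise patch.object against objects that only define __getattr__.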
def _get_proxy(obj, get_only=True):
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
# for use in the test
something = sentinel.Something
something_else = sentinel.SomethingElse
class Foo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
foo_name = '%s.Foo' % __name__
def function(a, b=Foo):
pass
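# Minimal mapping-like object (not a dict subclass) used to exercise
# patch.dict against arbitrary container objects.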
class Container(object):
def __init__(self):
self.values = {}
def __getitem__(self, name):
return self.values[name]
def __setitem__(self, name, value):
self.values[name] = value
def __delitem__(self, name):
del self.values[name]
def __iter__(self):
return iter(self.values)
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,
"patch not restored")
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
            self.assertIsInstance(mock1, MagicMock,
                                  "patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
            self.assertNotEqual(outerMock2, mock2, "unexpected value for mock2")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec(self):
@patch.object(SomeClass, 'class_attribute', spec=SomeClass)
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_patch_with_spec_as_list(self):
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec_as_list(self):
@patch.object(SomeClass, 'class_attribute', spec=['wibble'])
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_nested_patch_with_spec_as_list(self):
# regression test for nested decorators
@patch('%s.open' % builtin_string)
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass, MockOpen):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patch_with_spec_as_boolean(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_object_with_spec_as_boolean(self):
@patch.object(PTModule, 'SomeClass', spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_class_acts_with_spec_is_inherited(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertTrue(is_instance(MockSomeClass, MagicMock))
instance = MockSomeClass()
self.assertNotCallable(instance)
# Should not raise attribute error
instance.wibble
self.assertRaises(AttributeError, lambda: instance.not_wibble)
test()
def test_patch_with_create_mocks_non_existent_attributes(self):
@patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_with_create_mocks_non_existent_attributes(self):
@patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
def test():
self.assertEqual(SomeClass.frooble, sentinel.Frooble)
test()
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_wont_create_by_default(self):
try:
@patch('%s.frooble' % builtin_string, sentinel.Frooble)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_wont_create_by_default(self):
try:
@patch.object(SomeClass, 'ord', sentinel.Frooble)
def test():
self.fail('Patching non existent attributes should fail')
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertFalse(hasattr(SomeClass, 'ord'))
def test_patch_builtins_without_create(self):
@patch(__name__+'.ord')
def test_ord(mock_ord):
mock_ord.return_value = 101
return ord('c')
@patch(__name__+'.open')
def test_open(mock_open):
m = mock_open.return_value
m.read.return_value = 'abcd'
fobj = open('doesnotexists.txt')
data = fobj.read()
fobj.close()
return data
self.assertEqual(test_ord(), 101)
self.assertEqual(test_open(), 'abcd')
def test_patch_with_static_methods(self):
class Foo(object):
@staticmethod
def woot():
return sentinel.Static
@patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
def anonymous():
self.assertEqual(Foo.woot(), sentinel.Patched)
anonymous()
self.assertEqual(Foo.woot(), sentinel.Static)
def test_patch_local(self):
foo = sentinel.Foo
@patch.object(sentinel, 'Foo', 'Foo')
def anonymous():
self.assertEqual(sentinel.Foo, 'Foo')
anonymous()
self.assertEqual(sentinel.Foo, foo)
def test_patch_slots(self):
class Foo(object):
__slots__ = ('Foo',)
foo = Foo()
foo.Foo = sentinel.Foo
@patch.object(foo, 'Foo', 'Foo')
def anonymous():
self.assertEqual(foo.Foo, 'Foo')
anonymous()
self.assertEqual(foo.Foo, sentinel.Foo)
def test_patchobject_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
def not_test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Original,
"non-test method patched")
Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self, mock_something):
self.assertEqual(PTModule.something, mock_something,
"unpatched")
def not_test_method(other_self):
self.assertEqual(PTModule.something, sentinel.Something,
"non-test method patched")
Foo = patch('%s.something' % __name__)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
def test_patchobject_twice(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_dict(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')])
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo.values), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo.values, original)
def test_patch_dict_with_clear(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')], clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object_and_clear(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo.values, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo.values, {'a': 'b'})
test()
self.assertEqual(foo.values, original)
def test_name_preserved(self):
foo = {}
@patch('%s.SomeClass' % __name__, object())
@patch('%s.SomeClass' % __name__, object(), autospec=True)
@patch.object(SomeClass, object())
@patch.dict(foo)
def some_name():
pass
self.assertEqual(some_name.__name__, 'some_name')
def test_patch_with_exception(self):
foo = {}
@patch.dict(foo, {'a': 'b'})
def test():
raise NameError('Konrad')
try:
test()
except NameError:
pass
else:
self.fail('NameError not raised by test')
self.assertEqual(foo, {})
def test_patch_dict_with_string(self):
@patch.dict('os.environ', {'konrad_delong': 'some value'})
def test():
self.assertIn('konrad_delong', os.environ)
test()
@unittest.expectedFailure
def test_patch_descriptor(self):
# would be some effort to fix this - we could special case the
# builtin descriptors: classmethod, property, staticmethod
class Nothing(object):
foo = None
class Something(object):
foo = {}
@patch.object(Nothing, 'foo', 2)
@classmethod
def klass(cls):
self.assertIs(cls, Something)
@patch.object(Nothing, 'foo', 2)
@staticmethod
def static(arg):
return arg
@patch.dict(foo)
@classmethod
def klass_dict(cls):
self.assertIs(cls, Something)
@patch.dict(foo)
@staticmethod
def static_dict(arg):
return arg
# these will raise exceptions if patching descriptors is broken
self.assertEqual(Something.static('f00'), 'f00')
Something.klass()
self.assertEqual(Something.static_dict('f00'), 'f00')
Something.klass_dict()
something = Something()
self.assertEqual(something.static('f00'), 'f00')
something.klass()
self.assertEqual(something.static_dict('f00'), 'f00')
something.klass_dict()
def test_patch_spec_set(self):
@patch('%s.SomeClass' % __name__, spec_set=SomeClass)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec_set=SomeClass)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
def test_spec_set_inherit(self):
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
instance = MockClass()
instance.z = 'foo'
self.assertRaises(AttributeError, test)
def test_patch_start_stop(self):
original = something
patcher = patch('%s.something' % __name__)
self.assertIs(something, original)
mock = patcher.start()
try:
self.assertIsNot(mock, original)
self.assertIs(something, mock)
finally:
patcher.stop()
self.assertIs(something, original)
def test_stop_without_start(self):
patcher = patch(foo_name, 'bar', 3)
# calling stop without start used to produce a very obscure error
self.assertRaises(RuntimeError, patcher.stop)
def test_patchobject_start_stop(self):
original = something
patcher = patch.object(PTModule, 'something', 'foo')
self.assertIs(something, original)
replaced = patcher.start()
try:
self.assertEqual(replaced, 'foo')
self.assertIs(something, replaced)
finally:
patcher.stop()
self.assertIs(something, original)
def test_patch_dict_start_stop(self):
d = {'foo': 'bar'}
original = d.copy()
patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
self.assertEqual(d, original)
patcher.start()
try:
self.assertEqual(d, {'spam': 'eggs'})
finally:
patcher.stop()
self.assertEqual(d, original)
def test_patch_dict_class_decorator(self):
this = self
d = {'spam': 'eggs'}
original = d.copy()
class Test(object):
def test_first(self):
this.assertEqual(d, {'foo': 'bar'})
def test_second(self):
this.assertEqual(d, {'foo': 'bar'})
Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
def test_get_only_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
        for thing in Something, SomethingElse, Something(), SomethingElse():
proxy = _get_proxy(thing)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_get_set_delete_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
        for thing in Something, SomethingElse, Something(), SomethingElse():
            proxy = _get_proxy(thing, get_only=False)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_patch_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch(foo_name, **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_object_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch.object(Foo, 'f', **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_dict_keyword_args(self):
original = {'foo': 'bar'}
copy = original.copy()
patcher = patch.dict(original, foo=3, bar=4, baz=5)
patcher.start()
try:
self.assertEqual(original, dict(foo=3, bar=4, baz=5))
finally:
patcher.stop()
self.assertEqual(original, copy)
def test_autospec(self):
class Boo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
def _test(mock):
mock(1)
mock.assert_called_with(1)
self.assertRaises(TypeError, mock)
def _test2(mock):
mock.f(1)
mock.f.assert_called_with(1)
self.assertRaises(TypeError, mock.f)
mock.g()
mock.g.assert_called_with()
self.assertRaises(TypeError, mock.g, 1)
self.assertRaises(AttributeError, getattr, mock, 'h')
mock.foo.lower()
mock.foo.lower.assert_called_with()
self.assertRaises(AttributeError, getattr, mock.foo, 'bar')
mock.Bar()
mock.Bar.assert_called_with()
mock.Bar.a()
mock.Bar.a.assert_called_with()
self.assertRaises(TypeError, mock.Bar.a, 1)
mock.Bar().a()
mock.Bar().a.assert_called_with()
self.assertRaises(TypeError, mock.Bar().a, 1)
self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')
def function(mock):
_test(mock)
_test2(mock)
_test2(mock(1))
self.assertIs(mock, Foo)
return mock
test = patch(foo_name, autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
module = sys.modules[__name__]
test = patch.object(module, 'Foo', autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
def test_autospec_function(self):
@patch('%s.function' % __name__, autospec=True)
def test(mock):
function(1)
function.assert_called_with(1)
function(2, 3)
function.assert_called_with(2, 3)
self.assertRaises(TypeError, function)
self.assertRaises(AttributeError, getattr, function, 'foo')
test()
def test_autospec_keywords(self):
@patch('%s.function' % __name__, autospec=True,
return_value=3)
def test(mock_function):
#self.assertEqual(function.abc, 'foo')
return function(1, 2)
result = test()
self.assertEqual(result, 3)
def test_autospec_with_new(self):
patcher = patch('%s.function' % __name__, new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
module = sys.modules[__name__]
patcher = patch.object(module, 'function', new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
def test_autospec_with_object(self):
class Bar(Foo):
extra = []
patcher = patch(foo_name, autospec=Bar)
mock = patcher.start()
try:
self.assertIsInstance(mock, Bar)
self.assertIsInstance(mock.extra, list)
finally:
patcher.stop()
def test_autospec_inherits(self):
FooClass = Foo
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIsInstance(mock, FooClass)
self.assertIsInstance(mock(3), FooClass)
finally:
patcher.stop()
def test_autospec_name(self):
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIn(" name='Foo'", repr(mock))
self.assertIn(" name='Foo.f'", repr(mock.f))
self.assertIn(" name='Foo()'", repr(mock(None)))
self.assertIn(" name='Foo().f'", repr(mock(None).f))
finally:
patcher.stop()
def test_tracebacks(self):
@patch.object(Foo, 'f', object())
def test():
raise AssertionError
try:
test()
except:
err = sys.exc_info()
result = unittest.TextTestResult(None, None, 0)
traceback = result._exc_info_to_string(err, self)
self.assertIn('raise AssertionError', traceback)
def test_new_callable_patch(self):
patcher = patch(foo_name, new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_patch_object(self):
patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_keyword_arguments(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
m = patcher.start()
try:
self.assertIs(type(m), Bar)
self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
finally:
patcher.stop()
def test_new_callable_spec(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, spec=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec=Bar))
finally:
patcher.stop()
patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
finally:
patcher.stop()
def test_new_callable_create(self):
non_existent_attr = '%s.weeeee' % foo_name
p = patch(non_existent_attr, new_callable=NonCallableMock)
self.assertRaises(AttributeError, p.start)
p = patch(non_existent_attr, new_callable=NonCallableMock,
create=True)
m = p.start()
try:
self.assertNotCallable(m, magic=False)
finally:
p.stop()
def test_new_callable_incompatible_with_new(self):
self.assertRaises(
ValueError, patch, foo_name, new=object(), new_callable=MagicMock
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new=object(),
new_callable=MagicMock
)
def test_new_callable_incompatible_with_autospec(self):
self.assertRaises(
ValueError, patch, foo_name, new_callable=MagicMock,
autospec=True
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
autospec=True
)
def test_new_callable_inherit_for_mocks(self):
class MockSub(Mock):
pass
MockClasses = (
NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
)
for Klass in MockClasses:
for arg in 'spec', 'spec_set':
kwargs = {arg: True}
p = patch(foo_name, new_callable=Klass, **kwargs)
m = p.start()
try:
instance = m.return_value
self.assertRaises(AttributeError, getattr, instance, 'x')
finally:
p.stop()
def test_new_callable_inherit_non_mock(self):
class NotAMock(object):
def __init__(self, spec):
self.spec = spec
p = patch(foo_name, new_callable=NotAMock, spec=True)
m = p.start()
try:
self.assertTrue(is_instance(m, NotAMock))
self.assertRaises(AttributeError, getattr, m, 'return_value')
finally:
p.stop()
self.assertEqual(m.spec, Foo)
def test_new_callable_class_decorating(self):
test = self
original = Foo
class SomeTest(object):
def _test(self, mock_foo):
test.assertIsNot(Foo, original)
test.assertIs(Foo, mock_foo)
test.assertIsInstance(Foo, SomeClass)
def test_two(self, mock_foo):
self._test(mock_foo)
def test_one(self, mock_foo):
self._test(mock_foo)
SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
SomeTest().test_one()
SomeTest().test_two()
self.assertIs(Foo, original)
def test_patch_multiple(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher1 = patch.multiple(foo_name, f=1, g=2)
patcher2 = patch.multiple(Foo, f=1, g=2)
for patcher in patcher1, patcher2:
patcher.start()
try:
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 1)
self.assertEqual(Foo.g, 2)
finally:
patcher.stop()
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
@patch.multiple(foo_name, f=3, g=4)
def test():
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 3)
self.assertEqual(Foo.g, 4)
test()
def test_patch_multiple_no_kwargs(self):
self.assertRaises(ValueError, patch.multiple, foo_name)
self.assertRaises(ValueError, patch.multiple, Foo)
def test_patch_multiple_create_mocks(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
def test(f, foo):
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertEqual(Foo.g, 3)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_different_order(self):
# bug revealed by Jython!
original_f = Foo.f
original_g = Foo.g
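        # Recreate by hand what patch.multiple builds internally: patchers
        # chained via attribute_name/additional_patchers, started in a
        # different order than the decorator would produce.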
patcher = patch.object(Foo, 'f', 3)
patcher.attribute_name = 'f'
other = patch.object(Foo, 'g', DEFAULT)
other.attribute_name = 'g'
patcher.additional_patchers = [other]
@patcher
def test(g):
self.assertIs(Foo.g, g)
self.assertEqual(Foo.f, 3)
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_stacked_decorators(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
@patch(foo_name + '.g')
def test1(g, **kwargs):
_test(g, **kwargs)
@patch.multiple(foo_name, f=DEFAULT)
@patch(foo_name + '.g')
@patch.multiple(foo_name, foo=DEFAULT)
def test2(g, **kwargs):
_test(g, **kwargs)
@patch(foo_name + '.g')
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
def test3(g, **kwargs):
_test(g, **kwargs)
def _test(g, **kwargs):
f = kwargs.pop('f')
foo = kwargs.pop('foo')
self.assertFalse(kwargs)
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.g, g)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(g, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test1()
test2()
test3()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_patcher(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
result = patcher.start()
try:
f = result['f']
foo = result['foo']
self.assertEqual(set(result), set(['f', 'foo']))
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
finally:
patcher.stop()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_decorating_class(self):
test = self
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
class SomeTest(object):
def _test(self, f, foo):
test.assertIs(Foo, original_foo)
test.assertIs(Foo.f, f)
test.assertEqual(Foo.g, 3)
test.assertIs(Foo.foo, foo)
test.assertTrue(is_instance(f, MagicMock))
test.assertTrue(is_instance(foo, MagicMock))
def test_two(self, f, foo):
self._test(f, foo)
def test_one(self, f, foo):
self._test(f, foo)
SomeTest = patch.multiple(
foo_name, f=DEFAULT, g=3, foo=DEFAULT
)(SomeTest)
thing = SomeTest()
thing.test_one()
thing.test_two()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create(self):
patcher = patch.multiple(Foo, blam='blam')
self.assertRaises(AttributeError, patcher.start)
patcher = patch.multiple(Foo, blam='blam', create=True)
patcher.start()
try:
self.assertEqual(Foo.blam, 'blam')
finally:
patcher.stop()
self.assertFalse(hasattr(Foo, 'blam'))
def test_patch_multiple_spec_set(self):
# if spec_set works then we can assume that spec and autospec also
# work as the underlying machinery is the same
patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
result = patcher.start()
try:
self.assertEqual(Foo.foo, result['foo'])
Foo.foo.a(1)
Foo.foo.b(2)
Foo.foo.a.assert_called_with(1)
Foo.foo.b.assert_called_with(2)
self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
finally:
patcher.stop()
def test_patch_multiple_new_callable(self):
class Thing(object):
pass
patcher = patch.multiple(
Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
)
result = patcher.start()
try:
self.assertIs(Foo.f, result['f'])
self.assertIs(Foo.g, result['g'])
self.assertIsInstance(Foo.f, Thing)
self.assertIsInstance(Foo.g, Thing)
self.assertIsNot(Foo.f, Foo.g)
finally:
patcher.stop()
def test_nested_patch_failure(self):
original_f = Foo.f
original_g = Foo.g
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'missing', 1)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_failure(self):
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'missing', 1)
bad.attribute_name = 'missing'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'foo', new_callable=crasher)
bad.attribute_name = 'foo'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_string_subclasses(self):
for base in (str, unicode):
Foo = type('Foo', (base,), {'fish': 'tasty'})
foo = Foo()
@patch.multiple(foo, fish='nearly gone')
def test():
self.assertEqual(foo.fish, 'nearly gone')
test()
self.assertEqual(foo.fish, 'tasty')
@patch('mock.patch.TEST_PREFIX', 'foo')
def test_patch_test_prefix(self):
class Foo(object):
thing = 'original'
def foo_one(self):
return self.thing
def foo_two(self):
return self.thing
def test_one(self):
return self.thing
def test_two(self):
return self.thing
Foo = patch.object(Foo, 'thing', 'changed')(Foo)
foo = Foo()
self.assertEqual(foo.foo_one(), 'changed')
self.assertEqual(foo.foo_two(), 'changed')
self.assertEqual(foo.test_one(), 'original')
self.assertEqual(foo.test_two(), 'original')
@patch('mock.patch.TEST_PREFIX', 'bar')
def test_patch_dict_test_prefix(self):
class Foo(object):
def bar_one(self):
return dict(the_dict)
def bar_two(self):
return dict(the_dict)
def test_one(self):
return dict(the_dict)
def test_two(self):
return dict(the_dict)
the_dict = {'key': 'original'}
Foo = patch.dict(the_dict, key='changed')(Foo)
        foo = Foo()
self.assertEqual(foo.bar_one(), {'key': 'changed'})
self.assertEqual(foo.bar_two(), {'key': 'changed'})
self.assertEqual(foo.test_one(), {'key': 'original'})
self.assertEqual(foo.test_two(), {'key': 'original'})
def test_patch_with_spec_mock_repr(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
self.assertIn(" name='SomeClass'", repr(m))
self.assertIn(" name='SomeClass.class_attribute'",
repr(m.class_attribute))
self.assertIn(" name='SomeClass()'", repr(m()))
self.assertIn(" name='SomeClass().class_attribute'",
repr(m().class_attribute))
finally:
p.stop()
def test_patch_nested_autospec_repr(self):
p = patch('mock.tests.support', autospec=True)
m = p.start()
try:
self.assertIn(" name='support.SomeClass.wibble()'",
repr(m.SomeClass.wibble()))
self.assertIn(" name='support.SomeClass().wibble()'",
repr(m.SomeClass().wibble()))
finally:
p.stop()
def test_mock_calls_with_patch(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
m.wibble()
kalls = [call.wibble()]
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(m.method_calls, kalls)
self.assertEqual(m.wibble.mock_calls, [call()])
result = m()
kalls.append(call())
self.assertEqual(m.mock_calls, kalls)
result.wibble()
kalls.append(call().wibble())
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(result.mock_calls, [call.wibble()])
self.assertEqual(result.wibble.mock_calls, [call()])
self.assertEqual(result.method_calls, [call.wibble()])
finally:
p.stop()
def test_patch_imports_lazily(self):
sys.modules.pop('squizz', None)
p1 = patch('squizz.squozz')
self.assertRaises(ImportError, p1.start)
squizz = Mock()
squizz.squozz = 6
sys.modules['squizz'] = squizz
p1 = patch('squizz.squozz')
squizz.squozz = 3
p1.start()
p1.stop()
self.assertEqual(squizz.squozz, 3)
    def test_patch_propagates_exc_on_exit(self):
class holder:
exc_info = None, None, None
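        # Subclass _patch so that the exception info passed to __exit__ is
        # recorded, proving that exceptions raised inside the patched function
        # propagate out through stop().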
class custom_patch(_patch):
def __exit__(self, etype=None, val=None, tb=None):
_patch.__exit__(self, etype, val, tb)
holder.exc_info = etype, val, tb
stop = __exit__
def with_custom_patch(target):
getter, attribute = _get_target(target)
return custom_patch(
getter, attribute, DEFAULT, None, False, None,
None, None, {}
)
@with_custom_patch('squizz.squozz')
def test(mock):
raise RuntimeError
self.assertRaises(RuntimeError, test)
self.assertIs(holder.exc_info[0], RuntimeError)
        self.assertIsNotNone(holder.exc_info[1],
                             'exception value not propagated')
        self.assertIsNotNone(holder.exc_info[2],
                             'exception traceback not propagated')
def test_create_and_specs(self):
for kwarg in ('spec', 'spec_set', 'autospec'):
p = patch('%s.doesnotexist' % __name__, create=True,
**{kwarg: True})
self.assertRaises(TypeError, p.start)
self.assertRaises(NameError, lambda: doesnotexist)
# check that spec with create is innocuous if the original exists
p = patch(MODNAME, create=True, **{kwarg: True})
p.start()
p.stop()
def test_multiple_specs(self):
original = PTModule
for kwarg in ('spec', 'spec_set'):
p = patch(MODNAME, autospec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec_set', 'autospec'):
p = patch(MODNAME, spec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
def test_specs_false_instead_of_none(self):
p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
mock = p.start()
try:
# no spec should have been set, so attribute access should not fail
mock.does_not_exist
mock.does_not_exist = 3
finally:
p.stop()
def test_falsey_spec(self):
for kwarg in ('spec', 'autospec', 'spec_set'):
p = patch(MODNAME, **{kwarg: 0})
m = p.start()
try:
                self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_spec_set_true(self):
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=True, **{kwarg: True})
m = p.start()
try:
self.assertRaises(AttributeError, setattr, m,
'doesnotexist', 'something')
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_callable_spec_as_list(self):
spec = ('__call__',)
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertTrue(callable(m))
finally:
p.stop()
def test_not_callable_spec_as_list(self):
spec = ('foo', 'bar')
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertFalse(callable(m))
finally:
p.stop()
def test_patch_stopall(self):
unlink = os.unlink
chdir = os.chdir
path = os.path
patch('os.unlink', something).start()
patch('os.chdir', something_else).start()
@patch('os.path')
def patched(mock_path):
patch.stopall()
self.assertIs(os.path, mock_path)
self.assertIs(os.unlink, unlink)
self.assertIs(os.chdir, chdir)
patched()
self.assertIs(os.path, path)
def test_wrapped_patch(self):
decorated = patch('sys.modules')(function)
self.assertIs(decorated.__wrapped__, function)
def test_wrapped_several_times_patch(self):
decorated = patch('sys.modules')(function)
decorated = patch('sys.modules')(decorated)
self.assertIs(decorated.__wrapped__, function)
def test_wrapped_patch_object(self):
decorated = patch.object(sys, 'modules')(function)
self.assertIs(decorated.__wrapped__, function)
def test_wrapped_patch_dict(self):
decorated = patch.dict('sys.modules')(function)
self.assertIs(decorated.__wrapped__, function)
def test_wrapped_patch_multiple(self):
decorated = patch.multiple('sys', modules={})(function)
self.assertIs(decorated.__wrapped__, function)
def test_stopall_lifo(self):
stopped = []
class thing(object):
one = two = three = None
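        # Wrap _patch so each stop() records which attribute it restored;
        # patch.stopall() must unwind the active patches in LIFO order.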
def get_patch(attribute):
class mypatch(_patch):
def stop(self):
stopped.append(attribute)
return super(mypatch, self).stop()
return mypatch(lambda: thing, attribute, None, None,
False, None, None, None, {})
[get_patch(val).start() for val in ("one", "two", "three")]
patch.stopall()
self.assertEqual(stopped, ["three", "two", "one"])
if __name__ == '__main__':
unittest.main()
|
|
"""The test for the History Statistics sensor platform."""
# pylint: disable=protected-access
from datetime import datetime, timedelta
import unittest
from unittest.mock import patch
import pytest
import pytz
from homeassistant.const import STATE_UNKNOWN
from homeassistant.setup import setup_component
from homeassistant.components.history_stats.sensor import HistoryStatsSensor
import homeassistant.core as ha
from homeassistant.helpers.template import Template
import homeassistant.util.dt as dt_util
from tests.common import init_recorder_component, get_test_home_assistant
class TestHistoryStatsSensor(unittest.TestCase):
"""Test the History Statistics sensor."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup(self):
"""Test the history statistics sensor setup."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'history_stats',
'entity_id': 'binary_sensor.test_id',
'state': 'on',
'start': '{{ now().replace(hour=0)'
'.replace(minute=0).replace(second=0) }}',
'duration': '02:00',
'name': 'Test',
}
}
assert setup_component(self.hass, 'sensor', config)
state = self.hass.states.get('sensor.test')
assert state.state == STATE_UNKNOWN
@patch('homeassistant.helpers.template.TemplateEnvironment.'
'is_safe_callable', return_value=True)
def test_period_parsing(self, mock):
"""Test the conversion from templates to period."""
now = datetime(2019, 1, 1, 23, 30, 0, tzinfo=pytz.utc)
with patch('homeassistant.util.dt.now', return_value=now):
today = Template('{{ now().replace(hour=0).replace(minute=0)'
'.replace(second=0) }}', self.hass)
duration = timedelta(hours=2, minutes=1)
sensor1 = HistoryStatsSensor(
self.hass, 'test', 'on', today, None, duration, 'time', 'test')
sensor2 = HistoryStatsSensor(
self.hass, 'test', 'on', None, today, duration, 'time', 'test')
sensor1.update_period()
sensor1_start, sensor1_end = sensor1._period
sensor2.update_period()
sensor2_start, sensor2_end = sensor2._period
# Start = 00:00:00
assert sensor1_start.hour == 0
assert sensor1_start.minute == 0
assert sensor1_start.second == 0
# End = 02:01:00
assert sensor1_end.hour == 2
assert sensor1_end.minute == 1
assert sensor1_end.second == 0
# Start = 21:59:00
assert sensor2_start.hour == 21
assert sensor2_start.minute == 59
assert sensor2_start.second == 0
# End = 00:00:00
assert sensor2_end.hour == 0
assert sensor2_end.minute == 0
assert sensor2_end.second == 0
def test_measure(self):
"""Test the history statistics sensor measure."""
t0 = dt_util.utcnow() - timedelta(minutes=40)
t1 = t0 + timedelta(minutes=20)
t2 = dt_util.utcnow() - timedelta(minutes=10)
# Start t0 t1 t2 End
# |--20min--|--20min--|--10min--|--10min--|
# |---off---|---on----|---off---|---on----|
fake_states = {
'binary_sensor.test_id': [
ha.State('binary_sensor.test_id', 'on', last_changed=t0),
ha.State('binary_sensor.test_id', 'off', last_changed=t1),
ha.State('binary_sensor.test_id', 'on', last_changed=t2),
]
}
start = Template('{{ as_timestamp(now()) - 3600 }}', self.hass)
end = Template('{{ now() }}', self.hass)
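        # Query window: the last hour, expressed as start/end templates.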
sensor1 = HistoryStatsSensor(
self.hass, 'binary_sensor.test_id', 'on', start, end, None,
'time', 'Test')
sensor2 = HistoryStatsSensor(
self.hass, 'unknown.id', 'on', start, end, None, 'time', 'Test')
sensor3 = HistoryStatsSensor(
self.hass, 'binary_sensor.test_id', 'on', start, end, None,
'count', 'test')
sensor4 = HistoryStatsSensor(
self.hass, 'binary_sensor.test_id', 'on', start, end, None,
'ratio', 'test')
assert sensor1._type == 'time'
assert sensor3._type == 'count'
assert sensor4._type == 'ratio'
with patch('homeassistant.components.history.'
'state_changes_during_period', return_value=fake_states):
with patch('homeassistant.components.history.get_state',
return_value=None):
sensor1.update()
sensor2.update()
sensor3.update()
sensor4.update()
assert sensor1.state == 0.5
assert sensor2.state is None
assert sensor3.state == 2
assert sensor4.state == 50
def test_wrong_date(self):
"""Test when start or end value is not a timestamp or a date."""
good = Template('{{ now() }}', self.hass)
bad = Template('{{ TEST }}', self.hass)
sensor1 = HistoryStatsSensor(
self.hass, 'test', 'on', good, bad, None, 'time', 'Test')
sensor2 = HistoryStatsSensor(
self.hass, 'test', 'on', bad, good, None, 'time', 'Test')
before_update1 = sensor1._period
before_update2 = sensor2._period
sensor1.update_period()
sensor2.update_period()
assert before_update1 == sensor1._period
assert before_update2 == sensor2._period
def test_wrong_duration(self):
"""Test when duration value is not a timedelta."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'history_stats',
'entity_id': 'binary_sensor.test_id',
'name': 'Test',
'state': 'on',
'start': '{{ now() }}',
'duration': 'TEST',
}
}
setup_component(self.hass, 'sensor', config)
        assert self.hass.states.get('sensor.test') is None
with pytest.raises(TypeError):
setup_component(self.hass, 'sensor', config)()
def test_bad_template(self):
"""Test Exception when the template cannot be parsed."""
bad = Template('{{ x - 12 }}', self.hass) # x is undefined
duration = '01:00'
sensor1 = HistoryStatsSensor(
self.hass, 'test', 'on', bad, None, duration, 'time', 'Test')
sensor2 = HistoryStatsSensor(
self.hass, 'test', 'on', None, bad, duration, 'time', 'Test')
before_update1 = sensor1._period
before_update2 = sensor2._period
sensor1.update_period()
sensor2.update_period()
assert before_update1 == sensor1._period
assert before_update2 == sensor2._period
def test_not_enough_arguments(self):
"""Test config when not enough arguments provided."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'history_stats',
'entity_id': 'binary_sensor.test_id',
'name': 'Test',
'state': 'on',
'start': '{{ now() }}',
}
}
setup_component(self.hass, 'sensor', config)
        assert self.hass.states.get('sensor.test') is None
with pytest.raises(TypeError):
setup_component(self.hass, 'sensor', config)()
def test_too_many_arguments(self):
"""Test config when too many arguments provided."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'history_stats',
'entity_id': 'binary_sensor.test_id',
'name': 'Test',
'state': 'on',
'start': '{{ as_timestamp(now()) - 3600 }}',
'end': '{{ now() }}',
'duration': '01:00',
}
}
setup_component(self.hass, 'sensor', config)
        assert self.hass.states.get('sensor.test') is None
with pytest.raises(TypeError):
setup_component(self.hass, 'sensor', config)()
def init_recorder(self):
"""Initialize the recorder."""
init_recorder_component(self.hass)
self.hass.start()
|