####Please do not remove lines below####
from lmfit import Parameters
import numpy as np
import sys
import os
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('./Functions'))
sys.path.append(os.path.abspath('./Fortran_routines'))
####Please do not remove lines above####
####Import your modules below if needed####
from FormFactors.Sphere import Sphere
from ff_sphere import ff_sphere_ml
from Chemical_Formula import Chemical_Formula
from PeakFunctions import LogNormal, Gaussian
from Structure_Factors import hard_sphere_sf, sticky_sphere_sf
from utils import find_minmax, calc_rho, create_steps
from functools import lru_cache
import time
class Biphasic_Sphere_Uniform: #Please put the class name same as the function name
def __init__(self, x=0, Np=20, flux=1e13, term='Total',dist='Gaussian', Energy=None, relement='Au', NrDep='False',
norm=1.0, sbkg=0.0, cbkg=0.0, abkg=0.0, D=1.0, phi=0.1, U=-1.0, SF='None',Rsig=0.0,
mpar={'Phase_1':{'Material':['Au','H2O'],
'Density':[19.32,1.0],
'VolFrac':[1.0,1.0],
'Rmoles':[1.0,0.0],
'R':[1.0,0.0]},
'Phase_2':{'Material':['Au','H2O'],
'Density':[19.32,1.0],
'VolFrac':[1.0,1.0],
'Rmoles':[1.0,0.0],
'R':[1.0,0.0]},
'Solvent':{'Material':['H2O','H2O'],
'Density':[1.0,1.0],
'VolFrac':[1.0,1.0],
'Rmoles':[1.0,0.0],
'R':[1.0,0.0]}}):
"""
Documentation
        Calculates the Energy-dependent form factor of multilayered spherical nanoparticles made of two different sets of materials
x : Reciprocal wave-vector 'Q' inv-Angs in the form of a scalar or an array
relement : Resonant element of the nanoparticle. Default: 'Au'
Energy : Energy of X-rays in keV at which the form-factor is calculated. Default: None
        Np : No. of points with which the size distribution will be computed. Default: 20
NrDep : Energy dependence of the non-resonant element. Default= 'False' (Energy independent), 'True' (Energy dependent)
        dist : The probability distribution function for the radii of the different interfaces in the nanoparticles. Default: Gaussian
norm : The density of the nanoparticles in Molar (Moles/Liter)
sbkg : Constant incoherent background for SAXS-term
cbkg : Constant incoherent background for cross-term
abkg : Constant incoherent background for Resonant-term
        flux : Total X-ray flux used to simulate the error bars of the calculated data
term : 'SAXS-term' or 'Cross-term' or 'Resonant-term' or 'Total'
D : Hard Sphere Diameter
phi : Volume fraction of particles
U : The sticky-sphere interaction energy
SF : Type of structure factor. Default: 'None'
        Rsig : Width of the distribution of the total radius of the nanoparticles. Default: 0.0
        mpar : Multi-parameter which defines the following for each phase, including the solvent/bulk medium which is the last one (default solvent: 'H2O'):
                Material ('Material' as a chemical formula),
                Density ('Density' in gm/cubic-cms),
                Volume fraction ('VolFrac') of the particular phase,
                Mole fraction ('Rmoles') of the resonant element in the material, and
                Radii ('R' in Angs)
"""
if type(x)==list:
self.x=np.array(x)
else:
self.x=x
self.norm=norm
self.sbkg=sbkg
self.cbkg=cbkg
self.abkg=abkg
self.dist=dist
self.Np=Np
self.Energy=Energy
self.relement=relement
self.NrDep=NrDep
#self.rhosol=rhosol
self.flux=flux
self.D=D
self.phi=phi
self.U=U
self.__mpar__=mpar #If there is any multivalued parameter
self.SF=SF
self.term=term
self.Rsig=Rsig
self.__Density__={}
self.__VolFrac__={}
self.__R__={}
self.__Rmoles__={}
self.__material__={}
self.choices={'dist':['Gaussian','LogNormal'],'NrDep':['True','False'],'SF':['None','Hard-Sphere', 'Sticky-Sphere'],
'term':['SAXS-term','Cross-term','Resonant-term','Total']} #If there are choices available for any fixed parameters
self.__fit__=False
self.__mkeys__=list(self.__mpar__.keys())
self.init_params()
def init_params(self):
"""
Define all the fitting parameters like
self.params.add('sig',value = 0, vary = 0, min = -np.inf, max = np.inf, expr = None, brute_step = None)
"""
self.params=Parameters()
self.params.add('norm',value=self.norm,vary=0, min = -np.inf, max = np.inf, expr = None, brute_step = 0.1)
self.params.add('D', value=self.D, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('phi', value=self.phi, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('sbkg',value=self.sbkg,vary=0, min = -np.inf, max = np.inf, expr = None, brute_step = 0.1)
self.params.add('cbkg', value=self.cbkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('abkg', value=self.abkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('U', value=self.U, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('Rsig',value=self.Rsig,vary=0,min=0,max=np.inf,expr=None,brute_step=0.1)
mkey1=self.__mkeys__[0]
for key in self.__mpar__[mkey1].keys():
if key != 'Material':
for i in range(len(self.__mpar__[mkey1][key])):
self.params.add('__%s_%s_%03d' % (mkey1, key, i), value=self.__mpar__[mkey1][key][i], vary=0,
min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
for mkey in self.__mkeys__[1:]:
for key in self.__mpar__[mkey].keys():
if key!='Material' and key!='R':
for i in range(len(self.__mpar__[mkey][key])):
self.params.add('__%s_%s_%03d'%(mkey, key,i),value=self.__mpar__[mkey][key][i],vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=0.1)
elif key=='R':
for i in range(len(self.__mpar__[mkey][key])):
self.params.add('__%s_%s_%03d'%(mkey, key,i),value=self.__mpar__[mkey][key][i],vary=0,min=-np.inf,max=np.inf
,expr='__%s_%s_%03d'%(mkey1, key,i),brute_step=0.1)
@lru_cache(maxsize=10)
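    # Note: lru_cache only accepts hashable arguments, which is why callers pass
    # R (and, for new_sphere, q/rho/eirho/adensity) as tuples rather than lists
    # or numpy arrays.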
def calc_Rdist(self, R, Rsig, dist, N):
R = np.array(R)
totalR = np.sum(R[:-1])
if Rsig > 0.001:
fdist = eval(dist + '.' + dist + '(x=0.001, pos=totalR, wid=Rsig)')
if dist == 'Gaussian':
rmin, rmax = max(0.001, totalR - 5 * Rsig), totalR + 5 * Rsig
dr = np.linspace(rmin, rmax, N)
else:
rmin, rmax = max(-3, np.log(totalR) - 5 * Rsig), np.log(totalR) + 5 * Rsig
dr = np.logspace(rmin, rmax, N, base=np.exp(1.0))
fdist.x = dr
rdist = fdist.y()
sumdist = np.sum(rdist)
rdist = rdist / sumdist
return dr, rdist, totalR
else:
return [totalR], [1.0], totalR
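    # Illustrative use (a sketch; the exact grid and weights depend on the
    # distribution classes in PeakFunctions):
    #   dr, rdist, totalR = self.calc_Rdist((10.0, 5.0, 0.0), 1.0, 'Gaussian', 5)
    #   -> totalR = 15.0 (sum of all but the last radius), dr is a grid of 5
    #      total radii spanning totalR +/- 5*Rsig, and rdist holds the
    #      normalized weights (they sum to 1).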
@lru_cache(maxsize=10)
def new_sphere(self, q, R, Rsig, rho, eirho, adensity, dist='Gaussian',Np=10):
q = np.array(q)
dr, rdist, totalR = self.calc_Rdist(R, Rsig, dist, Np)
form = np.zeros_like(q)
eiform = np.zeros_like(q)
aform = np.zeros_like(q)
cform = np.zeros_like(q)
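        # (4*pi*r_e)^2 prefactor with the classical electron radius
        # r_e = 2.818e-5 Angs converted to cm (1 Angs = 1e-8 cm), so the
        # accumulated form factors below are in cm^2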
pfac = (4 * np.pi * 2.818e-5 * 1.0e-8) ** 2
for i in range(len(dr)):
r = np.array(R) * (1 + (dr[i] - totalR) / totalR)
ff, mff = ff_sphere_ml(q, r, rho)
form = form + rdist[i] * ff
eiff, meiff = ff_sphere_ml(q, r, eirho)
eiform = eiform + rdist[i] * eiff
aff, maff = ff_sphere_ml(q, r, adensity)
aform = aform + rdist[i] * aff
cform = cform + rdist[i] * (meiff * maff.conjugate()+meiff.conjugate()*maff)
return pfac * form, pfac * eiform, pfac * aform, np.abs(pfac * cform)/2 # in cm^2
@lru_cache(maxsize=2)
def new_sphere_dict(self, q, R, Rsig, rho, eirho, adensity, dist='Gaussian',Np=10,key='SAXS-term'):
form, eiform, aform, cform = self.new_sphere(q, R, Rsig, rho, eirho, adensity,dist=dist,Np=Np)
if key == 'SAXS-term':
return eiform
elif key == 'Resonant-term':
return aform
elif key == 'Cross-term':
return cform
elif key == 'Total':
return form
def update_params(self):
for mkey in self.__mkeys__:
key = 'Density'
Nmpar=len(self.__mpar__[mkey][key])
self.__Density__[mkey] = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]
key = 'VolFrac'
self.__VolFrac__[mkey] = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]
key = 'Rmoles'
self.__Rmoles__[mkey] = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]
key = 'R'
self.__R__[mkey] = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]
key = 'Material'
self.__material__[mkey] = [self.__mpar__[mkey][key][i] for i in range(Nmpar)]
for mkey in self.__mkeys__[1:]:
key='R'
for i in range(Nmpar):
self.params['__%s_%s_%03d'%(mkey,key,i)].set(expr='__%s_%s_%03d'%(self.__mkeys__[0],key,i))
mkey = 'Solvent'
key = 'VolFrac'
for i in range(Nmpar):
self.params['__%s_%s_%03d' % (mkey, key, i)].set(
expr='1.0-__Phase_1_VolFrac_%03d-__Phase_2_VolFrac_%03d' % (i, i))
def y(self):
"""
Define the function in terms of x to return some value
"""
svol = 1.5 * 0.0172 ** 2 / 370 ** 2 # scattering volume in cm^3
self.output_params = {'scaler_parameters': {}}
self.update_params()
mkey = 'Solvent'
sol_density = tuple(np.ones_like(self.__Density__[mkey]))
R = self.__R__[mkey]
rho, eirho, adensity, rhor, eirhor, adensityr = calc_rho(R=tuple(R),
material=tuple(self.__material__[mkey]),
relement=self.relement,
density=tuple(self.__Density__[mkey]),
sol_density=sol_density,
Energy=self.Energy,
Rmoles=tuple(self.__Rmoles__[mkey]),
NrDep=self.NrDep)
for mkey in self.__mkeys__:
if mkey != 'Solvent':
trho, teirho, tadensity, trhor, teirhor, tadensityr = calc_rho(R=tuple(self.__R__[mkey]),
material=tuple(self.__material__[mkey]),
relement=self.relement,
density=tuple(self.__Density__[mkey]),
sol_density=sol_density,
Energy=self.Energy,
Rmoles=tuple(self.__Rmoles__[mkey]),
NrDep=self.NrDep)
vf = np.array(self.__VolFrac__[mkey])
rho = rho + vf * trho
eirho = eirho + vf * teirho
adensity = adensity + vf * tadensity
if type(self.x) == dict:
sqf = {}
for key in self.x.keys():
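                # self.norm is in Molar; the factor 6.022e20 (= N_A/1000)
                # converts Moles/Liter to particles per cm^3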
sqf[key] = self.norm * 6.022e20 * self.new_sphere_dict(tuple(self.x[key]), tuple(self.__R__[self.__mkeys__[0]]),
self.Rsig, tuple(rho), tuple(eirho),
tuple(adensity), key=key, dist=self.dist,Np=self.Np) # in cm^-1
if self.SF is None:
struct = np.ones_like(self.x[key]) # hard_sphere_sf(self.x[key], D = self.D, phi = 0.0)
elif self.SF == 'Hard-Sphere':
struct = hard_sphere_sf(self.x[key], D=self.D, phi=self.phi)
else:
struct = sticky_sphere_sf(self.x[key], D=self.D, phi=self.phi, U=self.U, delta=0.01)
if key == 'SAXS-term':
sqf[key] = sqf[key] * struct + self.sbkg
if key == 'Cross-term':
sqf[key] = sqf[key] * struct + self.cbkg
if key == 'Resonant-term':
sqf[key] = sqf[key] * struct + self.abkg
key1 = 'Total'
total = self.norm * 6.022e20 * struct * self.new_sphere_dict(tuple(self.x[key]), tuple(self.__R__[self.__mkeys__[0]]),
self.Rsig, tuple(rho), tuple(eirho),
tuple(adensity),
key=key1,dist=self.dist,Np=self.Np) + self.sbkg # in cm^-1
if not self.__fit__:
dr, rdist, totalR = self.calc_Rdist(tuple(self.__R__[self.__mkeys__[0]]), self.Rsig, self.dist, self.Np)
self.output_params['Distribution'] = {'x': dr, 'y': rdist}
self.output_params['Simulated_total_wo_err'] = {'x': self.x[key], 'y': total}
self.output_params['Total'] = {'x': self.x[key], 'y': total}
for key in self.x.keys():
self.output_params[key] = {'x': self.x[key], 'y': sqf[key]}
self.output_params['rho_r'] = {'x': rhor[:, 0], 'y': rhor[:, 1]}
self.output_params['eirho_r'] = {'x': eirhor[:, 0], 'y': eirhor[:, 1]}
self.output_params['adensity_r'] = {'x': adensityr[:, 0], 'y': adensityr[:, 1]}
self.output_params['Structure_Factor'] = {'x': self.x[key], 'y': struct}
else:
if self.SF is None:
struct = np.ones_like(self.x)
elif self.SF == 'Hard-Sphere':
struct = hard_sphere_sf(self.x, D=self.D, phi=self.phi)
else:
struct = sticky_sphere_sf(self.x, D=self.D, phi=self.phi, U=self.U, delta=0.01)
tsqf, eisqf, asqf, csqf = self.new_sphere(tuple(self.x), tuple(self.__R__[self.__mkeys__[0]]), self.Rsig, tuple(rho),
tuple(eirho), tuple(adensity),dist=self.dist,Np=self.Np)
sqf = self.norm * np.array(tsqf) * 6.022e20 * struct + self.sbkg # in cm^-1
# if not self.__fit__: #Generate all the quantities below while not fitting
asqf = self.norm * np.array(asqf) * 6.022e20 * struct + self.abkg # in cm^-1
            eisqf = self.norm * np.array(eisqf) * 6.022e20 * struct + self.sbkg # in cm^-1
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import network
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as conductor_utils
from ironic import objects
from ironic.objects import fields as obj_fields
from ironic.tests import base as tests_base
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
CONF = cfg.CONF
class NodeSetBootDeviceTestCase(db_base.DbTestCase):
def test_node_set_boot_device_non_existent_device(self):
mgr_utils.mock_the_extension_manager(driver="fake_ipmitool")
self.driver = driver_factory.get_driver("fake_ipmitool")
ipmi_info = db_utils.get_test_ipmi_info()
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake_ipmitool',
driver_info=ipmi_info)
task = task_manager.TaskManager(self.context, node.uuid)
self.assertRaises(exception.InvalidParameterValue,
conductor_utils.node_set_boot_device,
task,
device='fake')
def test_node_set_boot_device_valid(self):
mgr_utils.mock_the_extension_manager(driver="fake_ipmitool")
self.driver = driver_factory.get_driver("fake_ipmitool")
ipmi_info = db_utils.get_test_ipmi_info()
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake_ipmitool',
driver_info=ipmi_info)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.management,
'set_boot_device') as mock_sbd:
conductor_utils.node_set_boot_device(task,
device='pxe')
mock_sbd.assert_called_once_with(task,
device='pxe',
persistent=False)
def test_node_set_boot_device_adopting(self):
mgr_utils.mock_the_extension_manager(driver="fake_ipmitool")
self.driver = driver_factory.get_driver("fake_ipmitool")
ipmi_info = db_utils.get_test_ipmi_info()
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake_ipmitool',
driver_info=ipmi_info,
provision_state=states.ADOPTING)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.management,
'set_boot_device') as mock_sbd:
conductor_utils.node_set_boot_device(task,
device='pxe')
self.assertFalse(mock_sbd.called)
class NodePowerActionTestCase(db_base.DbTestCase):
def setUp(self):
super(NodePowerActionTestCase, self).setUp()
mgr_utils.mock_the_extension_manager()
self.driver = driver_factory.get_driver("fake")
def test_node_power_action_power_on(self):
"""Test node_power_action to turn node power on."""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_OFF)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_mock:
get_power_mock.return_value = states.POWER_OFF
conductor_utils.node_power_action(task, states.POWER_ON)
node.refresh()
get_power_mock.assert_called_once_with(mock.ANY)
self.assertEqual(states.POWER_ON, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
@mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
def test_node_power_action_power_on_notify(self, mock_notif):
"""Test node_power_action to power on node and send notification."""
self.config(notification_level='info')
self.config(host='my-host')
# Required for exception handling
mock_notif.__name__ = 'NodeSetPowerStateNotification'
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_OFF)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_mock:
get_power_mock.return_value = states.POWER_OFF
conductor_utils.node_power_action(task, states.POWER_ON)
node.refresh()
get_power_mock.assert_called_once_with(mock.ANY)
self.assertEqual(states.POWER_ON, node.power_state)
self.assertIsNone(node.target_power_state)
self.assertIsNone(node.last_error)
# 2 notifications should be sent: 1 .start and 1 .end
self.assertEqual(2, mock_notif.call_count)
self.assertEqual(2, mock_notif.return_value.emit.call_count)
first_notif_args = mock_notif.call_args_list[0][1]
second_notif_args = mock_notif.call_args_list[1][1]
self.assertNotificationEqual(first_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.start',
obj_fields.NotificationLevel.INFO)
self.assertNotificationEqual(second_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.end',
obj_fields.NotificationLevel.INFO)
def test_node_power_action_power_off(self):
"""Test node_power_action to turn node power off."""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_ON)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_mock:
get_power_mock.return_value = states.POWER_ON
conductor_utils.node_power_action(task, states.POWER_OFF)
node.refresh()
get_power_mock.assert_called_once_with(mock.ANY)
self.assertEqual(states.POWER_OFF, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
def test_node_power_action_power_reboot(self):
"""Test for reboot a node."""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_ON)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power, 'reboot') as reboot_mock:
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_mock:
conductor_utils.node_power_action(task, states.REBOOT)
self.assertFalse(get_power_mock.called)
node.refresh()
reboot_mock.assert_called_once_with(mock.ANY)
self.assertEqual(states.POWER_ON, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
def test_node_power_action_invalid_state(self):
"""Test for exception when changing to an invalid power state."""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_ON)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_mock:
get_power_mock.return_value = states.POWER_ON
self.assertRaises(exception.InvalidParameterValue,
conductor_utils.node_power_action,
task,
"INVALID_POWER_STATE")
node.refresh()
self.assertFalse(get_power_mock.called)
self.assertEqual(states.POWER_ON, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNotNone(node['last_error'])
# last_error is cleared when a new transaction happens
conductor_utils.node_power_action(task, states.POWER_OFF)
node.refresh()
self.assertEqual(states.POWER_OFF, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
@mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
def test_node_power_action_invalid_state_notify(self, mock_notif):
"""Test for notification when changing to an invalid power state."""
self.config(notification_level='info')
self.config(host='my-host')
# Required for exception handling
mock_notif.__name__ = 'NodeSetPowerStateNotification'
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_ON)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_mock:
get_power_mock.return_value = states.POWER_ON
self.assertRaises(exception.InvalidParameterValue,
conductor_utils.node_power_action,
task,
"INVALID_POWER_STATE")
node.refresh()
self.assertFalse(get_power_mock.called)
self.assertEqual(states.POWER_ON, node.power_state)
self.assertIsNone(node.target_power_state)
self.assertIsNotNone(node.last_error)
# 2 notifications should be sent: 1 .start and 1 .error
self.assertEqual(2, mock_notif.call_count)
self.assertEqual(2, mock_notif.return_value.emit.call_count)
first_notif_args = mock_notif.call_args_list[0][1]
second_notif_args = mock_notif.call_args_list[1][1]
self.assertNotificationEqual(first_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.start',
obj_fields.NotificationLevel.INFO)
self.assertNotificationEqual(second_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.error',
obj_fields.NotificationLevel.ERROR)
def test_node_power_action_already_being_processed(self):
"""Test node power action after aborted power action.
The target_power_state is expected to be None so it isn't
checked in the code. This is what happens if it is not None.
(Eg, if a conductor had died during a previous power-off
attempt and left the target_power_state set to states.POWER_OFF,
and the user is attempting to power-off again.)
"""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_ON,
target_power_state=states.POWER_OFF)
task = task_manager.TaskManager(self.context, node.uuid)
conductor_utils.node_power_action(task, states.POWER_OFF)
node.refresh()
self.assertEqual(states.POWER_OFF, node['power_state'])
self.assertEqual(states.NOSTATE, node['target_power_state'])
self.assertIsNone(node['last_error'])
@mock.patch.object(conductor_utils, 'LOG', autospec=True)
def test_node_power_action_in_same_state(self, log_mock):
"""Test setting node state to its present state.
Test that we don't try to set the power state if the requested
state is the same as the current state.
"""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
last_error='anything but None',
power_state=states.POWER_ON)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_mock:
get_power_mock.return_value = states.POWER_ON
with mock.patch.object(self.driver.power,
'set_power_state') as set_power_mock:
conductor_utils.node_power_action(task, states.POWER_ON)
node.refresh()
get_power_mock.assert_called_once_with(mock.ANY)
self.assertFalse(set_power_mock.called,
"set_power_state unexpectedly called")
self.assertEqual(states.POWER_ON, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
log_mock.warning.assert_called_once_with(
u"Not going to change node %(node)s power state because "
u"current state = requested state = '%(state)s'.",
{'state': states.POWER_ON, 'node': node.uuid})
def test_node_power_action_in_same_state_db_not_in_sync(self):
"""Test setting node state to its present state if DB is out of sync.
Under rare conditions (see bug #1403106) database might contain stale
information, make sure we fix it.
"""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
last_error='anything but None',
power_state=states.POWER_ON)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_mock:
get_power_mock.return_value = states.POWER_OFF
with mock.patch.object(self.driver.power,
'set_power_state') as set_power_mock:
conductor_utils.node_power_action(task, states.POWER_OFF)
node.refresh()
get_power_mock.assert_called_once_with(mock.ANY)
self.assertFalse(set_power_mock.called,
"set_power_state unexpectedly called")
self.assertEqual(states.POWER_OFF, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
def test_node_power_action_failed_getting_state(self):
"""Test for exception when we can't get the current power state."""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_ON)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_state_mock:
get_power_state_mock.side_effect = (
exception.InvalidParameterValue('failed getting power state'))
self.assertRaises(exception.InvalidParameterValue,
conductor_utils.node_power_action,
task,
states.POWER_ON)
node.refresh()
get_power_state_mock.assert_called_once_with(mock.ANY)
self.assertEqual(states.POWER_ON, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNotNone(node['last_error'])
@mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
def test_node_power_action_failed_getting_state_notify(self, mock_notif):
"""Test for notification when we can't get the current power state."""
self.config(notification_level='info')
self.config(host='my-host')
# Required for exception handling
mock_notif.__name__ = 'NodeSetPowerStateNotification'
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_ON)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_state_mock:
get_power_state_mock.side_effect = (
exception.InvalidParameterValue('failed getting power state'))
self.assertRaises(exception.InvalidParameterValue,
conductor_utils.node_power_action,
task,
states.POWER_ON)
node.refresh()
get_power_state_mock.assert_called_once_with(mock.ANY)
self.assertEqual(states.POWER_ON, node.power_state)
self.assertIsNone(node.target_power_state)
self.assertIsNotNone(node.last_error)
# 2 notifications should be sent: 1 .start and 1 .error
self.assertEqual(2, mock_notif.call_count)
self.assertEqual(2, mock_notif.return_value.emit.call_count)
first_notif_args = mock_notif.call_args_list[0][1]
second_notif_args = mock_notif.call_args_list[1][1]
self.assertNotificationEqual(first_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.start',
obj_fields.NotificationLevel.INFO)
self.assertNotificationEqual(second_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.error',
obj_fields.NotificationLevel.ERROR)
def test_node_power_action_set_power_failure(self):
"""Test if an exception is thrown when the set_power call fails."""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_OFF)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_mock:
with mock.patch.object(self.driver.power,
'set_power_state') as set_power_mock:
get_power_mock.return_value = states.POWER_OFF
set_power_mock.side_effect = exception.IronicException()
self.assertRaises(
exception.IronicException,
conductor_utils.node_power_action,
task,
states.POWER_ON)
node.refresh()
get_power_mock.assert_called_once_with(mock.ANY)
set_power_mock.assert_called_once_with(mock.ANY,
states.POWER_ON)
self.assertEqual(states.POWER_OFF, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNotNone(node['last_error'])
@mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
def test_node_power_action_set_power_failure_notify(self, mock_notif):
"""Test if a notification is sent when the set_power call fails."""
self.config(notification_level='info')
self.config(host='my-host')
# Required for exception handling
mock_notif.__name__ = 'NodeSetPowerStateNotification'
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_OFF)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_mock:
with mock.patch.object(self.driver.power,
'set_power_state') as set_power_mock:
get_power_mock.return_value = states.POWER_OFF
set_power_mock.side_effect = exception.IronicException()
self.assertRaises(
exception.IronicException,
conductor_utils.node_power_action,
task,
states.POWER_ON)
node.refresh()
get_power_mock.assert_called_once_with(mock.ANY)
set_power_mock.assert_called_once_with(mock.ANY,
states.POWER_ON)
self.assertEqual(states.POWER_OFF, node.power_state)
self.assertIsNone(node.target_power_state)
self.assertIsNotNone(node.last_error)
# 2 notifications should be sent: 1 .start and 1 .error
self.assertEqual(2, mock_notif.call_count)
self.assertEqual(2, mock_notif.return_value.emit.call_count)
first_notif_args = mock_notif.call_args_list[0][1]
second_notif_args = mock_notif.call_args_list[1][1]
self.assertNotificationEqual(first_notif_args,
'ironic-conductor', CONF.host,
'baremetal.node.power_set.start',
obj_fields.NotificationLevel.INFO)
self.assertNotificationEqual(
second_notif_args, 'ironic-conductor', CONF.host,
'baremetal.node.power_set.error',
obj_fields.NotificationLevel.ERROR)
def test_node_power_action_power_on_storage_attach(self):
"""Test node_power_action to turn node power on and attach storage."""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_OFF,
storage_interface="cinder",
provision_state=states.ACTIVE)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(task.driver.storage,
'attach_volumes',
autospec=True) as attach_mock:
conductor_utils.node_power_action(task, states.POWER_ON)
node.refresh()
attach_mock.assert_called_once_with(task)
self.assertEqual(states.POWER_ON, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
def test_node_power_action_reboot_storage_attach(self):
"""Test node_power_action to reboot the node and attach storage."""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_ON,
storage_interface="cinder",
provision_state=states.ACTIVE)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(task.driver.storage,
'attach_volumes',
autospec=True) as attach_mock:
conductor_utils.node_power_action(task, states.REBOOT)
node.refresh()
attach_mock.assert_called_once_with(task)
self.assertEqual(states.POWER_ON, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
def test_node_power_action_power_off_storage_detach(self):
"""Test node_power_action to turn node power off and detach storage."""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
power_state=states.POWER_ON,
storage_interface="cinder",
provision_state=states.ACTIVE)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(task.driver.storage,
'detach_volumes',
autospec=True) as detach_mock:
conductor_utils.node_power_action(task, states.POWER_OFF)
node.refresh()
detach_mock.assert_called_once_with(task)
self.assertEqual(states.POWER_OFF, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
def test__calculate_target_state(self):
for new_state in (states.POWER_ON, states.REBOOT, states.SOFT_REBOOT):
self.assertEqual(
states.POWER_ON,
conductor_utils._calculate_target_state(new_state))
for new_state in (states.POWER_OFF, states.SOFT_POWER_OFF):
self.assertEqual(
states.POWER_OFF,
conductor_utils._calculate_target_state(new_state))
self.assertIsNone(conductor_utils._calculate_target_state('bad_state'))
def test__can_skip_state_change_different_state(self):
"""Test setting node state to different state.
Test that we should change state if requested state is different from
current state.
"""
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake',
last_error='anything but None',
power_state=states.POWER_ON)
task = task_manager.TaskManager(self.context, node.uuid)
with mock.patch.object(self.driver.power,
'get_power_state') as get_power_mock:
get_power_mock.return_value = states.POWER_ON
result = conductor_utils._can_skip_state_change(
task, states.POWER_OFF)
self.assertFalse(result)
get_power_mock.assert_called_once_with(mock.ANY)
@mock.patch.object(conductor_utils, 'LOG', autospec=True)
def test__can_skip_state_change_same_state(self, mock_log):
"""Test setting node state to its present state.
#!/usr/bin/env python3
desc="""Convert basecalled Fast5 with modifications annotated by guppy v3.1.5+
to FastQ with modification probabilities encoded as FastQ qualities.
More info at: https://github.com/lpryszcz/modPhred
Dependencies: h5py, pyguppyclient, running guppy_basecall_server
TO DO:
- catch exception of guppy client
"""
epilog="""Author: <EMAIL>
Barcelona, 17/02/2021
"""
from guppy_encode import *
import pyguppyclient, re, time, tempfile
from pyguppyclient import GuppyClientBase, yield_reads
from pyguppyclient.ipc import SimpleRequestType, SimpleReplyType
def get_completed_reads(client, trace=True, state=False):
"""Get completed reads from pyguppyclient v0.0.6"""
reads = []
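    # request flag is bit-packed; note '<<' binds tighter than '^', so this is
    # (not trace) ^ (state << 1) -- presumably bit 0 disables trace output and
    # bit 1 requests state data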
flag = (not trace) ^ state << 1
res = client.send(SimpleRequestType.GET_FIRST_CALLED_BLOCK, data=flag)
while res is not None:
read, called = res
while not called.complete:
_, block = client.send(SimpleRequestType.GET_NEXT_CALLED_BLOCK, data=read.read_tag)
called += block
# store read
reads.append((read, called))
res = client.send(SimpleRequestType.GET_FIRST_CALLED_BLOCK, data=flag)
return reads
def _get_read_data_v006(client):
"""Store read id, sequence and base_mod_probs. Compatible with v0.0.6"""
reads = []
for read, called in get_completed_reads(client):
reads.append((read.read_id, called.seq, called.mod_probs*255,
" ".join(called.mod_long_names), called.mod_alphabet))
return reads
def _get_read_data_v007a1(client):
"""Store read id, sequence and base_mod_probs. Compatible with v0.0.7a1.
"""
reads = [] # v0.0.7a1 returns reads, error_msg
for read in client.pcl_client.get_completed_reads()[0]:
md, ds = read["metadata"], read["datasets"]
reads.append((md["read_id"], ds["sequence"], ds["base_mod_probs"],
md["base_mod_long_names"], md["base_mod_alphabet"]))
return reads
def _get_read_data_v009(client):
"""Store read id, sequence and base_mod_probs. Compatible with v0.0.9.
"""
reads = [] # v0.0.9 returns reads
for read in client.pcl_client.get_completed_reads():
md, ds = read["metadata"], read["datasets"]
reads.append((md["read_id"], ds["sequence"], ds["base_mod_probs"],
md["base_mod_long_names"], md["base_mod_alphabet"]))
return reads
def get_basecalled_reads_data(fn, client, _get_read_data):
"""Return basecalled reads from given Fast5 file.
This implementation is >10x faster than using GuppyBasecallerClient.basecall()
"""
reads = []
# submit all reads
for ri, read in enumerate(yield_reads(fn), 1):
#if ri>100: break
client.pass_read(read)
# gradually grab basecalled reads (initially there will be none)
if not ri%100: reads += _get_read_data(client)
# wait for the rest of the reads
while len(reads)<ri:
time.sleep(.1)
reads += _get_read_data(client)
return reads
def basecalling_worker(args):
"""Basecalling worker.
Basecalling is running as a separate worker process, so GPU is fully loaded.
Here read objects are small (10-100 Mb per Fast5), thus easy to pickle.
"""
fn, config, host, port = args
# define parameters for pyguppyclient v0.0.6 or newer
ver = pyguppyclient.__version__
kwargs = {} # no trace=True for v0.0.6, v0.0.7a1
if ver=="0.0.6": _get_read_data = _get_read_data_v006
elif ver=="0.0.7a1": _get_read_data = _get_read_data_v007a1
elif ver=="0.0.9":
kwargs = {"trace": True}
_get_read_data = _get_read_data_v009
else:
sys.stderr.write("[ERROR] Unsupported pyguppy version: %s\n"%ver)
sys.exit(1)
# here due to v0.0.7a1 errors, we have to skip with and use connect after patching
#with GuppyClientBase(config_name=config, host=host, port=port, **kwargs) as client:
client = GuppyClientBase(config_name=config, host=host, port=port, **kwargs)
# patch v0.0.7a1 that doesn't return trace and mod_probs
if pyguppyclient.__version__=="0.0.7a1":
client.pcl_client.set_params({'move_and_trace_enabled': True})
client.connect()
reads = get_basecalled_reads_data(fn, client, _get_read_data)
client.disconnect() # this is a bit dirty, but works fine
return fn, reads
def get_encoded_FastQ(reads, fn, MaxPhredProb):
"""Store modificoation probability in FastQ"""
basecount = warns = 0
data, rname = [], ""
alphabet, symbol2modbase, canonical2mods, base2positions, mods2count = '', {}, {}, {}, {}
# open out file with gzip compression
outfn = fn+".fq.gz_"
# The default mode is "rb", and the default compresslevel is 9.
out = gzip.open(outfn, "wt")
for ri, (read_name, seq, mod_probs, mods, output_alphabet) in enumerate(reads, 1):
# prepare data storage if not already prepared
if ri==1:
alphabet, symbol2modbase, canonical2mods, base2positions = get_alphabet(output_alphabet, mods)
rna = True if "U" in alphabet else False
mods2count = {m: 0 for mods in canonical2mods.values() for m in mods}
# get mod probabilities and normalise to MaxPhredProb
modbaseprobNorm = np.array(mod_probs / MaxProb * MaxPhredProb, dtype='uint8')
# reverse only if RNA
if rna: modbaseprobNorm = modbaseprobNorm[::-1]
basecount += len(seq)
# get modprobs as qualities
phredmodprobs, mod2count = get_phredmodprobs(seq, modbaseprobNorm, mods2count, base2positions, canonical2mods, MaxPhredProb)
out.write("@%s\n%s\n+\n%s\n"%(read_name, seq, phredmodprobs))
# report number of bases
if rname: name = "/".join(fn.split("/")[-4:])
# mv only if finished
os.replace(fn+".fq.gz_", fn+".fq.gz")
return basecount, mods2count, alphabet, symbol2modbase, canonical2mods, base2positions
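# A minimal sketch of the probability -> quality normalisation used above,
# assuming (as the v0.0.6 code path suggests) that guppy modification
# probabilities arrive scaled to 0-255 (MaxProb) and that MaxPhredProb quality
# levels are reserved per modification; both are presumably provided by the
# guppy_encode module imported above (MaxPhredProb via get_MaxPhredProb):
#
#   import numpy as np
#   MaxProb, MaxPhredProb = 255, 31          # assumed example values
#   mod_probs = np.array([0, 128, 255])      # raw per-base mod probabilities
#   encoded = np.array(mod_probs / MaxProb * MaxPhredProb, dtype='uint8')
#   # -> array([ 0, 15, 31], dtype=uint8), later written out as FastQ qualities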
def start_guppy_server(host, config, port, device):
"""Start guppy_basecall_server and return its Popen object, host and port.
This starts new basecall server only if host is a path to guppy binary.
Otherwise, it'll use host:port provided on the startup.
"""
def get_guppy_port(tmp, pat=re.compile(r'Starting server on port:\W+(\d+)')):
"""Return guppy port"""
if os.path.isfile(tmp):
for line in open(tmp):
for m in pat.finditer(line):
return int(m.groups()[0])
return None
proc = None
if os.path.isfile(host):
logger("Starting %s ..."%host)
# get uniquely named tmp file
tmp = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()))
# create log
log = open(tmp+".log", "w")
# start guppy process
args = [host, "-l", tmp, "-c", config, "-x", device, "-p", "auto"]#; print(args)
proc = subprocess.Popen(args, shell=False, stdout=log, stderr=log)
while True:
# get guppy port
port = get_guppy_port(log.name)
if port is not None: break
# if no port, check if guppy init failed
exitcode = proc.poll()
if exitcode is not None:
sys.stderr.write("There was some issue while starting guppy.\n")
sys.stderr.write("Check logs for details: grep '' %s* \n"%tmp)
# output guppy log to stderr
log.close()
for line in open(log.name): sys.stderr.write(line)
sys.exit(exitcode)
time.sleep(0.1)
host = "localhost"
return proc, host, port
def mod_encode(indirs, threads, config, host, port, MaxModsPerBase=3,
recursive=False, remove=False, device="cuda:0"):
"""Convert basecalled Fast5 into FastQ with base modification probabilities
encoded as FastQ qualities.
"""
# start guppy server if needed
guppy_proc, host, port = start_guppy_server(host, config, port, device)
fast5_dirs = set()
MaxPhredProb = get_MaxPhredProb(MaxModsPerBase)
logger("Encoding modification info from %s directories...\n"%len(indirs))
for indir in indirs:
if recursive:
fnames = sorted(map(str, Path(indir).rglob('*.fast5')))
else:
fnames = sorted(map(str, Path(indir).glob('*.fast5')))
# process & remove Fast5 files modified more than --remove minutes ago
if remove:
now = time.time()
fnames = list(filter(lambda f: now-os.path.getmtime(f)>=remove*60, fnames))
logger(" %s with %s Fast5 file(s)...\n"%(indir, len(fnames)))
# load current data - this may cause problems if recursion was done...
data = load_info(indir, recursive)
# exit if not fnames nor modPhred.pkl
if not fnames and not data:
warning("[mod_encode][WARNING] No Fast5 files and no previously process data in %s\n"%indir)
sys.exit(1)
continue
# get already processed files
fast5 = {}
if data:
# start from the beginning if different MaxModsPerBase used
if data["MaxModsPerBase"] != MaxModsPerBase:
info = "[mod_encode][WARNING] Previously you used --MaxModsPerBase %s, while now %s. Recomputing FastQ files..."
warning(info%(data["MaxModsPerBase"], MaxModsPerBase))
else:
fast5 = data["fast5"]
logger(" %s were processed earlier."%len(fast5), add_memory=0)
# process files if not already processed (FastQ not present or FastQ is older than Fast5)
p = Pool(1) #, maxtasksperchild=1000)
args = [(fn, config, host, port) for fn in fnames
if not os.path.isfile(fn+".fq.gz")
or os.path.getmtime(fn)>os.path.getmtime(fn+".fq.gz")]
for ii, (fn, reads) in enumerate(p.imap(basecalling_worker, args), 1):
# store modification probability in FastQ
(basecount, mods2count, alphabet, symbol2modbase, canonical2mods,
base2positions) = get_encoded_FastQ(reads, fn, MaxPhredProb)
# skip files without bases
if not basecount: continue
sys.stderr.write(" %s / %s %s with %s bases. Detected mods: %s \r"%(ii, len(fnames), os.path.basename(fn), basecount, str(mods2count)))
data = load_info(indir, recursive)
# store data
if data:
# either add new Fast5
fast5 = data["fast5"]
fast5[fn] = basecount
fast5mod = data["fast5mod"]
fast5mod[fn] = mods2count
else:
# or start pickle from scratch if doesn't exists
fast5 = {fn: basecount}
fast5mod = {fn: mods2count}
moltype, bases = get_moltype(alphabet)
nmods = sum(len(v) for k, v in canonical2mods.items())
info = " %s alphabet with %s modification(s) %s. symbol2modbase: %s"
logger(info%(moltype, nmods, str(canonical2mods), str(symbol2modbase)), add_memory=0)
# make sure --MaxModsPerBase is sufficiently large
maxnmodsperbase = max(len(v) for k, v in canonical2mods.items())
if maxnmodsperbase>MaxModsPerBase:
info = "[mod_encode][WARNING] Too many modifications per base (%s). \nPlease restart with --MaxModsPerBase %s or larger!"
warning(info%(maxnmodsperbase, maxnmodsperbase))
# this keeps info on completed Fast5>FastQ this way
dump_info(indir, alphabet, symbol2modbase, canonical2mods, base2positions, fast5, fast5mod,
MaxModsPerBase, MaxPhredProb)
# report total number of bases for project
data = load_info(indir, recursive)
symbol2modbase = data["symbol2modbase"]
totbases = sum(v for k, v in data["fast5"].items())
# get total number of modifications
mods2count = {}
if "fast5mod" in data:
for fn in data["fast5mod"]:
for k, v in data["fast5mod"][fn].items():
if k not in mods2count: mods2count[k] = v
else: mods2count[k] += v
modcount = ", ".join(("{:,} {} [{:7,.3%}]".format(c, symbol2modbase[m], c/totbases)
for m, c in mods2count.items()))
logger(" {:,} bases saved in FastQ, of those: {} ".format(totbases, modcount), add_memory=0)
# close pool
p.close()
# and guppy basecall server if it was started by this process
if guppy_proc: guppy_proc.terminate()
return fast5_dirs
def warning(info):
def count(i=0):
i+=1
    logger(info, add_timestamp=0, add_memory=0)
import os
import numpy as np
import pytest
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from flopy.utils.lgrutil import Lgr
from framework import testing_framework
from simulation import Simulation
# Test for the interface model approach.
# It compares the result of a single, strongly anisotropic model
# with XT3D enabled to the equivalent case where the domain is
# decomposed and joined by a GWF-GWF exchange with XT3D applied.
#
# 'refmodel' 'leftmodel' 'rightmodel'
#
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 1 1 1 1 1 1 1 1 1 1 VS 1 1 1 1 1 + 1 1 1 1 1
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
#
# The head values should always be identical. All models are
# part of the same solution for convenience.
# In addition, a check on the x,y,z components of specific discharge
# is present. The values of the left submodel are compared to
# the left part of the full model, and similarly for the right: they
# should be identical. Finally, the budget error is checked.
ex = ["ifmod_xt3d02"]
exdirs = []
for s in ex:
exdirs.append(os.path.join("temp", s))
# global convenience...
mname_ref = "refmodel"
mname_left = "leftmodel"
mname_right = "rightmodel"
hclose_check = 1e-9
useXT3D = True
def get_model(idx, dir):
name = ex[idx]
# parameters and spd
# tdis
nper = 1
tdis_rc = []
for i in range(nper):
tdis_rc.append((1.0, 1, 1))
# solver data
nouter, ninner = 100, 300
hclose, rclose, relax = hclose_check, 1e-3, 0.97
# model spatial discretization
nlay = 1
ncol = 10
ncol_left = 5
ncol_right = 5
nrow = 10
# cell spacing
delr = 10.0
delc = 10.0
area = delr * delc
# shift (hor. and vert.)
shift_some_x = -20 * delr # avoids overlap
shift_x = 5 * delr
shift_y = 0.0
# top/bot of the aquifer
tops = [0.0, -5.0]
# hydraulic conductivity
k11 = 10.0
k22 = 0.1
k_angle = 45.0
# boundary stress period data
h_left = -2.0
h_right = -2.0
# initial head
h_start = -2.0
# well
well_id = (0, 4, 4)
well_rate = -1.0
# This creates the single model, for reference:
left_chd = [[(0, irow, 0), h_left] for irow in range(nrow)]
right_chd = [[(0, irow, ncol - 1), h_right] for irow in range(nrow)]
chd_data = left_chd + right_chd
chd_spd = {0: chd_data}
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=dir
)
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
ims = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="DBD",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
relaxation_factor=relax,
)
gwf = flopy.mf6.ModflowGwf(sim, modelname=mname_ref, save_flows=True)
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
xorigin=shift_some_x,
yorigin=0.0,
top=tops[0],
botm=tops[1:],
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(gwf, strt=h_start)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(
gwf,
save_specific_discharge=True,
xt3doptions=useXT3D,
save_flows=True,
icelltype=0,
k=k11,
k22=k22,
angle1=k_angle,
)
# chd file
chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chd_spd)
# well
wel1 = flopy.mf6.ModflowGwfwel(
gwf,
stress_period_data=[[well_id, well_rate]],
print_input=True,
print_flows=True,
save_flows=False,
pname="WEL-1",
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
head_filerecord="{}.hds".format(mname_ref),
budget_filerecord="{}.cbc".format(mname_ref),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
)
# Now create two coupled models with the interface model enabled,
# to be stored in the same solution as the reference model
# submodel on the left:
left_chd = [[(0, irow, 0), h_left] for irow in range(nrow)]
chd_spd_left = {0: left_chd}
gwf = flopy.mf6.ModflowGwf(sim, modelname=mname_left, save_flows=True)
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol_left,
delr=delr,
delc=delc,
top=tops[0],
botm=tops[1:],
)
ic = flopy.mf6.ModflowGwfic(gwf, strt=h_start)
npf = flopy.mf6.ModflowGwfnpf(
gwf,
save_specific_discharge=True,
xt3doptions=useXT3D,
save_flows=True,
icelltype=0,
k=k11,
k22=k22,
angle1=k_angle,
)
chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chd_spd_left)
oc = flopy.mf6.ModflowGwfoc(
gwf,
head_filerecord="{}.hds".format(mname_left),
budget_filerecord="{}.cbc".format(mname_left),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
)
wel1 = flopy.mf6.ModflowGwfwel(
gwf,
stress_period_data=[[well_id, well_rate]],
print_input=True,
print_flows=True,
save_flows=False,
pname="WEL-1",
)
# submodel on the right:
right_chd = [[(0, irow, ncol_right - 1), h_right] for irow in range(nrow)]
chd_spd_right = {0: right_chd}
gwf = flopy.mf6.ModflowGwf(sim, modelname=mname_right, save_flows=True)
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol_right,
delr=delr,
delc=delc,
xorigin=shift_x,
yorigin=shift_y,
top=tops[0],
botm=tops[1:],
)
ic = flopy.mf6.ModflowGwfic(gwf, strt=h_start)
npf = flopy.mf6.ModflowGwfnpf(
gwf,
save_specific_discharge=True,
xt3doptions=useXT3D,
save_flows=True,
icelltype=0,
k=k11,
k22=k22,
angle1=k_angle,
)
chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chd_spd_right)
oc = flopy.mf6.ModflowGwfoc(
gwf,
head_filerecord="{}.hds".format(mname_right),
budget_filerecord="{}.cbc".format(mname_right),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
)
# exchangedata
angldegx = 0.0
cdist = delr
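    # each exchangedata row is (cellidm1, cellidm2, ihc, cl1, cl2, hwva) followed
    # by the auxiliary values ANGLDEGX and CDIST declared below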
gwfgwf_data = [
[
(0, irow, ncol_left - 1),
(0, irow, 0),
1,
delr / 2.0,
delr / 2.0,
delc,
angldegx,
cdist,
]
for irow in range(nrow)
]
gwfgwf = flopy.mf6.ModflowGwfgwf(
sim,
exgtype="GWF6-GWF6",
nexg=len(gwfgwf_data),
exgmnamea=mname_left,
exgmnameb=mname_right,
exchangedata=gwfgwf_data,
auxiliary=["ANGLDEGX", "CDIST"],
xt3d=useXT3D,
)
return sim
def build_model(idx, exdir):
sim = get_model(idx, exdir)
return sim, None
def qxqyqz(fname, nlay, nrow, ncol):
nodes = nlay * nrow * ncol
cbb = flopy.utils.CellBudgetFile(fname, precision="double")
spdis = cbb.get_data(text="DATA-SPDIS")[0]
qx = np.ones((nodes), dtype=float) * 1.0e30
qy = np.ones((nodes), dtype=float) * 1.0e30
qz = np.ones((nodes), dtype=float) * 1.0e30
n0 = spdis["node"] - 1
qx[n0] = spdis["qx"]
qy[n0] = spdis["qy"]
qz[n0] = spdis["qz"]
qx = qx.reshape(nlay, nrow, ncol)
qy = qy.reshape(nlay, nrow, ncol)
qz = qz.reshape(nlay, nrow, ncol)
qx = np.ma.masked_equal(qx, 1.0e30)
qy = np.ma.masked_equal(qy, 1.0e30)
qz = np.ma.masked_equal(qz, 1.0e30)
return qx, qy, qz
def compare_to_ref(sim):
print("comparing heads and spec. discharge to single model reference...")
fpth = os.path.join(sim.simpath, "{}.hds".format(mname_ref))
hds = flopy.utils.HeadFile(fpth)
heads = hds.get_data()
fpth = os.path.join(sim.simpath, "{}.cbc".format(mname_ref))
nlay, nrow, ncol = heads.shape
qxb, qyb, qzb = qxqyqz(fpth, nlay, nrow, ncol)
fpth = os.path.join(sim.simpath, "{}.hds".format(mname_left))
hds = flopy.utils.HeadFile(fpth)
heads_left = hds.get_data()
fpth = os.path.join(sim.simpath, "{}.cbc".format(mname_left))
nlay, nrow, ncol = heads_left.shape
qxb_left, qyb_left, qzb_left = qxqyqz(fpth, nlay, nrow, ncol)
fpth = os.path.join(sim.simpath, "{}.hds".format(mname_right))
hds = flopy.utils.HeadFile(fpth)
heads_right = hds.get_data()
fpth = os.path.join(sim.simpath, "{}.cbc".format(mname_right))
nlay, nrow, ncol = heads_right.shape
qxb_right, qyb_right, qzb_right = qxqyqz(fpth, nlay, nrow, ncol)
heads_2models = np.append(heads_left[0], heads_right[0], axis=1)
# compare heads
maxdiff = np.amax(abs(heads - heads_2models))
assert (
maxdiff < 10 * hclose_check
), "Max. head diff. {} should \
be within solver tolerance (x10): {}".format(
maxdiff, 10 * hclose_check
)
# compare spdis_x left
maxdiff = np.amax(abs(qxb[:, :, 0:5] - qxb_left))
assert (
maxdiff < 10 * hclose_check
), "Max. diff. in spec. discharge (x) {} \
should be within solver tolerance (x10): {}".format(
maxdiff, 10 * hclose_check
)
# compare spdis_y left
maxdiff = np.amax(abs(qyb[:, :, 0:5] - qyb_left))
assert (
maxdiff < 10 * hclose_check
), "Max. diff. in spec. discharge (y) {} \
should be within solver tolerance (x10): {}".format(
maxdiff, 10 * hclose_check
)
# compare spdis_z left
maxdiff = np.amax(abs(qzb[:, :, 0:5] - qzb_left))
assert (
maxdiff < 10 * hclose_check
), "Max. diff. in spec. discharge (z) {} \
should be within solver tolerance (x10): {}".format(
maxdiff, 10 * hclose_check
)
# compare spdis_x right
maxdiff = np.amax(abs(qxb[:, :, 5:] - qxb_right))
assert (
maxdiff < 10 * hclose_check
), "Max. diff. in spec. discharge (x) {} \
should be within solver tolerance (x10): {}".format(
maxdiff, 10 * hclose_check
)
# compare spdis_y right
maxdiff = np.amax(abs(qyb[:, :, 5:] - qyb_right))
import json
import logging
import os
import time
from pathlib import Path
import click
import requests
from requests import HTTPError
from tenacity import retry, stop_after_delay, wait_fixed
from n26.config import Config, MFA_TYPE_SMS
from n26.const import DAILY_WITHDRAWAL_LIMIT, DAILY_PAYMENT_LIMIT
from n26.util import create_request_url
from Crypto import Random
from Crypto.Cipher import AES, PKCS1_v1_5
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Hash import SHA512
from Crypto.PublicKey import RSA
from Crypto.Util.Padding import pad
import base64
import json
LOGGER = logging.getLogger(__name__)
BASE_URL_DE = 'https://api.tech26.de'
BASE_URL_GLOBAL = 'https://api.tech26.global'
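# static Basic-auth header; the token is base64 for "my-trusted-wdpClient:secret"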
BASIC_AUTH_HEADERS = {"Authorization": "Basic bXktdHJ1c3RlZC13ZHBDbGllbnQ6c2VjcmV0"}
USER_AGENT = ("Mozilla/5.0 (X11; Linux x86_64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/59.0.3071.86 Safari/537.36")
GET = "get"
POST = "post"
EXPIRATION_TIME_KEY = "expiration_time"
ACCESS_TOKEN_KEY = "access_token"
REFRESH_TOKEN_KEY = "refresh_token"
GRANT_TYPE_PASSWORD = "password"
GRANT_TYPE_REFRESH_TOKEN = "refresh_token"
class Api(object):
"""
Api class can be imported as a library in order to use it within applications
"""
def __init__(self, cfg: Config = None):
"""
Constructor accepting None to maintain backward compatibility
:param cfg: configuration object
"""
if not cfg:
cfg = Config()
self.config = cfg
self._token_data = {}
BASIC_AUTH_HEADERS["device-token"] = self.config.DEVICE_TOKEN.value
@property
def token_data(self) -> dict:
if self.config.LOGIN_DATA_STORE_PATH.value is None:
return self._token_data
else:
return self._read_token_file(self.config.LOGIN_DATA_STORE_PATH.value)
@token_data.setter
def token_data(self, data: dict):
if self.config.LOGIN_DATA_STORE_PATH.value is None:
self._token_data = data
else:
self._write_token_file(data, self.config.LOGIN_DATA_STORE_PATH.value)
@staticmethod
def _read_token_file(path: str) -> dict:
"""
:return: the stored token data or an empty dict
"""
LOGGER.debug("Reading token data from {}".format(path))
path = Path(path).expanduser().resolve()
if not path.exists():
return {}
if not path.is_file():
raise IsADirectoryError("File path exists and is not a file: {}".format(path))
if path.stat().st_size <= 0:
# file is empty
return {}
with open(path, "r") as file:
return json.loads(file.read())
@staticmethod
def _write_token_file(token_data: dict, path: str):
LOGGER.debug("Writing token data to {}".format(path))
path = Path(path).expanduser().resolve()
# delete existing file if permissions don't match or file size is abnormally small
if path.exists() and (path.stat().st_mode != 0o100600 or path.stat().st_size < 10):
path.unlink()
path.parent.mkdir(parents=True, exist_ok=True, mode=0o700)
with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, 0o600), 'w') as file:
file.seek(0)
file.write(json.dumps(token_data, indent=2))
file.truncate()
# IDEA: @get_token decorator
def get_account_info(self) -> dict:
"""
Retrieves basic account information
"""
return self._do_request(GET, BASE_URL_DE + '/api/me')
def get_account_statuses(self) -> dict:
"""
Retrieves additional account information
"""
return self._do_request(GET, BASE_URL_DE + '/api/me/statuses')
def get_addresses(self) -> dict:
"""
Retrieves a list of addresses of the account owner
"""
return self._do_request(GET, BASE_URL_DE + '/api/addresses')
def get_balance(self) -> dict:
"""
Retrieves the current balance
"""
return self._do_request(GET, BASE_URL_DE + '/api/accounts')
def get_spaces(self) -> dict:
"""
Retrieves a list of all spaces
"""
return self._do_request(GET, BASE_URL_DE + '/api/spaces')
def barzahlen_check(self) -> dict:
return self._do_request(GET, BASE_URL_DE + '/api/barzahlen/check')
def get_cards(self):
"""
Retrieves a list of all cards
"""
return self._do_request(GET, BASE_URL_DE + '/api/v2/cards')
def get_account_limits(self) -> list:
"""
Retrieves a list of all active account limits
"""
return self._do_request(GET, BASE_URL_DE + '/api/settings/account/limits')
def set_account_limits(self, daily_withdrawal_limit: int = None, daily_payment_limit: int = None) -> None:
"""
Sets account limits
:param daily_withdrawal_limit: daily withdrawal limit
:param daily_payment_limit: daily payment limit
"""
if daily_withdrawal_limit is not None:
self._do_request(POST, BASE_URL_DE + '/api/settings/account/limits', json={
"limit": DAILY_WITHDRAWAL_LIMIT,
"amount": daily_withdrawal_limit
})
if daily_payment_limit is not None:
self._do_request(POST, BASE_URL_DE + '/api/settings/account/limits', json={
"limit": DAILY_PAYMENT_LIMIT,
"amount": daily_payment_limit
})
def get_contacts(self):
"""
Retrieves a list of all contacts
"""
return self._do_request(GET, BASE_URL_DE + '/api/smrt/contacts')
def get_standing_orders(self) -> dict:
"""
Get a list of standing orders
"""
return self._do_request(GET, BASE_URL_DE + '/api/transactions/so')
def get_transactions(self, from_time: int = None, to_time: int = None, limit: int = 20, pending: bool = None,
categories: str = None, text_filter: str = None, last_id: str = None) -> dict:
"""
Get a list of transactions.
Note that some parameters cannot be combined in a single request (e.g. text_filter and
pending) and will result in a bad request (400) error.
:param from_time: earliest transaction time as a Timestamp > 0 - milliseconds since 1970 in CET
:param to_time: latest transaction time as a Timestamp > 0 - milliseconds since 1970 in CET
:param limit: Limit the number of transactions to return to the given amount - default 20 as the n26 API returns
only the last 20 transactions by default
:param pending: show only pending transactions
:param categories: Comma separated list of category IDs
:param text_filter: Query string to search for
:param last_id: ??
:return: list of transactions
"""
if pending and limit:
# pending does not support limit
limit = None
return self._do_request(GET, BASE_URL_DE + '/api/smrt/transactions', {
'from': from_time,
'to': to_time,
'limit': limit,
'pending': pending,
'categories': categories,
'textFilter': text_filter,
'lastId': last_id
})
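# Illustrative usage sketch (not part of the client; values are made up):
#   api = Api()
#   recent = api.get_transactions(limit=50, text_filter="rewe")
#   pending_only = api.get_transactions(pending=True)
# Note: as documented above, text_filter and pending cannot be combined in one call.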
def get_transactions_limited(self, limit: int = 5) -> dict:
import warnings
warnings.warn(
"get_transactions_limited is deprecated, use get_transactions(limit=5) instead",
DeprecationWarning
)
return self.get_transactions(limit=limit)
def get_statements(self) -> list:
"""
Retrieves a list of all statements
"""
return self._do_request(GET, BASE_URL_DE + '/api/statements')
def block_card(self, card_id: str) -> dict:
"""
Blocks a card.
If the card is already blocked this will have no effect.
:param card_id: the id of the card to block
:return: some info about the card (not including its blocked state... thanks n26!)
"""
return self._do_request(POST, BASE_URL_DE + '/api/cards/%s/block' % card_id)
def unblock_card(self, card_id: str) -> dict:
"""
Unblocks a card.
If the card is already unblocked this will have no effect.
:param card_id: the id of the card to unblock
:return: some info about the card (not including its unblocked state... thanks n26!)
"""
return self._do_request(POST, BASE_URL_DE + '/api/cards/%s/unblock' % card_id)
def get_savings(self) -> dict:
return self._do_request(GET, BASE_URL_DE + '/api/hub/savings/accounts')
def get_statistics(self, from_time: int = None, to_time: int = None) -> dict:
"""
Get statistics in a given time frame
:param from_time: Timestamp - milliseconds since 1970 in CET
:param to_time: Timestamp - milliseconds since 1970 in CET
"""
if not from_time:
from_time = 0
if not to_time:
to_time = int(time.time()) * 1000
return self._do_request(GET, BASE_URL_DE + '/api/smrt/statistics/categories/%s/%s' % (from_time, to_time))
def get_available_categories(self) -> list:
return self._do_request(GET, BASE_URL_DE + '/api/smrt/categories')
def get_invitations(self) -> list:
return self._do_request(GET, BASE_URL_DE + '/api/aff/invitations')
def _do_request(self, method: str = GET, url: str = "/", params: dict = None,
json: dict = None, headers: dict = None) -> list or dict or None:
"""
Executes a http request based on the given parameters
:param method: the method to use (GET, POST)
:param url: the url to use
:param params: query parameters that will be appended to the url
:param json: request body
:param headers: custom headers
:return: the response parsed as a json
"""
access_token = self.get_token()
_headers = {'Authorization': 'Bearer {}'.format(access_token)}
if headers is not None:
_headers.update(headers)
url = create_request_url(url, params)
if method == GET:
response = requests.get(url, headers=_headers, json=json)
elif method == POST:
response = requests.post(url, headers=_headers, json=json)
else:
raise ValueError("Unsupported method: {}".format(method))
response.raise_for_status()
# some responses do not return data so we just ignore the body in that case
if len(response.content) > 0:
return response.json()
def get_encryption_key(self, public_key: str = None) -> dict:
"""
Retrieves the public key used to encrypt the JSON string containing the PIN encryption key
"""
return self._do_request(GET, BASE_URL_DE + '/api/encryption/key', params={
'publicKey': public_key
})
def encrypt_user_pin(self, pin: str):
"""
Encrypts user PIN and prepares it in a format required for a transaction order
:return: a tuple of the encrypted and base64 encoded JSON containing the PIN encryption
key, followed by the encrypted and base64 encoded PIN
"""
# generate AES256 key and IV
random_password = Random.get_random_bytes(32)
salt = Random.get_random_bytes(16)
# noinspection PyTypeChecker
key = PBKDF2(random_password, salt, 32, count=1000000, hmac_hash_module=SHA512)
iv = Random.new().read(AES.block_size)
key64 = base64.b64encode(key).decode('utf-8')
iv64 = base64.b64encode(iv).decode('utf-8')
# encode the key and iv as a json string
aes_secret = {
'secretKey': key64,
'iv': iv64
}
# json string has to be represented in byte form for encryption
unencrypted_aes_secret = bytes(json.dumps(aes_secret), 'utf-8')
# Encrypt the secret JSON with RSA using the provided public key
public_key = self.get_encryption_key()
public_key_non64 = base64.b64decode(public_key['publicKey'])
public_key_object = RSA.importKey(public_key_non64)
public_key_cipher = PKCS1_v1_5.new(public_key_object)
encrypted_secret = public_key_cipher.encrypt(unencrypted_aes_secret)
encrypted_secret64 = base64.b64encode(encrypted_secret)
# Encrypt user's pin
private_key_cipher = AES.new(key=key, mode=AES.MODE_CBC, iv=iv)
# the pin has to be padded and transformed into bytes for a correct encryption format
encrypted_pin = private_key_cipher.encrypt(pad(bytes(pin, 'utf-8'), 16))
encrypted_pin64 = base64.b64encode(encrypted_pin)
return encrypted_secret64, encrypted_pin64
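# Illustrative sketch of how the two values returned above could be wired into a
# transaction payload. The field names below are an assumption for demonstration only
# and are not confirmed by this client:
#   encrypted_secret64, encrypted_pin64 = api.encrypt_user_pin("1234")
#   body = {"encryptedSecretKey": encrypted_secret64.decode(), "encryptedPin": encrypted_pin64.decode()}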
def create_transaction(self, iban: str, bic: str, name: str, reference: str, amount: float, pin: str):
"""
from builtins import object
import uuid
from axes.handlers.proxy import AxesProxyHandler
from django import forms
from django.conf import settings
from django.contrib.auth import get_user_model, password_validation, authenticate
from django.contrib.auth.hashers import make_password
from django.contrib.auth.password_validation import validate_password
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers, exceptions, validators
from rest_framework_jwt.serializers import JSONWebTokenSerializer
from rest_framework_jwt.settings import api_settings
import passwordmeter
from bluebottle.bluebottle_drf2.serializers import SorlImageField, ImageSerializer
from bluebottle.clients import properties
from bluebottle.geo.models import Location, Place
from bluebottle.geo.serializers import PlaceSerializer
from bluebottle.initiatives.models import Theme
from bluebottle.members.models import MemberPlatformSettings, UserActivity, UserSegment
from bluebottle.organizations.serializers import OrganizationSerializer
from bluebottle.segments.models import Segment
from bluebottle.segments.serializers import SegmentTypeSerializer
from bluebottle.time_based.models import Skill
from bluebottle.utils.serializers import PermissionField, TruncatedCharField, CaptchaField
BB_USER_MODEL = get_user_model()
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
class AxesJSONWebTokenSerializer(JSONWebTokenSerializer):
def validate(self, attrs):
credentials = {
self.username_field: attrs.get(self.username_field),
'password': attrs.get('password')
}
if all(credentials.values()):
request = self.context['request']
user = authenticate(request, **credentials)
if getattr(request, 'axes_locked_out', False):
raise exceptions.Throttled(
600, 'Too many failed password attempts.'
)
if user:
if not user.is_active:
msg = _('User account is disabled.')
raise serializers.ValidationError(msg)
payload = jwt_payload_handler(user)
return {
'token': jwt_encode_handler(payload),
'user': user
}
else:
msg = _('Unable to log in with provided credentials.')
raise serializers.ValidationError(msg)
else:
msg = _('Must include "{username_field}" and "password".')
msg = msg.format(username_field=self.username_field)
raise serializers.ValidationError(msg)
class PrivateProfileMixin(object):
private_fields = (
'url', 'full_name', 'picture', 'about_me', 'location', 'last_name',
'phone_number', 'avatar', 'website', 'twitter', 'facebook', 'skypename'
)
def to_representation(self, obj):
data = super(PrivateProfileMixin, self).to_representation(obj)
user = self.context['request'].user
can_read_full_profile = self.context['request'].user.has_perm(
'members.api_read_full_member')
if obj != user and not can_read_full_profile:
for field in self.private_fields:
if field in data:
del data[field]
return data
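# Sketch of the mixin's effect (values are illustrative): for a requester who is neither
# the profile owner nor a holder of 'members.api_read_full_member', a representation like
#   {'id': 7, 'first_name': 'Jane', 'full_name': 'Jane Doe', 'phone_number': '+31...'}
# is reduced to
#   {'id': 7, 'first_name': 'Jane'}
# because every key listed in private_fields is dropped.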
class BaseUserPreviewSerializer(PrivateProfileMixin, serializers.ModelSerializer):
"""
Serializer for a subset of a member's public profile. This is usually
embedded into other serializers.
"""
def __init__(self, *args, **kwargs):
kwargs['read_only'] = True
super(BaseUserPreviewSerializer, self).__init__(*args, **kwargs)
avatar = SorlImageField('133x133', source='picture', crop='center')
# TODO: Remove first/last name and only use these
full_name = serializers.ReadOnlyField(
source='get_full_name', read_only=True)
short_name = serializers.ReadOnlyField(
source='get_short_name', read_only=True)
is_active = serializers.BooleanField(read_only=True)
is_anonymous = serializers.SerializerMethodField()
def get_is_anonymous(self, obj):
return False
class Meta(object):
model = BB_USER_MODEL
fields = ('id', 'first_name', 'last_name', 'initials', 'about_me',
'avatar', 'full_name', 'short_name', 'is_active', 'is_anonymous')
class AnonymizedUserPreviewSerializer(PrivateProfileMixin, serializers.ModelSerializer):
"""
Serializer for a subset of a member's public profile. This is usually
embedded into other serializers.
"""
is_anonymous = serializers.SerializerMethodField()
def __init__(self, *args, **kwargs):
kwargs['read_only'] = True
super(AnonymizedUserPreviewSerializer, self).__init__(*args, **kwargs)
id = 0
def get_is_anonymous(self, obj):
return False
class Meta(object):
model = BB_USER_MODEL
fields = ('id', 'is_anonymous')
class UserPreviewSerializer(serializers.ModelSerializer):
"""
User preview serializer that respects anonymization_age
"""
def to_representation(self, instance):
if self.parent.__class__.__name__ == 'ReactionSerializer':
# For some reason self.parent.instance doesn't work on ReactionSerializer
if self.parent.instance:
if self.parent.instance.anonymized:
return {"id": 0, "is_anonymous": True}
else:
wallpost = self.parent.parent.parent.instance
if wallpost.anonymized:
return {"id": 0, "is_anonymous": True}
if self.parent and self.parent.instance and getattr(self.parent.instance, 'anonymized', False):
return {"id": 0, "is_anonymous": True}
return BaseUserPreviewSerializer(instance, context=self.context).to_representation(instance)
class Meta(object):
model = BB_USER_MODEL
fields = (
'id',
'first_name',
'last_name',
'initials',
'about_me',
'avatar',
'full_name',
'short_name',
'is_active'
)
class UserPermissionsSerializer(serializers.Serializer):
def get_attribute(self, obj):
return obj
project_list = PermissionField('initiative-list')
project_manage_list = PermissionField('initiative-list')
homepage = PermissionField('home-page-detail')
class Meta(object):
fields = [
'project_list',
'project_manage_list',
'homepage'
]
class CurrentUserSerializer(BaseUserPreviewSerializer):
"""
Serializer for the current authenticated user. This is the same as the
serializer for the member preview with the
addition of id_for_ember.
"""
# This is a hack to work around an issue with Ember-Data keeping the id as
# 'current'.
id_for_ember = serializers.IntegerField(source='id', read_only=True)
full_name = serializers.CharField(source='get_full_name', read_only=True)
permissions = UserPermissionsSerializer(read_only=True)
organization = OrganizationSerializer(
read_only=True, source='partner_organization'
)
segments = serializers.PrimaryKeyRelatedField(
many=True, queryset=Segment.objects
)
class Meta(object):
model = BB_USER_MODEL
fields = UserPreviewSerializer.Meta.fields + (
'id_for_ember', 'primary_language', 'email', 'full_name', 'phone_number',
'last_login', 'date_joined', 'location',
'verified', 'permissions', 'matching_options_set',
'organization', 'segments', 'required'
)
class OldSegmentSerializer(serializers.ModelSerializer):
type = SegmentTypeSerializer()
class Meta(object):
model = Segment
fields = (
'id', 'name', 'type'
)
class UserProfileSerializer(PrivateProfileMixin, serializers.ModelSerializer):
"""
Serializer for a member's public profile.
"""
url = serializers.HyperlinkedIdentityField(view_name='user-profile-detail',
lookup_field='pk')
picture = ImageSerializer(required=False)
date_joined = serializers.DateTimeField(read_only=True)
full_name = serializers.CharField(source='get_full_name', read_only=True)
short_name = serializers.CharField(source='get_short_name', read_only=True)
primary_language = serializers.CharField(required=False,
default=properties.LANGUAGE_CODE)
location = serializers.PrimaryKeyRelatedField(required=False, allow_null=True,
queryset=Location.objects)
avatar = SorlImageField('133x133', source='picture', crop='center',
required=False)
skill_ids = serializers.PrimaryKeyRelatedField(many=True,
source='skills',
required=False,
queryset=Skill.objects)
favourite_theme_ids = serializers.PrimaryKeyRelatedField(
many=True, source='favourite_themes', queryset=Theme.objects)
segments = serializers.PrimaryKeyRelatedField(
many=True, queryset=Segment.objects
)
is_active = serializers.BooleanField(read_only=True)
def save(self, *args, **kwargs):
instance = super().save(*args, **kwargs)
if 'location' in self.validated_data:
# if we are setting the location, make sure we verify the location too
instance.location_verified = True
instance.save()
if 'segments' in self.validated_data:
# if we are setting segments, make sure we verify them too
UserSegment.objects.filter(member_id=instance.pk).update(verified=True)
return instance
class Meta(object):
model = BB_USER_MODEL
fields = (
'id', 'url', 'full_name', 'short_name', 'initials', 'picture',
'primary_language', 'about_me', 'location', 'avatar', 'date_joined',
'is_active', 'website', 'twitter', 'facebook',
'skypename', 'skill_ids', 'favourite_theme_ids',
'subscribed', 'segments'
)
class UserActivitySerializer(serializers.ModelSerializer):
"""
Serializer for user activity (log paths)
"""
path = TruncatedCharField(length=200, required=False)
class Meta(object):
model = UserActivity
fields = (
'id',
'path',
)
class ManageProfileSerializer(UserProfileSerializer):
"""
Serializer for a member's private profile.
"""
partial = True
from_facebook = serializers.SerializerMethodField()
place = PlaceSerializer(required=False, allow_null=True)
def get_from_facebook(self, instance):
try:
instance.social_auth.get(provider='facebook')
return True
except instance.social_auth.model.DoesNotExist:
return False
class Meta(object):
model = BB_USER_MODEL
fields = UserProfileSerializer.Meta.fields + (
'email', 'newsletter', 'campaign_notifications', 'matching_options_set', 'location',
'birthdate', 'gender', 'first_name', 'last_name', 'phone_number',
'from_facebook', 'place',
)
def update(self, instance, validated_data):
place = validated_data.pop('place', None)
if place:
if instance.place:
current_place = instance.place
for key, value in list(place.items()):
setattr(current_place, key, value)
current_place.save()
else:
instance.place = Place.objects.create(**place)
else:
if instance.place:
instance.place = None
return super(ManageProfileSerializer, self).update(instance, validated_data)
class UserDataExportSerializer(UserProfileSerializer):
"""
Serializer for a member's data dump.
"""
class Meta(object):
model = BB_USER_MODEL
fields = (
'id', 'email', 'location', 'birthdate',
'url', 'full_name', 'short_name', 'initials', 'picture',
'gender', 'first_name', 'last_name', 'phone_number',
'primary_language', 'about_me', 'location', 'avatar',
'date_joined', 'website', 'twitter', 'facebook',
'skypename', 'skills', 'favourite_themes'
)
class PasswordValidator(object):
requires_context = True
def __call__(self, value, serializer_field):
if serializer_field.parent.instance:
user = serializer_field.parent.instance
else:
user = None
password_validation.validate_password(value, user)
return value
# Thanks to <NAME> for this code:
# https://groups.google.com/d/msg/django-rest-framework/abMsDCYbBRg/d2orqUUdTqsJ
class PasswordField(serializers.CharField):
""" Special field to update a password field. """
widget = forms.widgets.PasswordInput
hidden_password_string = '********'
def __init__(self, **kwargs):
super(PasswordField, self).__init__(**kwargs)
validator = PasswordValidator()
self.validators.append(validator)
def to_representation(self, value):
""" Hide hashed-password in API display. """
return self.hidden_password_string
class SignUpTokenSerializer(serializers.ModelSerializer):
"""
Serializer for creating users. This can only be used for creating
users (POST) and should not be used for listing,
editing or viewing users.
"""
email = serializers.EmailField(max_length=254)
url = serializers.CharField(required=False, allow_blank=True)
segment_id = serializers.CharField(required=False, allow_blank=True)
class Meta(object):
model = BB_USER_MODEL
fields = ('id', 'email', 'url', 'segment_id')
def validate_email(self, email):
settings = MemberPlatformSettings.objects.get()
if (
settings.email_domain and
not email.endswith('@{}'.format(settings.email_domain))
):
raise serializers.ValidationError(
('Only emails for the domain {} are allowed').format(
settings.email_domain)
)
if len(BB_USER_MODEL.objects.filter(email__iexact=email, is_active=True)):
raise serializers.ValidationError(
'a member with this email address already exists.', code='duplicate-facebook',
)
return email
class JSONAPIMeta:
resource_name = 'signup-tokens'
class SignUpTokenConfirmationSerializer(serializers.ModelSerializer):
"""
Serializer for creating users. This can only be used for creating
users (POST) and should not be used for listing,
editing or viewing users.
"""
password = PasswordField(required=True, max_length=128)
token = serializers.CharField(required=True, max_length=128)
jwt_token = serializers.CharField(source='get_jwt_token', read_only=True)
first_name = serializers.CharField(max_length=100)
last_name = serializers.CharField(max_length=100)
class Meta(object):
model = BB_USER_MODEL
fields = ('id', 'password', 'token', 'jwt_token', 'first_name', 'last_name', )
def validate_password(self, password):
return make_password(password)
class JSONAPIMeta:
resource_name = 'signup-token-confirmations'
class PasswordStrengthSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True)
email = serializers.CharField(write_only=True, allow_blank=True)
strength = serializers.SerializerMethodField()
def validate(self, data):
user = BB_USER_MODEL(**data)
validate_password(data['password'], user)
return data
def get_strength(self, data):
strength, _ = passwordmeter.test(data['password'])
return strength
class Meta(object):
model = BB_USER_MODEL
fields = ('id', 'password', 'email', 'strength')
class JSONAPIMeta:
resource_name = 'password-strengths'
class UserCreateSerializer(serializers.ModelSerializer):
"""
Serializer for creating users. This can only be used for creating
users (POST) and should not be used for listing,
editing or viewing users.
"""
email = serializers.EmailField(
max_length=254,
validators=[
validators.UniqueValidator(
queryset=BB_USER_MODEL.objects.all(), lookup='iexact'
)
]
)
email_confirmation = serializers.EmailField(
label=_('email_confirmation'), max_length=254, required=False)
password = PasswordField(required=True, max_length=128)
token = serializers.CharField(required=False, max_length=128)
jwt_token = serializers.CharField(source='get_jwt_token', read_only=True)
primary_language = serializers.CharField(required=False)
@property
def errors(self):
errors = super(UserCreateSerializer, self).errors
if 'email' in errors and 'email' in self.data and errors['email'][0].code == 'unique':
user = self.Meta.model.objects.get(email__iexact=self.data['email'])
conflict = {
'email': user.email,
'id': user.id
}
# We assume if they have a social auth associated then they use it
if user.social_auth.count() > 0:
social_auth = user.social_auth.all()[0]
conflict['provider'] = social_auth.provider
conflict['type'] = 'social'
else:
conflict['type'] = 'email'
errors[
settings.REST_FRAMEWORK.get(
'NON_FIELD_ERRORS_KEY', 'non_field_errors')
] = [conflict]
request = self.context['request']
AxesProxyHandler.user_login_failed(self, {}, request)
if getattr(request, 'axes_locked_out', False):
raise exceptions.Throttled(
600, 'Too many
diff_rel)
return OptionalStartEnd(self.start_abs_msec, self.end_abs_msec)
HeapId = int
TimeForStageByHeap = Mapping[GcJoinStage, Mapping[HeapId, OptionalStartEnd]]
MutTimeForStageByHeap = Dict[GcJoinStage, Dict[HeapId, MutOptionalStartEnd]]
def get_join_stage_start_end_times_for_heaps(gc: AbstractTraceGC) -> TimeForStageByHeap:
return lazy_property(gc, get_join_stage_start_end_times_for_heaps_worker)
# For each stage, for each heap, get the first and last events in that stage.
def get_join_stage_start_end_times_for_heaps_worker(gc: AbstractTraceGC) -> TimeForStageByHeap:
start_end_for_stage_by_heap: MutTimeForStageByHeap = {}
def get_start_end(stage: GcJoinStage, heap: int) -> MutOptionalStartEnd:
return start_end_for_stage_by_heap.setdefault(stage, {}).setdefault(
heap, MutOptionalStartEnd()
)
all_heaps: Set[int] = set()
for i, heap in enumerate(gc.ServerGcHeapHistories):
assert heap.HeapId == i
all_heaps.add(heap.HeapId)
single_threaded_join_stage: Optional[GcJoinStage] = None
for join in heap.GcJoins:
stage = GcJoinStage(join.JoinID)
time = GcJoinTime(join.Time)
join_type = GcJoinType(join.Type)
if time == GcJoinTime.start:
if stage != GcJoinStage.restart:
start_end = get_start_end(stage, heap.HeapId)
start_end.n_starts += 1
start_end.start_rel_msec = join.RelativeTimestampMsc
start_end.start_abs_msec = join.AbsoluteTimestampMsc
if join_type == GcJoinType.last_join:
single_threaded_join_stage = GcJoinStage(stage)
elif time == GcJoinTime.end:
if stage == GcJoinStage.restart:
# assert False
if single_threaded_join_stage is not None:
# if True:
# assert False # TODO
start_end = start_end_for_stage_by_heap[single_threaded_join_stage][
heap.HeapId
]
start_end.end_abs_msec = join.AbsoluteTimestampMsc
start_end.end_rel_msec = join.RelativeTimestampMsc
single_threaded_join_stage = None
else:
# TODO: The time should only be there once, shouldn't it?
# Why are we joining the same stage multiple times?
# assert start_end_for_stage_by_heap[stage][heap.HeapId].end == 0
# start_end_for_stage_by_heap.setdefault(stage, {})
start_end = get_start_end(stage, heap.HeapId)
start_end.n_ends += 1
start_end.end_rel_msec = join.RelativeTimestampMsc
start_end.end_abs_msec = join.AbsoluteTimestampMsc
else:
raise Exception(time)
print("ALLHEAPS", all_heaps)
for stage, heap_to_start_end in start_end_for_stage_by_heap.items():
hp_num = 3
if hp_num in heap_to_start_end:
start_end = heap_to_start_end[3]
# print(
# f"{stage}: heap {hp_num} start_end is {start_end}, span is {start_end.finish(stage, hp_num).span_msec}"
# )
else:
print(f"heap {hp_num} has no stage {stage}")
def finish_stage(
stage: GcJoinStage, heap_to_start_end: Mapping[HeapId, MutOptionalStartEnd]
) -> Tuple[GcJoinStage, Mapping[HeapId, OptionalStartEnd]]:
# TODO: apparently one of the heaps may randomly be missing. Don't know why.
# seen_heaps = sorted(stage.keys())
# all_heaps = tuple(range(gc.HeapCount))
# assert seen_heaps == all_heaps, f"Expected heaps {all_heaps}, only got {seen_heaps}"
return (
stage,
map_mapping(lambda hp_num, se: (hp_num, se.finish(stage, hp_num)), heap_to_start_end),
)
return map_mapping(finish_stage, start_end_for_stage_by_heap)
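# Shape of the result, with made-up numbers purely for illustration:
#   {<GcJoinStage>: {0: OptionalStartEnd(12.3, 15.1), 1: OptionalStartEnd(12.4, 15.0), ...}, ...}
# i.e. for each join stage, for each heap id, the absolute msec of the first join start
# and the last join end observed for that stage on that heap.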
@with_slots
@dataclass(frozen=True)
class StatsOverAllJoins:
median_join_msec: float
maximum_join_msec: float
minimum_join_msec: float
@with_slots
@dataclass(frozen=True)
class AbsPct:
absolute: float
percentage: float
@with_slots
@dataclass(frozen=True)
class ForHeap:
deviation_from_median_join_stage_duration: AbsPct
@with_slots
@dataclass(frozen=True)
class IndividualJoinStats:
join_stage_name: str
median_heap_join_msec: float
minimum_heap_join_msec: float # TODO: never used?
maximum_heap_join_msec: float
Heaps: Mapping[int, ForHeap]
@with_slots
@dataclass(frozen=True)
class StatsOverIndividualGcPhase:
median_phase_join_msec: float
max_phase_join_msec: float # TODO: never used?
min_phase_join_msec: float
deviation_from_median_join_msec: AbsPct
@with_slots
@dataclass(frozen=True)
class GcJoinStatistics:
statistics_over_all_joins: StatsOverAllJoins
statistics_over_individual_joins: Mapping[GcJoinStage, IndividualJoinStats]
statistics_over_individual_gc_phases: Mapping[GcJoinPhase, StatsOverIndividualGcPhase]
def _get_join_duration_by_heap(gc: AbstractTraceGC) -> Mapping[GcJoinStage, Mapping[HeapId, float]]:
all_times = _get_join_times_for_all_heaps(gc).unwrap() # TODO: handle err in unwrap
res: Dict[GcJoinStage, Dict[HeapId, float]] = {}
for hp_num, stage_to_times in enumerate(all_times):
for stage, times in stage_to_times.items():
# TODO: old gui assumed a given stage only happened once. That isn't true and this throws away data.
res.setdefault(stage, {})[hp_num] = times[-1]
return res
def _get_stats_for_join_phase(gc: AbstractTraceGC, join_stage: GcJoinStage) -> IndividualJoinStats:
heap_to_join_duration = _get_join_duration_by_heap(gc)[join_stage]
join_durations_for_stage = tuple(heap_to_join_duration.values())
median_join_msec_for_stage = median(join_durations_for_stage)
max_join_msec_for_stage = max(join_durations_for_stage)
min_join_msec_for_stage = min(join_durations_for_stage)
join_stage_name = GcJoinStage(join_stage).name
return IndividualJoinStats(
join_stage_name=join_stage_name,
median_heap_join_msec=median_join_msec_for_stage,
minimum_heap_join_msec=min_join_msec_for_stage,
maximum_heap_join_msec=max_join_msec_for_stage,
Heaps=_get_for_heaps(heap_to_join_duration),
)
def _get_for_heap(median_join_msec_for_stage: float, heap_join_duration: float) -> ForHeap:
absolute_deviation_from_median = abs(heap_join_duration - median_join_msec_for_stage)
percent_deviation_from_median = (
absolute_deviation_from_median / median_join_msec_for_stage * 100.0
)
return ForHeap(AbsPct(absolute_deviation_from_median, percent_deviation_from_median))
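# Worked example: with a median join duration of 10.0 msec for a stage, a heap that spent
# 12.5 msec joining gets ForHeap(AbsPct(absolute=2.5, percentage=25.0)).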
def _get_for_heaps(heap_to_join_duration: Mapping[int, float]) -> Mapping[int, ForHeap]:
median_join_msec_for_stage = median(heap_to_join_duration.values())
return {
heap_number: _get_for_heap(median_join_msec_for_stage, heap_join_duration)
for heap_number, heap_join_duration in heap_to_join_duration.items()
}
def get_gc_join_duration_statistics(gc: AbstractTraceGC) -> Result[str, GcJoinStatistics]:
assert _all_gc_join_ids_valid(gc)
join_duration_by_heap = _get_join_duration_by_heap(gc)
join_stage_to_individual_join_stats = {
join_stage: _get_stats_for_join_phase(gc, join_stage)
for join_stage in join_duration_by_heap
}
def f(all_join_durations_iter: Iterable[float]) -> GcJoinStatistics:
all_join_durations = tuple(all_join_durations_iter)
median_join_duration_all = median(all_join_durations)
all_join_stats = StatsOverAllJoins(
median_join_msec=median_join_duration_all,
maximum_join_msec=max(all_join_durations),
minimum_join_msec=min(all_join_durations),
)
join_duration_list_by_gc_phase = _get_join_duration_list_by_gc_phase(gc)
return GcJoinStatistics(
all_join_stats,
join_stage_to_individual_join_stats,
_get_stats_for_individual_phases(
join_duration_list_by_gc_phase, median_join_duration_all
),
)
return map_ok(_get_all_join_durations_for_all_heaps(gc), f)
def _get_join_duration_list_by_gc_phase(
gc: AbstractTraceGC,
) -> Mapping[GcJoinPhase, Sequence[float]]:
return make_multi_mapping(
(gc_phase_for_stage, join_duration)
for join_stage, heap_to_join_duration in _get_join_duration_by_heap(gc).items()
for gc_phase_for_stage in optional_to_iter(_try_get_join_phase(join_stage))
for join_duration in heap_to_join_duration.values()
)
def _get_stats_for_individual_phases(
join_duration_list_by_gc_phase: Mapping[GcJoinPhase, Sequence[float]],
all_joins_median_duration: float,
) -> Mapping[GcJoinPhase, StatsOverIndividualGcPhase]:
join_stats_by_gc_phase: Dict[GcJoinPhase, StatsOverIndividualGcPhase] = {}
for phase in GC_JOIN_STAGES_BY_GC_PHASE:
join_durations_for_all_joins_in_phase = join_duration_list_by_gc_phase.get(phase)
if join_durations_for_all_joins_in_phase is not None:
assert not is_empty(join_durations_for_all_joins_in_phase)
median_join_duration = median(join_durations_for_all_joins_in_phase)
absolute_deviation_from_median = abs(median_join_duration - all_joins_median_duration)
percent_deviation_from_median = (
absolute_deviation_from_median / all_joins_median_duration * 100.0
)
s = StatsOverIndividualGcPhase(
median_phase_join_msec=median_join_duration,
max_phase_join_msec=max(join_durations_for_all_joins_in_phase),
min_phase_join_msec=min(join_durations_for_all_joins_in_phase),
deviation_from_median_join_msec=AbsPct(
absolute_deviation_from_median, percent_deviation_from_median
),
)
add(join_stats_by_gc_phase, phase, s)
return join_stats_by_gc_phase
def get_gc_join_timeframes(
clr: Clr, gc: AbstractTraceGC
) -> Tuple[bool, Mapping[str, StartEnd], Mapping[str, StartEnd], Mapping[int, StartEnd]]:
phase_time_frames: Dict[GcJoinPhase, StartEnd] = {}
join_stage_time_frames: Dict[GcJoinStage, StartEnd] = {}
join_index_time_frames: Dict[int, StartEnd] = {}
can_determine_join_stages = _all_gc_join_ids_valid(gc)
if can_determine_join_stages:
times = _get_gc_join_stage_timeframes(gc)
# At this point, we now have the start/end times of each granular join stage. We want to
# turn this detailed information into high-level start/stop times for different GC phases,
# e.g. the mark phase or compact phase (if it occurs). From here, based on the join stage
# sequence observed in each phase, we can come up with some heuristics to determine the
# start of a phase, the end, or both.
start_for_phase: Dict[GcJoinPhase, float] = defaultdict(lambda: inf)
end_for_phase: Dict[GcJoinPhase, float] = defaultdict(float)
for phase, stages_within_phase in GC_JOIN_STAGES_BY_GC_PHASE.items():
# In general, we can always mark the end of a phase by enumerating all join stages in
# that stage and tracking the latest join end fired for a stage within that phase.
# Similarly, we can track the stage with the earliest join start fired within a phase
# and declare that as the start of our phase as an approximation.
for stage in stages_within_phase:
if (
stage in times.start_for_stage
and times.start_for_stage[stage].time < start_for_phase[phase]
):
start_for_phase[phase] = times.start_for_stage[stage].time
if (
stage in times.end_for_stage
and times.end_for_stage[stage].time > end_for_phase[phase]
):
end_for_phase[phase] = times.end_for_stage[stage].time
# The above is a good generalization/first step, but there are a few exceptions we need to
# make for certain GC phases:
# 1. The join within the plan phase is actually at the end of the plan phase.
# The beginning of the plan phase should be defined as the end of the mark phase.
# 2. The join within the sweep phase is also a marker for the end of the sweep phase. The
# beginning of the sweep phase should be defined as the end of the plan phase.
# 3. The post-GC work join is for the start of post-gc work. The above loop will actually
# mark the end of post-GC work as the end of this join which is incorrect; the actual
# end of this phase should be marked as the start of the EE restart.
start_for_phase[GcJoinPhase.plan] = end_for_phase[GcJoinPhase.mark]
if GcJoinPhase.sweep in start_for_phase:
start_for_phase[GcJoinPhase.sweep] = end_for_phase[GcJoinPhase.plan]
# I'll deal with the special case for post-GC work properly later.
# For now I'll define the end of post-GC work as the end of the GC entirely
# (which shouldn't be a horrible approximation anyways)
end_for_phase[GcJoinPhase.post_gc] = gc.PauseDurationMSec
for phase in start_for_phase:
phase_time_frames[phase] = StartEnd(start_for_phase[phase], end_for_phase[phase])
for stage in times.start_for_stage:
if stage in times.end_for_stage:
join_stage_time_frames[stage] = StartEnd(
times.start_for_stage[stage].time, times.end_for_stage[stage].time
)
else:
join_stage_time_frames[stage] = StartEnd(times.start_for_stage[stage].time, inf)
for stage in times.end_for_stage:
if stage in times.start_for_stage:
pass # We already covered this case in the loop above
else:
join_stage_time_frames[stage] = StartEnd(-inf, times.end_for_stage[stage].time)
else:
join_index_start_times, join_index_end_times = _get_join_index_start_end_times_for_heaps(
clr, gc
)
for join_index in join_index_start_times:
try:
min_join_start_time = min(join_index_start_times[join_index].values())
except ValueError:
min_join_start_time = -inf
try:
max_join_end_time = max(join_index_end_times[join_index].values())
except ValueError:
max_join_end_time = inf
join_index_time_frames[join_index] = StartEnd(min_join_start_time, max_join_end_time)
return (
can_determine_join_stages,
map_mapping_keys(lambda phase: phase.name, phase_time_frames),
map_mapping_keys(lambda stage: stage.name, join_stage_time_frames),
join_index_time_frames,
)
@with_slots
@dataclass(frozen=True)
class TimeAndHeap:
# For a start time, this has the time of the *first* heap's start, and that heap number.
# For an end time, this has the time of the *last* heap's end, and that heap number
time: float
heap_num: int
@with_slots
@dataclass(frozen=True)
class StartEndTimesForStages:
start_for_stage: Mapping[GcJoinStage, TimeAndHeap]
end_for_stage: Mapping[GcJoinStage, TimeAndHeap]
def _get_gc_join_stage_timeframes(gc: AbstractTraceGC) -> StartEndTimesForStages:
start_end_for_stage_by_heap = get_join_stage_start_end_times_for_heaps(gc)
# Finding the start/end of GC join stages is effectively a matter of finding the first
# join start fired and the last join end fired. Obviously there are some cases where this isn't
# going to be 100% correct, e.g. when the restart end happens after all other threads call
# join end and *especially* in cases if a thread gets switched out during single-threaded
# mode. However, we would still expect someone to find this relatively easily in a timeline
# view despite this error.
start_for_stage = {}
end_for_stage = {}
# MIT License
#
# Copyright (c) 2018 kuangliu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
#
# MIT License
#
# Copyright (c) 2017 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as data
from PIL import Image
from xml.etree import ElementTree
import os
import glob
from pathlib import Path
import numpy as np
import random
import itertools
import torch.nn.functional as F
try:
import ujson as json
except ImportError:
import json
import gc
import time
import bz2
import pickle
from math import sqrt, ceil, cos, sin, pi
from mlperf_logging.mllog import constants
from mlperf_logger import log_event
from SSD import _C as C
from fused_color_jitter import FusedColorJitter
# This function is from https://github.com/kuangliu/pytorch-ssd
def calc_iou_tensor(box1, box2):
""" Calculation of IoU based on two boxes tensor,
Reference to https://github.com/kuangliu/pytorch-ssd
input:
box1 (N, 4)
box2 (M, 4)
output:
IoU (N, M)
"""
N = box1.size(0)
M = box2.size(0)
be1 = box1.unsqueeze(1).expand(-1, M, -1)
be2 = box2.unsqueeze(0).expand(N, -1, -1)
# Left Top & Right Bottom
lt = torch.max(be1[:,:,:2], be2[:,:,:2])
#mask1 = (be1[:,:, 0] < be2[:,:, 0]) ^ (be1[:,:, 1] < be2[:,:, 1])
#mask1 = ~mask1
rb = torch.min(be1[:,:,2:], be2[:,:,2:])
#mask2 = (be1[:,:, 2] < be2[:,:, 2]) ^ (be1[:,:, 3] < be2[:,:, 3])
#mask2 = ~mask2
delta = rb - lt
delta[delta < 0] = 0
intersect = delta[:,:,0]*delta[:,:,1]
#*mask1.float()*mask2.float()
delta1 = be1[:,:,2:] - be1[:,:,:2]
area1 = delta1[:,:,0]*delta1[:,:,1]
delta2 = be2[:,:,2:] - be2[:,:,:2]
area2 = delta2[:,:,0]*delta2[:,:,1]
iou = intersect/(area1 + area2 - intersect)
return iou
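# Minimal usage sketch of calc_iou_tensor (illustrative only; nothing in this module calls
# it): the two boxes below, in (xmin, ymin, xmax, ymax) form, overlap on a 1x1 square, so
# the expected IoU is 1 / (4 + 4 - 1) ~= 0.143.
def _example_calc_iou():
    box1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])  # shape (N=1, 4)
    box2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])  # shape (M=1, 4)
    return calc_iou_tensor(box1, box2)  # approximately tensor([[0.1429]])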
# This class is from https://github.com/chauhan-utk/ssd.DomainAdaptation
class SSDCropping(object):
""" Cropping for SSD, according to original paper
Choose between following 3 conditions:
1. Preserve the original image
2. Random crop minimum IoU is among 0.1, 0.3, 0.5, 0.7, 0.9
3. Random crop
Reference to https://github.com/chauhan-utk/ssd.DomainAdaptation
"""
def __init__(self):
self.sample_options = (
# Do nothing
None,
# min IoU, max IoU
(0.1, None),
(0.3, None),
(0.5, None),
(0.7, None),
(0.9, None),
# no IoU requirements
(None, None),
)
# The implementation uses 1 iteration to find a possible candidate; this
# was shown to produce the same mAP as using more iterations.
self.num_cropping_iterations = 1
log_event(key=constants.MAX_SAMPLES,
value=self.num_cropping_iterations)
def __call__(self, img, img_size, bboxes, labels):
# Ensure we always return a cropped image
while True:
mode = random.choice(self.sample_options)
if mode is None:
return img, img_size, bboxes, labels
htot, wtot = img_size
min_iou, max_iou = mode
min_iou = float("-inf") if min_iou is None else min_iou
max_iou = float("+inf") if max_iou is None else max_iou
# Try up to self.num_cropping_iterations times to find a possible candidate
for _ in range(self.num_cropping_iterations):
# size of each sampled patch in [0.1, 1]; 0.3*0.3 approx. 0.1
w = random.uniform(0.3 , 1.0)
h = random.uniform(0.3 , 1.0)
if w/h < 0.5 or w/h > 2:
continue
# left 0 ~ wtot - w, top 0 ~ htot - h
left = random.uniform(0, 1.0 - w)
top = random.uniform(0, 1.0 - h)
right = left + w
bottom = top + h
ious = calc_iou_tensor(bboxes, torch.tensor([[left, top, right, bottom]]))
# tailor all the bboxes and return
if not ((ious > min_iou) & (ious < max_iou)).all():
continue
# discard any bboxes whose center not in the cropped image
xc = 0.5*(bboxes[:, 0] + bboxes[:, 2])
yc = 0.5*(bboxes[:, 1] + bboxes[:, 3])
masks = (xc > left) & (xc < right) & (yc > top) & (yc < bottom)
# if no such boxes, continue searching again
if not masks.any():
continue
bboxes[bboxes[:, 0] < left, 0] = left
bboxes[bboxes[:, 1] < top, 1] = top
bboxes[bboxes[:, 2] > right, 2] = right
bboxes[bboxes[:, 3] > bottom, 3] = bottom
#print(left, top, right, bottom)
#print(labels, bboxes, masks)
bboxes = bboxes[masks, :]
labels = labels[masks]
left_idx = int(left*wtot)
top_idx = int(top*htot)
right_idx = int(right*wtot)
bottom_idx = int(bottom*htot)
#print(left_idx,top_idx,right_idx,bottom_idx)
#img = img[:, top_idx:bottom_idx, left_idx:right_idx]
img = img.crop((left_idx, top_idx, right_idx, bottom_idx))
bboxes[:, 0] = (bboxes[:, 0] - left)/w
bboxes[:, 1] = (bboxes[:, 1] - top)/h
bboxes[:, 2] = (bboxes[:, 2] - left)/w
bboxes[:, 3] = (bboxes[:, 3] - top)/h
htot = bottom_idx - top_idx
wtot = right_idx - left_idx
return img, (htot, wtot), bboxes, labels
# Don't need to cast to float, already there (from FusedColorJitter)
class ToTensor(object):
def __init__(self):
pass
def __call__(self, img):
img = torch.Tensor(np.array(img))
# Transform from HWC to CHW
img = img.permute(2, 0 ,1).div(255)
return img
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, image, bboxes):
if random.random() < self.p:
bboxes[:, 0], bboxes[:, 2] = 1.0 - bboxes[:, 2], 1.0 - bboxes[:, 0]
return image.transpose(Image.FLIP_LEFT_RIGHT), bboxes
return image, bboxes
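# Quick sketch of the box update used above (boxes are normalized ltrb): a horizontal flip
# maps xmin -> 1 - xmax and xmax -> 1 - xmin, so (xmin, xmax) = (0.2, 0.5) becomes
# (0.5, 0.8); the y coordinates are untouched.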
# Do data augmentation
class SSDTransformer(object):
""" SSD Data Augumentation, according to original paper
Composed by several steps:
Cropping
Resize
Flipping
Jittering
"""
def __init__(self, size = (300, 300), val=False):
# define vgg16 mean
self.size = size
self.val = val
self.crop = SSDCropping()
self.img_trans = transforms.Compose([
transforms.Resize(self.size),
#transforms.ColorJitter(brightness=0.125, contrast=0.5,
# saturation=0.5, hue=0.05
#),
#transforms.ToTensor(),
FusedColorJitter(),
ToTensor(),
])
self.hflip = RandomHorizontalFlip()
# All Pytorch Tensor will be normalized
# https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683
normalization_mean = [0.485, 0.456, 0.406]
normalization_std = [0.229, 0.224, 0.225]
self.normalize = transforms.Normalize(mean=normalization_mean,
std=normalization_std)
self.trans_val = transforms.Compose([
transforms.Resize(self.size),
transforms.ToTensor(),
self.normalize,])
def __call__(self, img, img_size, bbox=None, label=None, max_num=200):
#img = torch.tensor(img)
if self.val:
bbox_out = torch.zeros(max_num, 4)
label_out = torch.zeros(max_num, dtype=torch.long)
bbox_out[:bbox.size(0), :] = bbox
label_out[:label.size(0)] = label
return self.trans_val(img), img_size, bbox_out, label_out
# random crop
img, img_size, bbox, label = self.crop(img, img_size, bbox, label)
# random horiz. flip
img, bbox = self.hflip(img, bbox)
# [Resize, ColorJitter, ToTensor]
img = self.img_trans(img).contiguous()
img = self.normalize(img)
return img, img_size, bbox, label
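# Illustrative sketch of driving SSDTransformer on a single validation sample. It is not
# used by the pipeline; the image and boxes are made up, and it assumes the mlperf logger
# works without extra setup (SSDCropping logs once on construction).
def _example_ssd_transform():
    trans = SSDTransformer(size=(300, 300), val=True)
    img = Image.new("RGB", (640, 480))                 # blank PIL image, width x height
    bboxes = torch.tensor([[0.1, 0.1, 0.4, 0.5]])      # one normalized ltrb box
    labels = torch.tensor([1])
    # returns (image tensor, original size, padded boxes (200, 4), padded labels (200,))
    return trans(img, (480, 640), bboxes, labels)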
# Implement a datareader for COCO dataset
class COCODetection(data.Dataset):
def __init__(self, img_folder, annotate_file, transform=None, data=None):
self.img_folder = img_folder
self.annotate_file = annotate_file
import sys
import os
import json
import re
from argparse import ArgumentParser
from collections import OrderedDict
from utils.augmentations import SSDAugmentation, TreeAugmentation
# Get project and dataset directories across platform
from .host_config import get_host_config, CONFIGS_DIR
HOST_CONFIG = get_host_config()
ROOT_DIR = HOST_CONFIG['root']
# Allow the user to set the configs from the command line.
# Inspired from:
# https://github.com/ltrottier/pytorch-object-recognition/blob/master/opts.py
# Original author: <NAME>
parser = ArgumentParser()
# dataset
parser.add_argument('--dataset_dir', type=str,
help='Subdirectory of the host root directory.')
parser.add_argument('--dataset_name', type=str, default='Tree',
help='Name of dataset')
parser.add_argument('--dataset_num_classes', type=int, default=2,
help="Number of classes")
parser.add_argument('--dataset_classes_name', type=str, default=['branchpoints', 'branchtips'],
help="Name of classes")
parser.add_argument('--dataset_object_properties', type=str, default=['xmin', 'xmax', 'ymin', 'ymax', 'class'],
help='ordered object properties that appear in the ground truth and detection files.')
parser.add_argument('--dataset_augmentation', type=str, default='SSDAugmentation',
help='Type of augmentation scheme used when loading images.')
parser.add_argument('--dataset_images_dir', type=str, default='images/',
help='Subdirectory of dataset_dir where images are saved')
parser.add_argument('--dataset_bounding_boxes_dir', type=str, default='bounding_boxes/',
help='Subdirectory of dataset_dir where bounding boxes properties are saved')
# dataloader
parser.add_argument('--dataloader_batch_size', type=int, default=4,
help='Batch size for training')
parser.add_argument('--dataloader_num_workers', type=int, default=1,
help='Number of workers to load dataset')
# train
parser.add_argument('--train_cuda', type=bool, default=True,
help='Use CUDA to train the model')
parser.add_argument('--train_num_epochs', type=int, default=300,
help='Number of training epochs')
parser.add_argument('--train_start_epoch', type=int, default=0,
help='Starting epoch of training.')
parser.add_argument('--train_resume', type=str,
help='Checkpoint state_dict file in --output_weights_dir to resume training from')
parser.add_argument('--train_resume_weights_only', default=False, type=bool,
help='Resume only weights (not epoch, lr, etc)')
parser.add_argument('--train_lr_init', type=float, default=0.0001,
help='Initial learning rate')
parser.add_argument('--train_lr_schedule', type=int, default=[80, 160, 240, 280],
help='Epoch number when learning rate is reduced.')
parser.add_argument('--train_lr_decay', type=float, default=0.1,
help='Learning rate reduction (%%) applied at each epoch in --train_lr_schedule')
parser.add_argument('--train_momentum', type=float, default=0.9,
help='Momentum value for optimizer')
parser.add_argument('--train_weight_decay', type=float, default=5e-4,
help='Weigth decay for SGD')
parser.add_argument('--train_visdom', default=False, type=bool,
help='Use visdom to visualize')
# model
parser.add_argument('--model_basenet', type=str, default='vgg16_reducedfc.pth',
help='Pretrained base model')
parser.add_argument('--model_num_classes', type=int, default=parser.get_default("dataset_num_classes") + 1,
help='Number of classes that the model distinguishes. Background class adds 1.')
parser.add_argument('--model_pixel_means', type=int, default=[129, 129, 129],
help='Mean value of pixels. Subtracted before processing')
parser.add_argument('--model_feature_maps_dim', type=int, default=[38, 19, 10, 5, 3, 1],
help='Square dimension of feature maps.')
parser.add_argument('--model_input_size', type=int, default=300,
help='Square size of network input image')
parser.add_argument('--model_prior_box_scales', type=float,
default=[0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05],
help='Size of prior boxes relative to --model_input_size')
parser.add_argument('--model_prior_box_aspect_ratios', type=float,
default=[[1 / 2, 2], [1 / 2, 2, 1 / 3, 3], [1 / 2, 2, 1 / 3, 3], [1 / 2, 2, 1 / 3, 3],
[1 / 2, 2], [1 / 2, 2]],
help='Aspect ratios of prior boxes in each feature map')
parser.add_argument('--model_prior_box_clip', type=bool,
default=True,
help='Clip the prior box dimensions to fit the image.')
parser.add_argument('--model_prior_box_variance', type=float, default=[0.1, 0.2],
help='Variance used to encore/decode bounding boxes')
# eval
parser.add_argument('--eval_model_name',
default='ssd300_' + parser.get_default("dataset_name") + '_Final.pth', type=str,
help='trained model filename in --output_weights_dir used for evaluation')
parser.add_argument('--eval_overwrite_all_detections', default=False, type=bool,
help='Overwrite all_detections file')
parser.add_argument('--eval_confidence_threshold', default=0.01, type=float,
help='Discard detected boxes below confidence threshold')
parser.add_argument('--eval_top_k', default=50, type=int,
help='Restrict the number of predictions per image')
parser.add_argument('--eval_cuda', default=True, type=bool,
help='Use CUDA to evaluate the model')
# criterion
parser.add_argument('--criterion_train', type=str, default='multibox')
# output
parser.add_argument('--output_weights_dir', type=str, default='weights/',
help='Subdirectory of ROOT_DIR for saving training checkpoints')
parser.add_argument('--output_detections_dir', type=str, default='detections/',
help='Subdirectory of dataset_dir where detections are saved')
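# Example invocation sketch (the script name "train.py" is an assumption; the flags are the
# ones declared above):
#   python train.py --dataset_dir datasets/tree_synth2 --dataloader_batch_size 8 \
#       --train_lr_init 1e-4 --train_num_epochs 300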
# SSD300 CONFIGS
# Bounding boxes colors
COLORS = ((255, 0, 0, 128), (0, 255, 0, 128), (0, 0, 255, 128),
(0, 255, 255, 128), (255, 0, 255, 128), (255, 255, 0, 128))
MEANS = (104, 117, 123)
# Note that 1 more class is added to the number of classes to account for the background class (0).
extra_configs = {'variance': [0.1, 0.2]}
# Configurations for recognizing branchpoints on 300x300 images of trees, without noise.
test_config = {
'dataset_dir': 'dir/test',
'dataset_num_classes': 3
}
# Configurations for specific synthesizers
tree_synth0_config = {
'pixel_means': (13, 35, 170),
'num_classes': 2,
'classes_name': ['background', 'branchpoints'],
'lr_steps': (60, 120, 180),
'N_epochs': 300,
'feature_maps': [38, 19, 10, 5, 3, 1],
'min_dim': 300,
'steps': [8, 16, 32, 64, 100, 300],
'min_sizes': [30, 60, 111, 162, 213, 264],
'max_sizes': [60, 111, 162, 213, 264, 315],
'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
'variance': [0.1, 0.2],
'clip': True,
'name': 'Tree',
}
tree_synth1_config = {
'pixel_means': (13, 35, 170),
'num_classes': 3,
'classes_name': ['background', 'branchpoints', 'branchtips'],
'lr_steps': (60, 120, 180),
'N_epochs': 300,
'feature_maps': [38, 19, 10, 5, 3, 1],
'min_dim': 300,
'steps': [8, 16, 32, 64, 100, 300],
'min_sizes': [30, 60, 111, 162, 213, 264],
'max_sizes': [60, 111, 162, 213, 264, 315],
'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
'variance': [0.1, 0.2],
'clip': True,
'name': 'Tree',
}
tree_synth2_config = {
'pixel_means': (129, 129, 129),
'num_classes': 3,
'classes_name': ['background', 'branchpoints', 'branchtips'],
'lr_steps': (60, 120, 180),
'N_epochs': 300,
'feature_maps': [38, 19, 10, 5, 3, 1],
'min_dim': 300,
'steps': [8, 16, 32, 64, 100, 300],
'min_sizes': [30, 60, 111, 162, 213, 264],
'max_sizes': [60, 111, 162, 213, 264, 315],
'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
'variance': [0.1, 0.2],
'clip': True,
'name': 'Tree',
}
# Configuration class definitions
class dataset:
def __init__(self, dir, name, num_classes, classes_name, images_dir, object_properties, augmentation,
bounding_boxes_dir):
self.dir = dir
self.name = name
self.num_classes = num_classes
self.classes_name = classes_name
self.images_dir = images_dir
self.object_properties = object_properties
self.augmentation = augmentation
self.bounding_boxes_dir = bounding_boxes_dir
class dataloader:
def __init__(self, batch_size, num_workers):
self.batch_size = batch_size
self.num_workers = num_workers
class train:
def __init__(self, cuda, num_epochs, start_epoch, resume, resume_weights_only,
lr_init, lr_schedule, lr_decay, momentum, weight_decay, visdom):
self.cuda = cuda
self.num_epochs = num_epochs
self.start_epoch = start_epoch
self.resume = resume
self.resume_weights_only = resume_weights_only
self.lr_init = lr_init
self.lr_schedule = lr_schedule
self.lr_decay = lr_decay
self.momentum = momentum
self.weight_decay = weight_decay
self.visdom = visdom
class model:
def __init__(self, basenet, num_classes, pixel_means, feature_maps_dim, input_size,
prior_box_scales, prior_box_aspect_ratios, prior_box_clip, prior_box_variance):
self.basenet = basenet
self.num_classes = num_classes
self.pixel_means = pixel_means
self.feature_maps_dim = feature_maps_dim
self.input_size = input_size
self.prior_box_scales = prior_box_scales
self.prior_box_aspect_ratios = prior_box_aspect_ratios
self.prior_box_clip = prior_box_clip
self.prior_box_variance = prior_box_variance
class eval:
def __init__(self, model_name, overwrite_all_detections, confidence_threshold, top_k, cuda):
self.model_name = model_name
self.overwrite_all_detections = overwrite_all_detections
self.confidence_threshold = confidence_threshold
self.top_k = top_k
self.cuda = cuda
class criterion:
def __init__(self, train):
self.train = train
class output:
def __init__(self, weights_dir, detections_dir):
self.weights_dir = weights_dir
self.detections_dir = detections_dir
class configs:
def __init__(self, dataset, dataloader, train, model, eval, criterion, output):
self.dataset = dataset
self.dataloader = dataloader
self.train = train
self.model = model
self.eval = eval
self.criterion = criterion
self.output = output
def build_absolute_paths(self):
self.dataset.dir = os.path.join(ROOT_DIR, self.dataset.dir)
self.dataset.bounding_boxes_dir = os.path.join(self.dataset.dir, self.dataset.bounding_boxes_dir)
self.dataset.images_dir = os.path.join(self.dataset.dir, self.dataset.images_dir)
self.output.weights_dir = os.path.join(ROOT_DIR, self.output.weights_dir)
self.output.detections_dir = os.path.join(self.dataset.dir, self.output.detections_dir)
self.model.basenet = os.path.join(self.output.weights_dir, self.model.basenet)
if self.train.resume:
self.train.resume = os.path.join(self.output.weights_dir, self.train.resume)
self.eval.model_name = os.path.join(self.output.weights_dir, self.eval.model_name)
def get_config_names(self):
conf_categories = list(vars(self).keys())
configuration_names = []
for category in conf_categories:
conf_names = list(vars(getattr(self, category)).keys())
for conf in conf_names:
configuration_names.append(category + '_' + conf)
return configuration_names
def replace(self, new_config_dict):
for new_conf_name in new_config_dict:
conf_tuple = separate_config_name(new_conf_name)
setattr(getattr(self, conf_tuple[0]), conf_tuple[1], new_config_dict[new_conf_name])
def dict(self):
dict = vars(self).copy()
for category in dict:
dict[category] = vars(dict[category])
return dict
def __str__(self):
conf_categories = list(vars(self).keys())
configurations = []
for category in conf_categories:
conf_names = list(vars(getattr(self, category)).keys())
for conf in conf_names:
configurations.append(
'{}_{} : {}\n'.format(category, conf, repr(getattr(getattr(self, category), conf))))
return "".join(configurations)
def create_config_obj(config_dict):
dataset_dict = config_dict['dataset']
dir = dataset_dict['dir']
name = dataset_dict['name']
num_classes = dataset_dict['num_classes']
classes_name = dataset_dict['classes_name']
images_dir = dataset_dict['images_dir']
object_properties = dataset_dict['object_properties']
augmentation = dataset_dict['augmentation']
bounding_boxes_dir = dataset_dict['bounding_boxes_dir']
dataset_conf = dataset(dir, name, num_classes, classes_name, images_dir, object_properties, augmentation,
bounding_boxes_dir)
dataloader_dict = config_dict['dataloader']
batch_size = dataloader_dict['batch_size']
num_workers = dataloader_dict['num_workers']
dataloader_conf = dataloader(batch_size, num_workers)
train_dict = config_dict['train']
cuda = train_dict['cuda']
num_epochs = train_dict['num_epochs']
start_epoch = train_dict['start_epoch']
resume = train_dict['resume']
resume_weights_only = train_dict['resume_weights_only']
lr_init = train_dict['lr_init']
lr_schedule = train_dict['lr_schedule']
lr_decay = train_dict['lr_decay']
momentum = train_dict['momentum']
weight_decay = train_dict['weight_decay']
visdom = train_dict['visdom']
train_conf = train(cuda, num_epochs, start_epoch, resume, resume_weights_only,
lr_init, lr_schedule, lr_decay, momentum, weight_decay, visdom)
model_dict = config_dict['model']
basenet = model_dict['basenet']
num_classes = model_dict['num_classes']
pixel_means = model_dict['pixel_means']
feature_maps_dim = model_dict['feature_maps_dim']
input_size = model_dict['input_size']
prior_box_scales = model_dict['prior_box_scales']
prior_box_aspect_ratios = model_dict['prior_box_aspect_ratios']
prior_box_clip = model_dict['prior_box_clip']
prior_box_variance = model_dict['prior_box_variance']
model_conf = model(basenet, num_classes, pixel_means, feature_maps_dim, input_size, prior_box_scales,
prior_box_aspect_ratios, prior_box_clip, prior_box_variance)
eval_dict = config_dict['eval']
model_name = eval_dict['model_name']
overwrite_all_detections = eval_dict['overwrite_all_detections']
confidence_threshold = eval_dict['confidence_threshold']
top_k = eval_dict['top_k']
cuda = eval_dict['cuda']
eval_conf = eval(model_name, overwrite_all_detections, confidence_threshold, top_k, cuda)
criterion_dict = config_dict['criterion']
criterion_conf = criterion(criterion_dict['train'])
output_dict = config_dict['output']
weights_dir = output_dict['weights_dir']
detections_dir = output_dict['detections_dir']
output_conf = output(weights_dir, detections_dir)
configs_obj = configs(dataset_conf, dataloader_conf, train_conf, model_conf, eval_conf, criterion_conf, output_conf)
# check that the config object has all the defined configurations
configuration_names = get_configs_name()
obj_configuration_names = set(configs_obj.get_config_names())
missing_configurations = set(configuration_names).difference(obj_configuration_names)
if missing_configurations:
raise Exception('The following configurations are missing: {}'.format(missing_configurations))
# throw warnings if some configurations were not used
input_configuration_names = set(join_configs_categories(config_dict).keys())
unused_configurations = input_configuration_names.difference(obj_configuration_names)
if unused_configurations:
print('WARNING! The following configurations have not been used: {}'.format(unused_configurations))
# configure the augmentation scheme
if configs_obj.dataset.augmentation ==
dtype=np.float).flatten()
x = np.asanyarray(x, dtype=np.float).flatten()
if np.isnan(mindx):
mindx = 0
mindx = mindx or 0
if np.isnan(startdat):
startdat = 0
startdat = startdat or 0
# No strict validation here; these are scalars and they must be validated
# before going into the C-layer
if not utils.isscalar(mindx):
raise ValueError("'mindx' must be scalar, NaN, or empty.")
if not utils.isscalar(startdat):
raise ValueError("'startdat' must be scalar, NaN, or empty.")
# Confirm that there are still data points left, else abort:
if np.abs(x[0] - x[-1]) < mindx:
out = np.zeros(x.shape)
out.fill(1)
log.warn('Too few values to inspect')
return out
grad_min = ddatdx[0]
grad_max = ddatdx[1]
out = gradientvalues(dat, x, grad_min, grad_max, mindx, startdat, toldat)
return out
def dataqc_solarelevation(lon, lat, dt):
"""
Description
Computes instantaneous no-sky solar radiation and altitude from date
and time stamp and position data. It is put together from expressions
taken from Appendix E in the 1978 edition of Almanac for Computers,
Nautical Almanac Office, U.S. Naval Observatory. They are reduced
accuracy expressions valid for the years 1800-2100. Solar declination
computed from these expressions is accurate to at least 1'. The solar
constant (1368.0 W/m^2) represents a mean of satellite measurements
made over the last sunspot cycle (1979-1995) taken from Coffey et al
(1995), Earth System Monitor, 6, 6-10.
This code is a python implementation of soradna1.m available in Air-Sea
Toolbox.
Implemented by:
1997-03-08: Version 1.0 (author unknown) of soradna1.m.
1998-08-28: Version 1.1 (author unknown) of soradna1.m.
1999-08-05: Version 2.0 (author unknown) of soradna1.m.
2013-04-07: <NAME>. Initial python implementation. Note,
this function is derived from old, unmaintained code. More robust
implementations exist (e.g. PyEphem and PySolar) that will probably
calculate these values more accurately.
Usage:
z, sorad = dataqc_solarelevation(lon, lat, dt)
where
z = solar altitude [degrees]
sorad = no atmosphere solar radiation [W m^-2]
lon = longitude (east is positive) [decimal degrees]
lat = latitude [decimal degrees]
dt = date and time stamp in UTC [seconds since 1970-01-01]
Examples
dt = 1329177600 # 2012-02-14 00:00:00
z, sorad = dataqc_solarelevation(120, 30, dt)
z = 15.1566, sorad = 366.8129
OOI (2012). Data Product Specification for Solar Elevation. Document
Control Number 1341-100011.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10011_Data_Product_SPEC_SOLRELV_OOI.pdf)
"""
# Test lengths and types of inputs. Latitude and longitude must be the same
# size and can either be a scalar or a vector. The date and time stamp
# can also be either a scalar or a vector. If all three inputs are vectors,
# they must be of the same length.
if len(lon) != len(lat):
raise ValueError('\'lon\' and \'lat\' must be the same size')
if utils.isvector(lon) and utils.isvector(lat) and utils.isvector(dt):
# test their lengths
if not len(lon) == len(lat) == len(dt):
raise ValueError('If all inputs are vectors, these must all '
'be of the same length')
# set constants (using values from as_consts.m)
# ------ short-wave flux calculations
# the solar constant [W m^-2] represents a mean of satellite measurements
# made over the last sunspot cycle (1979-1995), taken from Coffey et al.
# (1995), Earth System Monitor, 6, 6-10.
solar_const = 1368.0
# Create a time tuple in UTC from the Epoch time input, and then create
# scalars or numpy arrays of time elements for subsequent calculations.
ldt = len(dt)
yy = np.zeros(ldt, dtype=int)  # use the builtin int: the np.int alias is deprecated/removed in newer NumPy
mn = np.zeros(ldt, dtype=int)
dd = np.zeros(ldt, dtype=int)
hh = np.zeros(ldt, dtype=int)
mm = np.zeros(ldt, dtype=int)
ss = np.zeros(ldt, dtype=int)
for i in range(ldt):
# create time tuple in UTC
gtime = time.gmtime(dt[i])
# create scalar elements
yy[i] = gtime[0]
mn[i] = gtime[1]
dd[i] = gtime[2]
hh[i] = gtime[3]
mm[i] = gtime[4]
ss[i] = gtime[5]
#constants used in function
deg2rad = np.pi / 180.0
rad2deg = 1 / deg2rad
# compute Universal Time in hours
utime = hh + (mm + ss / 60.0) / 60.0
# compute Julian ephemeris date in days (Day 1 is 1 Jan 4713 B.C. which
# equals -4712 Jan 1)
jed = (367.0 * yy - np.fix(7.0*(yy+np.fix((mn+9)/12.0))/4.0)
+ np.fix(275.0*mn/9.0) + dd + 1721013 + utime / 24.0)
# compute interval in Julian centuries since 1900
jc_int = (jed - 2415020.0) / 36525.0
# compute mean anomaly of the sun
ma_sun = 358.475833 + 35999.049750 * jc_int - 0.000150 * jc_int**2
ma_sun = (ma_sun - np.fix(ma_sun/360.0) * 360.0) * deg2rad
# compute mean longitude of sun
ml_sun = 279.696678 + 36000.768920 * jc_int + 0.000303 * jc_int**2
ml_sun = (ml_sun - np.fix(ml_sun/360.0) * 360.0) * deg2rad
# compute mean anomaly of Jupiter
ma_jup = 225.444651 + 2880.0 * jc_int + 154.906654 * jc_int
ma_jup = (ma_jup - np.fix(ma_jup/360.0) * 360.0) * deg2rad
# compute longitude of the ascending node of the moon's orbit
an_moon = (259.183275 - 1800 * jc_int - 134.142008 * jc_int
+ 0.002078 * jc_int**2)
an_moon = (an_moon - np.fix(an_moon/360.0) * 360.0 + 360.0) * deg2rad
# compute mean anomaly of Venus
ma_ven = (212.603219 + 58320 * jc_int + 197.803875 * jc_int
+ 0.001286 * jc_int**2)
ma_ven = (ma_ven - np.fix(ma_ven/360.0) * 360.0) * deg2rad
# compute sun theta
theta = (0.397930 * np.sin(ml_sun) + 0.009999 * np.sin(ma_sun-ml_sun)
+ 0.003334 * np.sin(ma_sun+ml_sun) - 0.000208 * jc_int
* np.sin(ml_sun) + 0.000042 * np.sin(2*ma_sun+ml_sun) - 0.000040
* np.cos(ml_sun) - 0.000039 * np.sin(an_moon-ml_sun) - 0.000030
* jc_int * np.sin(ma_sun-ml_sun) - 0.000014
* np.sin(2*ma_sun-ml_sun) - 0.000010
* np.cos(ma_sun-ml_sun-ma_jup) - 0.000010 * jc_int
* np.sin(ma_sun+ml_sun))
# compute sun rho
rho = (1.000421 - 0.033503 * np.cos(ma_sun) - 0.000140 * np.cos(2*ma_sun)
+ 0.000084 * jc_int * np.cos(ma_sun) - 0.000033
* np.sin(ma_sun-ma_jup) + 0.000027 * np.sin(2.*ma_sun-2.*ma_ven))
# compute declination
decln = np.arcsin(theta/np.sqrt(rho))
# compute equation of time (in seconds of time)
l = 276.697 + 0.98564734 * (jed-2415020.0)
l = (l - 360.0 * np.fix(l/360.0)) * deg2rad
eqt = (-97.8 * np.sin(l) - 431.3 * np.cos(l) + 596.6 * np.sin(2*l)
- 1.9 * np.cos(2*l) + 4.0 * np.sin(3*l) + 19.3 * np.cos(3*l)
- 12.7 * np.sin(4*l))
eqt = eqt / 60.0
# compute local hour angle from global hour angle
gha = 15.0 * (utime-12) + 15.0 * eqt / 60.0
lha = gha - lon
# compute radius vector
rv = np.sqrt(rho)
# compute solar altitude
sz = (np.sin(deg2rad*lat) * np.sin(decln) + np.cos(deg2rad*lat)
* np.cos(decln) * np.cos(deg2rad*lha))
z = rad2deg * np.arcsin(sz)
# compute solar radiation outside atmosphere (defaults to 0 when solar
# altitude is below the horizon)
sorad = (solar_const / rv**2) * np.sin(deg2rad * z)
sorad[z < 0] = 0
return (z, sorad)
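# Minimal usage sketch of dataqc_solarelevation (not part of the original module), mirroring the
# example in the docstring above. It assumes numpy arrays as inputs, since the function indexes
# and iterates over lon, lat and dt.
def _example_solarelevation():
    lon = np.array([120.0])      # decimal degrees, east positive
    lat = np.array([30.0])       # decimal degrees
    dt = np.array([1329177600])  # seconds since 1970-01-01 (2012-02-14 00:00:00 UTC)
    z, sorad = dataqc_solarelevation(lon, lat, dt)
    return z, sorad  # roughly 15.16 degrees and 366.8 W m^-2 per the docstring example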
def dataqc_propagateflags_wrapper(strict_validation=False, *args):
'''
This is a function that wraps dataqc_propagateflags for use in ION
It accepts a variable number of vector arguments (of the same shape) and calls dataqc_propagateflags
'''
if not strict_validation:
shapes = np.array([i.shape[0] for i in args])
if not (shapes == shapes[0]).all():
raise ValueError('Input vectors are not the same shape')
return dataqc_propagateflags(np.array(args), strict_validation=strict_validation)
def dataqc_propagateflags(inflags, strict_validation=False):
"""
Description:
Propagate "bad" qc flags (from an arbitrary number of source datasets)
to another (derived) dataset.
Consider data from an oceanographic CTD (conductivity, temperature, and
pressure) instrument. From these three time series, you want to compute
salinity. If any of the three source data (conductivity, temperature,
pressure) is of bad quality, the salinity will be bad as well. You can
feed your QC assessment of the former three into this routine, which
will then give you the combined assessment for the derived (here:
salinity) property.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
outflag = dataqc_propagateflags(inflags)
where
outflag = a 1-by-N boolean vector that contains 1 where all of the
inflags are 1, and 0 otherwise.
inflags = an M-by-N boolean matrix, where each of the M rows contains
flags of an independent data set such that "0" means bad data and
"1" means good data.
References:
OOI (2012). Data
3
Args:
file_path (str): path to pickle file
Returns:
obj (object): object saved in pickle file
'''
with open(file_path, 'rb') as f:
try:
obj = pickle.load(f, encoding='latin1')
except TypeError: # pragma no cover
obj = pickle.load(f) # pragma no cover
return obj
def rate_conversion(annual_rate, start_age, end_age, S):
'''
This function converts annual rates to model-period rates.
Args:
annual_rate (array_like): annualized rates
start_age (int): age at which agents become economically active
end_age (int): maximum age of agents
S (int): number of model periods in agents life
Returns:
rate (array_like): model period rates
'''
rate = (1 + annual_rate) ** ((end_age - start_age) / S) - 1
return rate
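# Worked example for rate_conversion (a sketch, not part of the original module): with
# start_age=20, end_age=100 and S=80 the exponent is 1, so the model-period rate equals the
# annual rate; with S=40 each model period spans two years and the rate compounds accordingly.
def _example_rate_conversion():
    annual_rate = np.array([0.04])
    same_period = rate_conversion(annual_rate, 20, 100, 80)   # -> 0.04
    two_year = rate_conversion(annual_rate, 20, 100, 40)      # -> (1.04 ** 2) - 1 = 0.0816
    return same_period, two_year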
def save_return_table(table_df, output_type, path, precision=2):
'''
Function to save or return a table of data.
Args:
table_df (Pandas DataFrame): table
output_type (string): specifies the type of file to save
table to: 'csv', 'tex', 'excel', 'json', 'html'
path (string): specifies path to save file with table to
precision (integer): number of decimal places to print.
    Defaults to 2.
Returns:
table_df (Pandas DataFrame): table
'''
pd.options.display.float_format = (
'{:,.' + str(precision) + 'f}').format
if path is None:
if output_type == 'tex':
tab_str = table_df.to_latex(index=False, na_rep='')
return tab_str
elif output_type == 'json':
tab_str = table_df.to_json(double_precision=precision)
return tab_str
elif output_type == 'html':
tab_html = table_df.to_html(
classes="table table-striped table-hover"
).replace('\n', '')
return tab_html
else:
return table_df
else:
if output_type == 'tex':
table_df.to_latex(buf=path, index=False, na_rep='')
elif output_type == 'csv':
table_df.to_csv(path_or_buf=path, index=False, na_rep='')
elif output_type == 'json':
table_df.to_json(path_or_buf=path,
double_precision=precision)
elif output_type == 'excel':
table_df.to_excel(excel_writer=path, index=False, na_rep='')
else:
print('Please enter a valid output format') # pragma no cover
assert(False) # pragma no cover
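# Usage sketch for save_return_table (assumes pandas is imported as pd, as elsewhere in this
# module): passing path=None returns the formatted table instead of writing a file.
def _example_save_return_table():
    table_df = pd.DataFrame({'Variable': ['GDP', 'Consumption'], 'Value': [1.2345, 0.9876]})
    tex_str = save_return_table(table_df, 'tex', path=None, precision=3)
    return tex_str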
class Inequality():
'''
A class with methods to compute different measures of inequality.
'''
def __init__(self, dist, pop_weights, ability_weights, S, J):
'''
Args:
dist (Numpy array): distribution of endogenous variables
over age and lifetime income group, size, SxJ
pop_weights (Numpy array): fraction of population by each
age, length S
ability_weights (Numpy array): fraction of population for
each lifetime income group, length J
S (int): number of economically active periods in lifetime
J (int): number of ability types
Returns:
None
'''
self.dist = dist
self.pop_weights = pop_weights
self.ability_weights = ability_weights
weights = (np.tile(pop_weights.reshape(S, 1), (1, J)) *
ability_weights.reshape(1, J))
flattened_dist = dist.flatten()
flattened_weights = weights.flatten()
idx = np.argsort(flattened_dist)
self.sort_dist = flattened_dist[idx]
self.sort_weights = flattened_weights[idx]
self.cum_weights = np.cumsum(self.sort_weights)
def gini(self, type='overall'):
'''
Compute the Gini coefficient
Args:
type (string): which measure to compute: 'overall', 'age', or 'ability'
Returns:
gini_coeff (scalar): Gini coefficient
'''
if type == 'overall':
p = np.cumsum(self.sort_weights)
nu = np.cumsum(self.sort_dist * self.sort_weights)
elif type == 'age':
flattened_dist = self.dist.sum(axis=1).flatten()
flattened_weights = self.pop_weights.flatten()
idx = np.argsort(flattened_dist)
sort_dist = flattened_dist[idx]
sort_weights = flattened_weights[idx]/flattened_weights.sum()
p = np.cumsum(sort_weights)
nu = np.cumsum(sort_dist*sort_weights)
elif type == 'ability':
flattened_dist = self.dist.sum(axis=0).flatten()
flattened_weights = self.ability_weights.flatten()
idx = np.argsort(flattened_dist)
sort_dist = flattened_dist[idx]
sort_weights = flattened_weights[idx]/flattened_weights.sum()
p = np.cumsum(sort_weights)
nu = np.cumsum(sort_dist*sort_weights)
nu = nu / nu[-1]
gini_coeff = (nu[1:] * p[:-1]).sum() - (nu[:-1] * p[1:]).sum()
return gini_coeff
def var_of_logs(self):
'''
Compute the variance of logs
Args:
None
Returns:
var_ln_dist (scalar): variance of logs
'''
ln_dist = np.log(self.sort_dist)
weight_mean = ((
ln_dist * self.sort_weights).sum() / self.sort_weights.sum())
var_ln_dist = ((
(self.sort_weights * ((ln_dist - weight_mean) ** 2)).sum())
* (1. / (self.sort_weights.sum())))
return var_ln_dist
def ratio_pct1_pct2(self, pct1, pct2):
'''
Compute the pct1/pct2 percentile ratio
Args:
pct1 (scalar): percentile for the numerator value,
    in (0, 1).
pct2 (scalar): percentile for the denominator value,
    in (0, 1)
Returns:
pct_ratio (scalar): ratio of pct1 to pct2
Notes:
usually pct1 > pct2
'''
assert pct1 > 0
assert pct1 < 1
assert pct2 > 0
assert pct2 < 1
loc_pct1 = np.argmin(np.abs(self.cum_weights - pct1))
loc_pct2 = np.argmin(np.abs(self.cum_weights - pct2))
pct_ratio = self.sort_dist[loc_pct1] / self.sort_dist[loc_pct2]
return pct_ratio
def pct(self, pct):
'''
Returns value at given percentile
Args:
pct (scalar): percentile to compute the value at,
    in (0, 1).
Returns:
value (scalar): value of variable at pct
'''
assert pct > 0
assert pct < 1
loc_pct = np.argmin(np.abs(self.cum_weights - pct))
value = self.sort_dist[loc_pct]
return value
def top_share(self, pctile):
'''
Compute the top X% share
Args:
pctile (scalar): percentile to compute the top pctile% for,
in (0, 1).
Returns:
pctile_share (scalar): share of variable attributed to the
top pctile group
'''
assert pctile > 0
assert pctile < 1
loc_pctile = np.argmin(np.abs(self.cum_weights - (1 - pctile)))
pctile_share = ((
self.sort_dist[loc_pctile:] *
self.sort_weights[loc_pctile:]).sum() /
(self.sort_dist * self.sort_weights).sum())
return pctile_share
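# Small end-to-end sketch of the Inequality class (illustrative values only): two age groups
# (S=2) and two ability types (J=2) with equal population and ability weights.
def _example_inequality():
    dist = np.array([[1.0, 2.0], [3.0, 4.0]])   # S x J distribution
    pop_weights = np.array([0.5, 0.5])          # sums to 1 over ages
    ability_weights = np.array([0.5, 0.5])      # sums to 1 over ability types
    ineq = Inequality(dist, pop_weights, ability_weights, S=2, J=2)
    return ineq.gini(), ineq.top_share(0.1), ineq.ratio_pct1_pct2(0.9, 0.1)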
def read_cbo_forecast():
'''
This function reads the CBO Long-Term Budget Projections document
from https://www.cbo.gov/about/products/budget-economic-data#1
and then formats the relevant data for use with OG-Core
'''
CBO_LT_URL = (
'https://www.cbo.gov/system/files/2020-09/51119-2020-09-ltbo_0.xlsx'
)
# Read in data
df = pd.read_excel(CBO_LT_URL, sheet_name='3. Economic Vars',
skiprows=7, nrows=45)
df.drop(columns=['Unnamed: 3', 'Unnamed: 4'], inplace=True)
df[~((pd.isnull(df['Unnamed: 0'])) & (pd.isnull(df['Unnamed: 1'])) &
(pd.isnull(df['Unnamed: 2'])))]
df.fillna(value='', inplace=True)
df['full_var_name'] = (df['Unnamed: 0'] + df['Unnamed: 1'] +
df['Unnamed: 2'])
CBO_VAR_NAMES = {
'Real GDP (Billions of 2019 dollars) ': 'Y',
'On 10-year Treasury notes and the OASDI trust funds': 'r',
'Growth of Real Earnings per Worker': 'w_growth',
'Growth of Total Hours Worked': 'L_growth',
'Hours of All Persons (Nonfarm Business Sector)': 'L',
'Personal Consumption Expenditures': 'C',
'Gross Private Domestic Investment': 'I_total',
'Government Consumption Expenditures and Gross Investment': 'G',
'Old-Age and Survivors Insurance': 'agg_pension_outlays',
'Individual income taxes': 'iit_revenue',
'Payroll taxes': 'payroll_tax_revenue',
'Corporate income taxes': 'business_tax_revenue',
'Wages and Salaries': 'wL'}
df['var_name'] = df['full_var_name'].replace(CBO_VAR_NAMES)
# keep just variables of interest
df.drop(columns=[
'Unnamed: 0', 'Unnamed: 1', 'Unnamed: 2', 'full_var_name'],
inplace=True)
df = df[df['var_name'].isin(CBO_VAR_NAMES.values())]
# Keep just real interest rate (not nominal)
# Note that real interest rate comes first in table
df.drop_duplicates(subset='var_name', inplace=True)
# reshape so that variable names down column
df = pd.melt(df, id_vars='var_name',
value_vars=[i for i in range(1990, 2051)])
df = df.pivot(index='variable', columns='var_name', values='value')
df.reset_index(inplace=True)
df.rename(columns={'variable': 'year'}, inplace=True)
# add debt forcast
df_fiscal = pd.read_excel(CBO_LT_URL,
sheet_name='1. Summary Extended Baseline',
skiprows=9, nrows=32)
df_fiscal = df_fiscal[['Fiscal Year', 'Revenues',
'Federal Debt Held by the Public']]
df_lt = df.merge(df_fiscal, left_on='year', right_on='Fiscal Year',
how='left')
df_lt.rename(columns={'Federal Debt Held by the Public': 'D/Y'},
inplace=True)
df_lt['D'] = df_lt['Y'] * df_lt['D/Y']
CBO_10yr_budget_URL = (
'https://www.cbo.gov/system/files/2021-02/51118-2021-02-11-' +
'budgetprojections.xlsx')
df = pd.read_excel(CBO_10yr_budget_URL, sheet_name='Table 1-1',
skiprows=8, nrows=7)
df.rename(
columns={'Unnamed: 0': 'variable', 'Actual, \n2020': 2020},
inplace=True)
df.drop(columns=['Unnamed: 15', 'Unnamed: 16',
'2026.1', '2031.1'], inplace=True)
df1 = df[~((pd.isnull(df.variable)) | (df.variable == 'Other'))]
df = pd.read_excel(CBO_10yr_budget_URL, sheet_name='Table 1-3',
skiprows=9, nrows=22)
df.rename(columns={'Unnamed: 0': 'variable'}, inplace=True)
df.drop(columns=['2026.1', '2031.1'],
inplace=True)
df.drop_duplicates(subset='variable', keep='last', inplace=True)
df2 = df[~pd.isnull(df.variable)]
CBO_10yr_macro_URL = (
'https://www.cbo.gov/system/files/2021-02/51135-2021-02-' +
'economicprojections.xlsx')
df = pd.read_excel(CBO_10yr_macro_URL,
sheet_name='2. Calendar Year', skiprows=6,
nrows=131)
df.rename(columns={'Unnamed: 1': 'variable'}, inplace=True)
df.drop(columns=[
'Unnamed: 0', 'Unnamed: 2', 'Units', 'Unnamed: 19',
'Unnamed: 20', 'Unnamed: 21', 'Unnamed: 22', 'Unnamed: 23',
'Unnamed: 24'], inplace=True)
# Note that real values come second (after nominal values)
df.drop_duplicates(subset='variable', keep='last', inplace=True)
df3 = df[~pd.isnull(df.variable)]
df_st = df1.append(df2, sort=False, ignore_index=True).append(
df3, sort=False, ignore_index=True)
df_st['var_name'] = df_st['variable'].replace(CBO_VAR_NAMES)
df_st = df_st[~pd.isnull(df_st.var_name)]
df_st.drop(columns=['variable'], inplace=True)
# reshape so each row a year
df_st = pd.melt(df_st, id_vars='var_name',
value_vars=[i for i in range(2017, 2031)])
df_st = df_st.pivot(index='variable', columns='var_name',
values='value').reset_index()
df_st.rename(columns={'variable': 'year'}, inplace=True)
# merge with long term data
df_cbo = df_lt.merge(df_st, how='outer', on='year',
suffixes=('_lt', '_st'))
# replace * with 0
df_cbo.replace(to_replace='*', value=0.0, inplace=True)
return df_cbo
def print_progress(iteration, total, source_name='', prefix='Progress:',
suffix='Complete', decimals=1, bar_length=50):
'''
Prints a progress bar to the terminal when completing small tasks
of a larger job.
Args:
iteration (int>=1): which task the job is currently doing
total (int>=1): how many tasks are in the job
source_name (string): name of source data
prefix (string): what to print before the progress bar
suffix (string): what to print after the progress bar
decimals (int>=0): how many decimals in the percentage
bar_length (int>=3): how many boxes in the progress bar
Functions called: None
Objects created within function:
status (string): status of download
str_format (string): string containing percentage completed
percents (string): percentage completed
filled_length (int): number of boxes in the progress bar to fill
bar (string): progress bar
Returns: status
'''
status = 'Incomplete'
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length *
"""
This model is taken from: https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
GPT model:
- the initial stem consists of a combination of token encoding and a positional encoding
- the meat of it is a uniform sequence of Transformer blocks
- each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block
- all blocks feed into a central residual pathway similar to resnets
- the final decoder is a linear projection into a vanilla Softmax classifier
"""
import math
import json
import torch
import torch.nn as nn
from torch.nn import functional as F
from absl import logging
import numpy as np
from torch.distributions import Normal, Independent
class GPTConfig:
""" base GPT config, params common to all GPT versions """
embd_pdrop = 0.1
resid_pdrop = 0.1
attn_pdrop = 0.1
def __init__(self, obs_size, action_size, block_size, **kwargs):
self.obs_size = obs_size
self.action_size = action_size
self.block_size = block_size
for k,v in kwargs.items():
setattr(self, k, v)
def to_json(self, output_fname):
with open(output_fname, 'w') as f:
f.write(json.dumps(self.__dict__))
@staticmethod
def from_json(fname):
with open(fname, 'r') as f:
kwargs = json.loads(f.read())
return GPTConfig(**kwargs)
class GPT1Config(GPTConfig):
""" GPT-1 like network roughly 125M params """
n_layer = 12
n_head = 12
n_embd = 768
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
It is possible to use torch.nn.MultiheadAttention here but I am including an
explicit implementation here to show that there is nothing too scary here.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
# key, query, value projections for all heads
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
# regularization
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
# output projection
self.proj = nn.Linear(config.n_embd, config.n_embd)
# causal mask to ensure that attention is only applied to the left in the input sequence
self.register_buffer("mask", torch.tril(torch.ones(config.block_size, config.block_size))
.view(1, 1, config.block_size, config.block_size))
self.n_head = config.n_head
def forward(self, x, layer_past=None):
B, T, C = x.size()
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_drop(self.proj(y))
return y
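# Shape-check sketch for CausalSelfAttention (not part of the original file): builds a small
# config and runs a forward pass. n_layer is included only because GPTConfig stores arbitrary
# keyword arguments; the dropout probabilities come from GPTConfig's class-level defaults.
def _example_causal_self_attention():
    config = GPTConfig(obs_size=8, action_size=4, block_size=16, n_embd=32, n_head=4, n_layer=1)
    attn = CausalSelfAttention(config)
    x = torch.randn(2, 16, 32)   # (batch, sequence, embedding)
    y = attn(x)                  # same shape as x; each position only attends to the past
    return y.shape               # torch.Size([2, 16, 32])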
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.attn = CausalSelfAttention(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd),
nn.GELU(),
nn.Linear(4 * config.n_embd, config.n_embd),
nn.Dropout(config.resid_pdrop),
)
def forward(self, x):
x = x + self.attn(self.ln1(x))
x = x + self.mlp(self.ln2(x))
return x
class GPT(nn.Module):
""" the full GPT language model, with a context size of block_size """
def __init__(self, config):
super().__init__()
# input embedding stem
self.tok_emb = nn.Linear(config.obs_size, config.n_embd)
# self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
self.drop = nn.Dropout(config.embd_pdrop)
# transformer
self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
# decoder head
self.ln_f = nn.LayerNorm(config.n_embd)
# self.head = nn.Linear(config.n_embd, config.action_size, bias=False)
self.head = SquashedGaussianHead(config.n_embd, config.action_size, act_limit=1)
self.block_size = config.block_size
self.observables = config.observables
self.apply(self._init_weights)
self.criterion = nn.MSELoss()
logging.info("%s number of parameters: %e", self.__class__.__name__, sum(p.numel() for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def configure_optimizers(self, train_config):
"""
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
We are then returning the PyTorch optimizer object.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
no_decay.add('pos_emb')
# validate that we considered every parameter
param_dict = {pn: p for pn, p in self.named_parameters()}
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
return optimizer
def forward(self, idx, targets=None):
b, t, d = idx.size()
assert t <= self.block_size, "Cannot forward, model block size is exhausted."
# forward the GPT model
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
x = self.drop(token_embeddings + position_embeddings)
x = self.blocks(x)
x = self.ln_f(x)
# logits = self.head(x)
logits, logp_a = self.head(x, act=targets, deterministic=True)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
# loss = self.criterion(logits, targets)
# loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
loss = -logp_a
return logits, loss
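# The decay/no-decay split in GPT.configure_optimizers above generalizes to any module: biases
# and normalization/embedding weights are excluded from weight decay. A stripped-down sketch of
# the same idea using a simple name-based heuristic instead of the isinstance checks above
# (hyperparameter values here are placeholders):
def _example_weight_decay_groups(module, weight_decay=0.1, learning_rate=3e-4):
    decay, no_decay = [], []
    for name, param in module.named_parameters():
        if name.endswith('bias') or 'ln' in name or 'norm' in name.lower():
            no_decay.append(param)   # biases and normalization weights: no decay
        else:
            decay.append(param)      # everything else gets weight decay
    groups = [{'params': decay, 'weight_decay': weight_decay},
              {'params': no_decay, 'weight_decay': 0.0}]
    return torch.optim.AdamW(groups, lr=learning_rate)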
class FFConfig:
""" base GPT config, params common to all GPT versions """
hidden_size = 1024
def __init__(self, obs_size, action_size, block_size, **kwargs):
assert block_size == 1, f"FFNet requires block_size=1."
self.obs_size = obs_size
self.action_size = action_size
self.block_size = block_size
for k,v in kwargs.items():
setattr(self, k, v)
def to_json(self, output_fname):
with open(output_fname, 'w') as f:
f.write(json.dumps(self.__dict__))
@staticmethod
def from_json(fname):
with open(fname, 'r') as f:
kwargs = json.loads(f.read())
return FFConfig(**kwargs)
class FFNet(nn.Module):
""" Fully connected baseline. Modeled after the network used in Comic. """
def __init__(self, config):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(config.obs_size, config.hidden_size),
nn.ReLU(),
nn.Linear(config.hidden_size, config.hidden_size),
nn.ReLU(),
nn.Linear(config.hidden_size, config.action_size)
)
self.criterion = nn.MSELoss()
# self.criterion = nn.L1Loss()
self.block_size = config.block_size
self.observables = config.observables
logging.info("%s number of parameters: %e", self.__class__.__name__, sum(p.numel() for p in self.parameters()))
def configure_optimizers(self, train_config):
optimizer = torch.optim.AdamW(self.mlp.parameters(), lr=train_config.learning_rate, betas=train_config.betas)
return optimizer
def forward(self, x, targets=None):
logits = self.mlp(x)
loss = None
if targets is not None:
loss = self.criterion(logits, targets)
return logits, loss
class GaussianHead(nn.Module):
""" The GaussianHead has a floating std deviation. """
def __init__(self, input_dim, act_dim):
super().__init__()
log_std = -0.5 * np.ones(act_dim, dtype=np.float32)
self.log_std_layer = torch.nn.Parameter(torch.as_tensor(log_std))
self.mu_layer = nn.Linear(input_dim, act_dim, bias=False)
def forward(self, obs, act=None, deterministic=False):
# Optionally pass in an action to get the log_prob of that action
mu = self.mu_layer(obs)
std = torch.exp(self.log_std_layer)
pi = Independent(Normal(mu, std), 1)
if act is None:
act = pi.mean if deterministic else pi.rsample()
log_prob = pi.log_prob(act)
return pi, act, log_prob
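# Sketch of how GaussianHead is used: given a batch of feature vectors it returns the policy
# distribution, a (sampled or mean) action, and the log-probability of that action.
def _example_gaussian_head():
    head = GaussianHead(input_dim=16, act_dim=6)
    obs_features = torch.randn(4, 16)
    pi, act, log_prob = head(obs_features, deterministic=True)   # act == pi.mean here
    return act.shape, log_prob.shape                             # (4, 6) and (4,)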
class ActorCritic(nn.Module):
""" The default Actor-Critic network compatible with PPO & A2C. """
def __init__(self, obs_size, action_size, hidden_size):
super().__init__()
self.policy_net = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
)
self.policy_head = GaussianHead(hidden_size, action_size)
self.value_net = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, 1)
)
logging.info("%s number of parameters: %e", self.__class__.__name__, sum(p.numel() for p in self.parameters()))
def forward(self, obs, act=None, deterministic=False):
z = self.policy_net(obs)
pi, act, log_prob = self.policy_head(z, act, deterministic)
val = self.value_net(obs).squeeze(-1)
return pi, act, val, log_prob
def act(self,
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
NAME = "e91"
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL=250
"""Conclusions:
length 1000 and 5 appliances still dies immediately if learning rate is 0.1
"""
def exp_a(name):
"""RESULTS: Died immediately."""
print("e82 with seq length 1000 and 5 appliances")
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[80, 20, 20, 20, 600],
min_on_durations=[60, 60, 60, 300, 300],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
subsample_target=5,
train_buildings=[1],
validation_buildings=[1]
)
net = Net(
experiment_name=name + 'a',
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.1),
layers_config=[
{
'type': BLSTMLayer,
'num_units': 60,
'W_in_to_cell': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 80,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_b(name):
"""RESULTS: Finishes training. Learns dish washer pretty well and something else."""
print("e82 with seq length 1000 and 5 appliances and learning rate 0.01")
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[80, 20, 20, 20, 600],
min_on_durations=[60, 60, 60, 300, 300],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
subsample_target=5,
train_buildings=[1],
validation_buildings=[1]
)
net = Net(
experiment_name=name + 'b',
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.01),
layers_config=[
{
'type': BLSTMLayer,
'num_units': 60,
'W_in_to_cell': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 80,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_c(name):
"""RESULTS: Fails after 250 epochs. But learning looks promising!"""
print("e59 but with 5 appliances and 60 units in Conv1D")
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[80, 20, 20, 20, 600],
min_on_durations=[60, 60, 60, 300, 300],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
subsample_target=5,
train_buildings=[1],
validation_buildings=[1]
)
net = Net(
experiment_name=name + 'c',
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.1),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': BLSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 60,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_d(name):
"""Most promising yet! Finishes training. Learns everything but fridge and hair straighteners."""
print("e59 but with 5 appliances and learning rate 0.01")
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[80, 20, 20, 20, 600],
min_on_durations=[60, 60, 60, 300, 300],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
subsample_target=5,
train_buildings=[1],
validation_buildings=[1]
)
net = Net(
experiment_name=name + 'd',
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.01),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': BLSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 60,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_e(name):
"""Finishes training. Learns dish washer and that's about it. Worse than D."""
print("e59 but with 5 appliances and learning rate 0.01, linear outputs and MSE")
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[80, 20, 20, 20, 600],
min_on_durations=[60, 60, 60, 300, 300],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
subsample_target=5,
train_buildings=[1],
validation_buildings=[1]
)
net = Net(
experiment_name=name + 'e',
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=mse,
updates=partial(nesterov_momentum, learning_rate=0.01),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': BLSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 60,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None
}
]
)
return net
def exp_f(name):
"""Fails immediately."""
print("e59 but with 5 appliances and learning rate 0.1, and single output (Fridge)")
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[80, 20, 20, 20, 600],
min_on_durations=[60, 60, 60, 300, 300],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=True,
boolean_targets=False,
min_off_duration=60,
subsample_target=5,
train_buildings=[1],
validation_buildings=[1]
)
net = Net(
experiment_name=name + 'f',
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.1),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': BLSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 60,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_g(name):
"""Fails immediately"""
print("e59 but with 5 appliances and learning rate 0.1, and single output (Fridge), linear output and MSE")
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[80, 20, 20, 20, 600],
min_on_durations=[60, 60, 60, 300, 300],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=True,
boolean_targets=False,
min_off_duration=60,
subsample_target=5,
train_buildings=[1],
validation_buildings=[1]
)
net = Net(
experiment_name=name + 'g',
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=mse,
updates=partial(nesterov_momentum, learning_rate=0.1),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': BLSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 60,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None
}
]
)
return net
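# NOTE: the function below re-uses the name exp_g and therefore shadows the exp_g defined above
# (both also save their nets under the 'g' suffix); it was presumably meant to get its own letter.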
def exp_g(name):
"""Fails immediately."""
print("e59 but with 5 appliances and learning rate 0.1, and single output (washer), linear output and MSE")
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
'dish washer',
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[80, 20, 20, 20, 600],
min_on_durations=[60, 60, 60, 300, 300],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=True,
boolean_targets=False,
min_off_duration=60,
subsample_target=5,
train_buildings=[1],
validation_buildings=[1]
)
net = Net(
experiment_name=name + 'g',
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=mse,
updates=partial(nesterov_momentum, learning_rate=0.1),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': BLSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 60,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None
}
]
)
return net
def exp_h(name):
| |
= self.start_epoch*len(self.train_loader)
stage3_epoches = self.epoch_stage3 - self.start_epoch
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, stage3_epoches+5)
for epoch in range(self.start_epoch, self.epoch_stage3):
epoch += 1
self.unet.train(True)
epoch_loss = 0
self.reset_grad() # needed here because gradients are accumulated below
tbar = tqdm.tqdm(self.train_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
masks = masks.to(self.device)
assert images.size(2) == 1024
# SR : Segmentation Result
net_output = self.unet(images)
net_output_flat = net_output.view(net_output.size(0), -1)
masks_flat = masks.view(masks.size(0), -1)
loss_set = self.criterion_stage3(net_output_flat, masks_flat)
try:
loss_num = len(loss_set)
except:
loss_num = 1
# handle the cases depending on how many loss terms were returned
if loss_num > 1:
for loss_index, loss_item in enumerate(loss_set):
if loss_index > 0:
loss_name = 'stage3_loss_%d' % loss_index
self.writer.add_scalar(loss_name, loss_item.item(), global_step_before + i)
loss = loss_set[0]
else:
loss = loss_set
epoch_loss += loss.item()
# Backprop + optimize, see https://discuss.pytorch.org/t/why-do-we-need-to-set-the-gradients-manually-to-zero-in-pytorch/4903/20 for Accumulating Gradients
if epoch <= self.epoch_stage3 - self.epoch_stage3_accumulation:
self.reset_grad()
loss.backward()
self.optimizer.step()
else:
# loss = loss / self.accumulation_steps # Normalize our loss (if averaged)
loss.backward() # Backward pass
if (i+1) % self.accumulation_steps == 0: # Wait for several backward steps
self.optimizer.step() # Now we can do an optimizer step
self.reset_grad()
params_groups_lr = str()
for group_ind, param_group in enumerate(self.optimizer.param_groups):
params_groups_lr = params_groups_lr + 'params_group_%d' % (group_ind) + ': %.12f, ' % (param_group['lr'])
# log to TensorBoard, one value per step
self.writer.add_scalar('Stage3_train_loss', loss.item(), global_step_before+i)
descript = "Train Loss: %.7f, lr: %s" % (loss.item(), params_groups_lr)
tbar.set_description(desc=descript)
# update global_step_before in preparation for the next epoch
global_step_before += len(tbar)
# Print the log info
print('Finish Stage3 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage3, epoch_loss/len(tbar)))
write_txt(self.save_path, 'Finish Stage3 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage3, epoch_loss/len(tbar)))
# validate the model, save the weights and write the logs
loss_mean, dice_mean = self.validation(stage=3)
if dice_mean > self.max_dice:
is_best = True
self.max_dice = dice_mean
else: is_best = False
self.lr = lr_scheduler.get_lr()
state = {'epoch': epoch,
'state_dict': self.unet.module.state_dict(),
'max_dice': self.max_dice,
'optimizer' : self.optimizer.state_dict(),
'lr' : self.lr}
self.save_checkpoint(state, 3, index, is_best)
self.writer.add_scalar('Stage3_val_loss', loss_mean, epoch)
self.writer.add_scalar('Stage3_val_dice', dice_mean, epoch)
self.writer.add_scalar('Stage3_lr', self.lr[0], epoch)
# decay the learning rate
lr_scheduler.step()
def validation(self, stage=1):
# During validation, train(False) is required: it puts the BN, dropout, etc. layers into eval mode
# torch.no_grad() is optional; inside this context manager no gradients are tracked, which speeds things up and allows a larger batch size
self.unet.eval()
tbar = tqdm.tqdm(self.valid_loader)
loss_sum, dice_sum = 0, 0
if stage == 1:
criterion = self.criterion
elif stage == 2:
criterion = self.criterion_stage2
elif stage == 3:
criterion = self.criterion_stage3
with torch.no_grad():
for i, (images, masks) in enumerate(tbar):
images = images.to(self.device)
masks = masks.to(self.device)
net_output = self.unet(images)
net_output_flat = net_output.view(net_output.size(0), -1)
masks_flat = masks.view(masks.size(0), -1)
loss_set = criterion(net_output_flat, masks_flat)
try:
loss_num = len(loss_set)
except:
loss_num = 1
# handle the cases depending on how many loss terms were returned
if loss_num > 1:
loss = loss_set[0]
else:
loss = loss_set
loss_sum += loss.item()
# compute the dice coefficient; predictions are passed through a sigmoid and a threshold (default threshold 0.5)
net_output_flat_sign = (torch.sigmoid(net_output_flat)>0.5).float()
dice = self.dice_overall(net_output_flat_sign, masks_flat).mean()
dice_sum += dice.item()
descript = "Val Loss: {:.7f}, dice: {:.7f}".format(loss.item(), dice.item())
tbar.set_description(desc=descript)
loss_mean, dice_mean = loss_sum/len(tbar), dice_sum/len(tbar)
print("Val Loss: {:.7f}, dice: {:.7f}".format(loss_mean, dice_mean))
write_txt(self.save_path, "Val Loss: {:.7f}, dice: {:.7f}".format(loss_mean, dice_mean))
return loss_mean, dice_mean
# dice for threshold selection
def dice_overall(self, preds, targs):
n = preds.shape[0] # batch size
preds = preds.view(n, -1)
targs = targs.view(n, -1)
# preds, targs = preds.to(self.device), targs.to(self.device)
preds, targs = preds.cpu(), targs.cpu()
# element-wise product gives the intersection of the two sets (only 1*1 equals 1); summing over the second dimension gives a [batch size] tensor whose entries are the intersection sizes between ground truth and prediction
intersect = (preds * targs).sum(-1).float()
# element-wise sum gives the union of the two sets; summing over the second dimension gives a [batch size] tensor whose entries are the union sizes between ground truth and prediction
union = (preds + targs).sum(-1).float()
'''
There are two situations in which ground truth and prediction share no positive pixels: (1) neither mask contains any positive pixels, so the union sum is 0; (2) the ground truth has positive pixels but the prediction is completely wrong, so the union sum is non-zero.
For samples whose union sum is 0, set the intersection to 1 and the union to 2, so that 2 * intersection / union evaluates to 1.
In all other cases compute 2 * intersection / union directly; since the union above does not subtract the intersection, the factor of 2 is needed and the maximum value is 1.
'''
u0 = union == 0
intersect[u0] = 1
union[u0] = 2
return (2. * intersect / union)
def classify_score(self, preds, targs):
'''If the current image contains a mask it is treated as a positive sample, otherwise as a negative sample; this scores the accuracy from a classification point of view.
Args:
    preds: predicted mask tensor
    targs: ground-truth mask tensor
Return: classification accuracy
'''
n = preds.shape[0] # batch size
preds = preds.view(n, -1)
targs = targs.view(n, -1)
# preds, targs = preds.to(self.device), targs.to(self.device)
preds_, targs_ = torch.sum(preds, 1), torch.sum(targs, 1)
preds_, targs_ = preds_ > 0, targs_ > 0
preds_, targs_ = preds_.cpu(), targs_.cpu()
score = torch.sum(preds_ == targs_)
return score.item()/n
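    # Hedged self-check (not in the original class): dice_overall on hand-built masks, to make
    # the 2 * intersection / union convention above concrete. With intersection 1 and union 4,
    # the dice score is 2*1/4 = 0.5.
    def _dice_overall_selfcheck(self):
        preds = torch.tensor([[1., 0., 1., 0.]])
        targs = torch.tensor([[1., 1., 0., 0.]])
        return self.dice_overall(preds, targs)   # tensor([0.5000])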
def choose_threshold(self, model_path, index):
'''Linear search for the current model's best threshold and best pixel threshold: first find the best threshold with a coarse search followed by a fine search, then search for the best pixel threshold; also saves the search plots.
Args:
    model_path: path to the current model weights
    index: index of the current fold
Return: best threshold, best pixel threshold, best score
'''
self.unet.module.load_state_dict(torch.load(model_path)['state_dict'])
stage = eval(model_path.split('/')[-1].split('_')[2])
print('Loaded from %s, using choose_threshold!' % model_path)
self.unet.eval()
with torch.no_grad():
# first roughly narrow down the threshold range
dices_big = []
thrs_big = np.arange(0.1, 1, 0.1) # list of candidate thresholds
for th in thrs_big:
tmp = []
tbar = tqdm.tqdm(self.valid_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
net_output = torch.sigmoid(self.unet(images))
preds = (net_output > th).to(self.device).float() # values above the threshold become 1
# preds[preds.view(preds.shape[0],-1).sum(-1) < noise_th,...] = 0.0 # zero out noisy predictions with too few positive pixels
tmp.append(self.dice_overall(preds, masks).mean())
# tmp.append(self.classify_score(preds, masks))
dices_big.append(sum(tmp) / len(tmp))
dices_big = np.array(dices_big)
best_thrs_big = thrs_big[dices_big.argmax()]
# then refine the threshold range
dices_little = []
thrs_little = np.arange(best_thrs_big-0.05, best_thrs_big+0.05, 0.01) # list of candidate thresholds
for th in thrs_little:
tmp = []
tbar = tqdm.tqdm(self.valid_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
net_output = torch.sigmoid(self.unet(images))
preds = (net_output > th).to(self.device).float() # values above the threshold become 1
# preds[preds.view(preds.shape[0],-1).sum(-1) < noise_th,...] = 0.0 # zero out noisy predictions with too few positive pixels
tmp.append(self.dice_overall(preds, masks).mean())
# tmp.append(self.classify_score(preds, masks))
dices_little.append(sum(tmp) / len(tmp))
dices_little = np.array(dices_little)
# score = dices.max()
best_thr = thrs_little[dices_little.argmax()]
# search for the best pixel threshold
if stage != 3:
dices_pixel = []
pixel_thrs = np.arange(0, 2304, 256) # list of candidate pixel thresholds
for pixel_thr in pixel_thrs:
tmp = []
tbar = tqdm.tqdm(self.valid_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
net_output = torch.sigmoid(self.unet(images))
preds = (net_output > best_thr).to(self.device).float() # values above the threshold become 1
preds[preds.view(preds.shape[0],-1).sum(-1) < pixel_thr,...] = 0.0 # zero out noisy predictions with too few positive pixels
tmp.append(self.dice_overall(preds, masks).mean())
# tmp.append(self.classify_score(preds, masks))
dices_pixel.append(sum(tmp) / len(tmp))
dices_pixel = np.array(dices_pixel)
score = dices_pixel.max()
best_pixel_thr = pixel_thrs[dices_pixel.argmax()]
elif stage == 3:
best_pixel_thr, score = 0, dices_little.max()
print('best_thr:{}, best_pixel_thr:{}, score:{}'.format(best_thr, best_pixel_thr, score))
plt.figure(figsize=(10.4, 4.8))
plt.subplot(1, 3, 1)
plt.title('Large-scale search')
plt.plot(thrs_big, dices_big)
plt.subplot(1, 3, 2)
plt.title('Little-scale search')
plt.plot(thrs_little, dices_little)
plt.subplot(1, 3, 3)
plt.title('pixel thrs search')
if stage != 3:
plt.plot(pixel_thrs, dices_pixel)
plt.savefig(os.path.join(self.save_path, 'stage{}'.format(stage)+'_fold'+str(index)))
# plt.show()
plt.close()
return float(best_thr), float(best_pixel_thr), float(score)
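    # Generic coarse-to-fine sketch of the threshold search used by choose_threshold above
    # (an illustration, not part of the original pipeline): score_fn maps a threshold to a
    # validation score, e.g. the mean dice over the validation loader.
    def _coarse_to_fine_search(self, score_fn):
        coarse = np.arange(0.1, 1.0, 0.1)
        best_coarse = coarse[np.argmax([score_fn(t) for t in coarse])]
        fine = np.arange(best_coarse - 0.05, best_coarse + 0.05, 0.01)
        best_fine = fine[np.argmax([score_fn(t) for t in fine])]
        return float(best_fine)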
def pred_mask_count(self, model_path, masks_bool, val_index, best_thr, best_pixel_thr):
'''Load the model and, given the best threshold and best pixel threshold, report the classification behaviour on the validation set. Intended for the second training stage: after the thresholds have been chosen with the dice score, check the classification accuracy.
Args:
    model_path: path to the current model weights
    masks_bool: whether each sample in the full dataset contains a mask
    val_index: indices of the current validation set within the full dataset
    best_thr: the chosen best threshold
    best_pixel_thr: the chosen best pixel threshold
Return: None; prints how many samples truly contain a mask and how many were predicted to. Not rigorous, since this does not equal the accuracy.
'''
count_true, count_pred = 0,0
for index1 in val_index:
if masks_bool[index1]:
count_true += 1
self.unet.module.load_state_dict(torch.load(model_path)['state_dict'])
print('Loaded from %s' % model_path)
self.unet.eval()
with torch.no_grad():
tmp = []
tbar = tqdm.tqdm(self.valid_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
net_output = torch.sigmoid(self.unet(images))
preds = (net_output > best_thr).to(self.device).float() # values above the threshold become 1
preds[preds.view(preds.shape[0],-1).sum(-1) < best_pixel_thr,...] = 0.0 # zero out noisy predictions with too few positive pixels
n = preds.shape[0] # batch size
preds = preds.view(n, -1)
for index2 in range(n):
pred = preds[index2, ...]
if torch.sum(pred) > 0:
count_pred += 1
tmp.append(self.dice_overall(preds, masks).mean())
print('score:', sum(tmp) / len(tmp))
print('count_true:{}, count_pred:{}'.format(count_true, count_pred))
def grid_search(self, thrs_big, pixel_thrs):
'''Grid search for the best threshold and the best pixel threshold.
Args:
    thrs_big: sequence of thresholds for the grid search
    pixel_thrs: sequence of pixel thresholds for the grid search
Return: best threshold, best pixel threshold, best score, and the score at every grid position
'''
with torch.no_grad():
# roughly select the threshold range and pixel-threshold range first
dices_big = [] # 2-D matrix: each row holds the scores of all pixel thresholds for one threshold
for th in thrs_big:
dices_pixel = []
for pixel_thr in pixel_thrs:
tmp = []
tbar = tqdm.tqdm(self.valid_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
net_output = torch.sigmoid(self.unet(images))
preds = (net_output > th).to(self.device).float() # values above the threshold become 1
preds[preds.view(preds.shape[0],-1).sum(-1) < pixel_thr,...] = 0.0 # zero out noisy predictions with too few positive pixels
tmp.append(self.dice_overall(preds, masks).mean())
# tmp.append(self.classify_score(preds, masks))
dices_pixel.append(sum(tmp) / len(tmp))
dices_big.append(dices_pixel)
dices_big = np.array(dices_big)
print('Coarse selection of the best threshold and best pixel threshold, dices_big shape: {}'.format(np.shape(dices_big)))
re = np.where(dices_big == np.max(dices_big))
# handle the case where the maximum is attained at multiple grid positions
if np.shape(re)[1] != 1:
re = re[0]
best_thrs_big, best_pixel_thr = thrs_big[int(re[0])], pixel_thrs[int(re[1])]
best_thr, score = best_thrs_big, dices_big.max()
return best_thr, best_pixel_thr, score, dices_big
def choose_threshold_grid(self, model_path, index):
'''Grid search for the current model's best threshold and best pixel threshold, in a coarse stage followed by a fine stage; also saves heatmaps of the search.
Args:
    model_path: path to the current model weights
    index: index of the current fold
Return: best threshold, best pixel threshold, best score
'''
self.unet.module.load_state_dict(torch.load(model_path)['state_dict'])
stage = eval(model_path.split('/')[-1].split('_')[2])
print('Loaded from %s, using choose_threshold_grid!' % model_path)
self.unet.eval()
thrs_big1 = np.arange(0.60, 0.81, 0.015) # list of candidate thresholds
pixel_thrs1 = np.arange(768, 2305, 256) # list of candidate pixel thresholds
best_thr1, best_pixel_thr1, score1, dices_big1 = self.grid_search(thrs_big1, pixel_thrs1)
print('best_thr1:{}, best_pixel_thr1:{}, score1:{}'.format(best_thr1, best_pixel_thr1, score1))
        thrs_big2 = np.arange(best_thr1-0.015, best_thr1+0.015, 0.0075) # list of thresholds
        pixel_thrs2 = np.arange(best_pixel_thr1-256, best_pixel_thr1+257, 128) # list of pixel thresholds
best_thr2, best_pixel_thr2, score2, dices_big2 = self.grid_search(thrs_big2, pixel_thrs2)
print('best_thr2:{}, best_pixel_thr2:{}, score2:{}'.format(best_thr2, best_pixel_thr2, score2))
if score1 < score2: best_thr, best_pixel_thr, score, dices_big = best_thr2, best_pixel_thr2, score2, dices_big2
else: best_thr, best_pixel_thr, score, dices_big = best_thr1, best_pixel_thr1, score1, dices_big1
print('best_thr:{}, best_pixel_thr:{}, score:{}'.format(best_thr, best_pixel_thr, score))
f, (ax1, ax2) = plt.subplots(figsize=(14.4, 4.8), ncols=2)
cmap = sns.cubehelix_palette(start = 1.5, rot = 3, gamma=0.8, as_cmap = True)
data1 = pd.DataFrame(data=dices_big1, index=np.around(thrs_big1, 3), columns=pixel_thrs1)
sns.heatmap(data1, linewidths = 0.05, ax = ax1, vmax=np.max(dices_big1), vmin=np.min(dices_big1), cmap=cmap, annot=True, fmt='.4f')
ax1.set_title('Large-scale search')
data2 = pd.DataFrame(data=dices_big2, index=np.around(thrs_big2, 3), columns=pixel_thrs2)
sns.heatmap(data2, linewidths = 0.05, ax = ax2, vmax=np.max(dices_big2), vmin=np.min(dices_big2), cmap=cmap, annot=True, fmt='.4f')
ax2.set_title('Little-scale search')
f.savefig(os.path.join(self.save_path, 'stage{}'.format(stage)+'_fold'+str(index)))
# plt.show()
plt.close()
        return float(best_thr), float(best_pixel_thr), float(score)
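# Usage sketch (illustrative, not part of the original file): `solver` below is
# assumed to be an instance of the class these methods belong to, and the
# checkpoint path is a made-up example whose third "_"-separated token is the
# stage number that choose_threshold_grid() parses out of the filename.
#
#   best_thr, best_pixel_thr, score = solver.choose_threshold_grid(
#       './checkpoints/unet_stage_2_fold0_best.pth', index=0)
#
# choose_threshold_grid() runs the coarse grid first, then a fine grid centred
# on the coarse optimum, and returns whichever pair of thresholds scores higher.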
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Author: XuMing <<EMAIL>>
# Brief: corrector with spell and stroke
import codecs
import os
import pdb
import time
import math
import sys
import argparse
import jieba.posseg as pseg
from collections import defaultdict
from pypinyin import lazy_pinyin
pwd_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(pwd_path + '/../')
import pycorrector.config as config
from pycorrector.detector import detect
from pycorrector.detector import get_frequency
from pycorrector.detector import get_ppl_score
from pycorrector.detector import trigram_char
from pycorrector.detector import word_freq
from pycorrector.utils.io_utils import dump_pkl
from pycorrector.utils.io_utils import get_logger
from pycorrector.utils.io_utils import load_pkl
from pycorrector.utils.text_utils import is_chinese_string
from pycorrector.utils.text_utils import is_chinese
from pycorrector.utils.text_utils import traditional2simplified
from pycorrector.utils.text_utils import tokenize
default_logger = get_logger(__file__)
def load_char_dict(path):
char_dict = ''
with codecs.open(path, 'r', encoding='utf-8') as f:
for w in f:
char_dict += w.strip()
return char_dict
def load_2char_dict(path):
text = codecs.open(path, 'rb', encoding = 'utf-8').read()
return set(text.split('\n'))
def load_word_dict(path):
word_dict = set()
word_dict_file = codecs.open(path, 'rb', encoding = 'utf-8').readlines()
for line in word_dict_file:
word_dict.add(line.split()[0])
return word_dict
def load_same_pinyin(path, sep='\t'):
"""
    Load same-pinyin (homophone) characters.
:param path:
:return:
"""
result = dict()
if not os.path.exists(path):
default_logger.debug("file not exists:", path)
return result
with codecs.open(path, 'r', encoding='utf-8') as f:
for line in f:
line = traditional2simplified(line.strip())
parts = line.split(sep)
if parts and len(parts) > 2:
key_char = parts[0]
# same_pron_same_tone = set(list(parts[1]))
# same_pron_diff_tone = set(list(parts[2]))
# value = same_pron_same_tone.union(same_pron_diff_tone)
value = set(list("".join(parts)))
if len(key_char) > 1 or not value:
continue
result[key_char] = value
    # these pairs will be dealt with by rules
result['他'] -= {'她', '它'}
result['她'] -= {'他', '它'}
result['它'] -= {'她', '他'}
result['影'] -= {'音'}
result['车'] = result['扯']
return result
def load_same_stroke(path, sep=','):
"""
    Load similar-stroke (visually similar) characters.
:param path:
:param sep:
:return:
"""
result = defaultdict(set)
if not os.path.exists(path):
default_logger.debug("file not exists:", path)
return result
with codecs.open(path, 'r', encoding='utf-8') as f:
for line in f:
line = traditional2simplified(line.strip())
parts = line.strip().split(sep)
if parts and len(parts) > 1:
for i, c in enumerate(parts):
# result[c].add(c)
# result[c] |= set(list(parts[:i] + parts[i + 1:]))
result[c] |= set(parts)
return result
char_dict_path = os.path.join(pwd_path, config.char_dict_path)
cn_char_set = load_char_dict(char_dict_path)
two_char_dict = load_2char_dict(pwd_path + '/data/char_two_set.txt')
# # word dictionary
word_dict_text_path = os.path.join(pwd_path, config.word_dict_path)
word_dict_model_path = os.path.join(pwd_path, config.word_dict_model_path)
if os.path.exists(word_dict_model_path):
cn_word_set = load_pkl(word_dict_model_path)
else:
    default_logger.debug('load word dict from text file: %s', word_dict_text_path)
cn_word_set = load_word_dict(word_dict_text_path)
dump_pkl(cn_word_set, word_dict_model_path)
# similar pronunciation
same_pinyin_text_path = os.path.join(pwd_path, config.same_pinyin_text_path)
same_pinyin_model_path = os.path.join(pwd_path, config.same_pinyin_model_path)
# same_pinyin = load_same_pinyin(same_pinyin_text_path)
if os.path.exists(same_pinyin_model_path):
same_pinyin = load_pkl(same_pinyin_model_path)
else:
    default_logger.debug('load same pinyin from text file: %s', same_pinyin_text_path)
same_pinyin = load_same_pinyin(same_pinyin_text_path)
dump_pkl(same_pinyin, same_pinyin_model_path)
# similar shape
same_stroke_text_path = os.path.join(pwd_path, config.same_stroke_text_path)
same_stroke_model_path = os.path.join(pwd_path, config.same_stroke_model_path)
if os.path.exists(same_stroke_model_path):
same_stroke = load_pkl(same_stroke_model_path)
else:
    default_logger.debug('load same stroke from text file: %s', same_stroke_text_path)
same_stroke = load_same_stroke(same_stroke_text_path)
dump_pkl(same_stroke, same_stroke_model_path)
def get_confusion_char_set(char):
# confusion_char_set = get_same_pinyin(char).union(get_same_stroke(char))
confusion_char_set = same_pinyin.get(char, set())
confusion_char_set |= same_stroke.get(char, set())
if not confusion_char_set:
confusion_char_set = {char}
return confusion_char_set
def get_confusion_two_char_set(word):
return set([char_1 + char_2 for char_1 in get_confusion_char_set(word[0]) \
for char_2 in get_confusion_char_set(word[1]) \
if char_1 + char_2 in cn_word_set])
def _generate_items(sentence, idx, word, fraction=1):
if len(word) == 1:
confusion_word_set = set([i for i in get_confusion_char_set(word[0]) if i])
if len(word) > 1:
def combine_two_confusion_char(sentence, idx, word):
# # assuming there is only two char to change
# # definitely not the final version, need to be fixed!!!!
result = set()
for i in range(len(word) - 1):
for j in range(i + 1,len(word)):
result |= set([word[: i] + i_word + word[i + 1: j] + j_word + word[j + 1:] \
for i_word in get_confusion_char_set(word[i]) if i_word \
for j_word in get_confusion_char_set(word[j]) if j_word])
return result
def confusion_set(sentence, idx, word):
# maximum number of change char is set up by 'edit_distance'
# the maximum edit-distance
edit_distance = 2
cands_tmp = [['',0]]
result = set()
ids = list(range(int(idx.split(',')[0]), int(idx.split(',')[1])))
# # change individual char
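            # Breadth-first expansion over per-character confusion sets: each
            # queue entry is [prefix_built_so_far, edits_used]. A candidate
            # char is only appended if it forms a 2-gram present in
            # two_char_dict with a neighbouring char of the original sentence,
            # and at most `edit_distance` chars may differ from the original.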
while cands_tmp:
if len(cands_tmp[0][0]) == len(word):
result.add(cands_tmp[0][0])
elif cands_tmp[0][1] == edit_distance:
result.add(cands_tmp[0][0] + word[len(cands_tmp[0][0]):])
else:
target_idx = ids[len(cands_tmp[0][0])]
for char_cand in get_confusion_char_set(sentence[target_idx]):
if target_idx == 0:
if char_cand + sentence[target_idx + 1] not in two_char_dict:
continue
elif target_idx == len(sentence) - 1:
if sentence[target_idx - 1] + char_cand not in two_char_dict:
continue
elif char_cand + sentence[target_idx + 1] not in two_char_dict and \
sentence[target_idx - 1] + char_cand not in two_char_dict:
continue
if char_cand == sentence[target_idx]:
cands_tmp.append([cands_tmp[0][0] + char_cand, cands_tmp[0][1]])
else:
cands_tmp.append([cands_tmp[0][0] + char_cand, cands_tmp[0][1] + 1])
cands_tmp.pop(0)
# # change connected two chars
for i in range(len(word) - 1):
for char_i in get_confusion_char_set(word[i]):
for char_j in get_confusion_char_set(word[i + 1]):
if char_i + char_j in two_char_dict:
result.add(word[:i] + char_i + char_j + word[i + 2:])
return result
confusion_word_set = confusion_set(sentence, idx, word)
confusion_word_list = [item for item in confusion_word_set if is_chinese_string(item)]
confusion_sorted = sorted(confusion_word_list, key=lambda k: get_frequency(k), reverse=True)
return confusion_sorted[:len(confusion_word_list) // fraction + 1]
def get_sub_array(nums):
"""
    Get all runs of consecutive indices,
    [0, 1, 2, 5, 7, 8]
    => [[0, 3], [5], [7, 9]]
:param nums: sorted(list)
:return:
"""
ret = []
for i, c in enumerate(nums):
if i == 0:
pass
elif i <= ii:
continue
elif i == len(nums) - 1:
ret.append([c])
break
ii = i
cc = c
        # extend the current run of consecutive indices
while ii < len(nums) - 1 and nums[ii + 1] == cc + 1:
ii = ii + 1
cc = cc + 1
if ii > i:
ret.append([c, nums[ii] + 1])
else:
ret.append([c])
return ret
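# Worked example (follows directly from the code above):
#   get_sub_array([0, 1, 2, 5, 7, 8]) -> [[0, 3], [5], [7, 9]]
# i.e. each run of consecutive indices becomes a half-open [start, stop) pair
# and an isolated index becomes a single-element list.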
def get_valid_sub_array(sentence, sub_array_list):
"""
    Remove punctuation from the detected (suspect) index ranges.
    :param sentence: target sentence
    :param sub_array_list: index ranges of suspected strings
    :return valid_array: index ranges of valid suspected strings without punctuation
"""
# print(sub_array_list)
valid_array_detail = []
for sub_array in sub_array_list:
valid_sub_array_detail = []
if len(sub_array) == 1:
if is_chinese(sentence[sub_array[0]]):
valid_array_detail.append([sub_array[0], sub_array[0]])
else:
for i in range(sub_array[0], sub_array[1]):
if is_chinese(sentence[i]):
valid_sub_array_detail.append(i)
elif valid_sub_array_detail:
valid_array_detail.append(valid_sub_array_detail)
valid_sub_array_detail = []
if valid_sub_array_detail:
valid_array_detail.append(valid_sub_array_detail)
# print(valid_array_detail)
return [[sub[0], sub[-1] + 1] for sub in valid_array_detail]
def count_diff(str1, str2):
"""
    Count the number of differing chars between two strings.
    Assumes len(str1) == len(str2).
"""
count = 0
for i in range(len(str1)):
if str1[i] != str2[i]:
count += 1
return count
def correct_stat(sentence, sub_sents, param_ec, param_gd):
"""
statistical correction
input
sentence : error sentence in form of string
sub_sents: pair of index range of suspect chars and corresponding suspect chars
                   in the form like: [['str(b_idx),str(e_idx)', str(suspect_chars)], ...]
        param_ec : parameter for edit cost, may need tuning to match your own language model
        param_gd : parameter for the global decision, may need tuning for a different lm
output
sentence : corrected sentence in form of string
detail : correction detail in form like [[err_chars, cor_chars, b_idx, e_idx + 1]]
"""
detail = []
cands = []
for idx, item in sub_sents:
maybe_error_items = _generate_items(sentence, idx, item)
if not maybe_error_items:
continue
ids = idx.split(',')
begin_id = int(ids[0])
end_id = int(ids[-1]) if len(ids) > 1 else int(ids[0]) + 1
before = sentence[:begin_id]
after = sentence[end_id:]
base_score = get_ppl_score(list(before + item + after), mode=trigram_char)
min_score = base_score
corrected_item = item
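        # Candidate score = char-trigram perplexity of the corrected sentence
        # plus an edit penalty of param_ec * (#changed chars) * log(base_score);
        # lower is better.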
for candidate in maybe_error_items:
score = get_ppl_score(list(before + candidate + after), mode=trigram_char) \
+ param_ec * count_diff(item, candidate) * math.log(base_score)
if score < min_score:
corrected_item = candidate
min_score = score
delta_score = base_score - min_score
cands.append([idx, corrected_item, delta_score])
cands.sort(key = lambda x: x[2], reverse = True)
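    # Global decision: walk the candidates in order of decreasing improvement
    # and apply each one while its gain still exceeds i * param_gd *
    # log(base_score); stop at the first candidate that fails the test.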
for i, [idx, corrected_item, delta_score] in enumerate(cands):
if delta_score > i * param_gd * math.log(base_score):
idx = [int(idx.split(",")[0]), int(idx.split(",")[1])]
detail.append(list(zip([sentence[idx[0]:idx[1]]], \
[corrected_item], \
[idx[0]], \
[idx[1]])))
sentence = sentence[: idx[0]] + \
corrected_item + \
sentence[idx[1]:]
else:
break
return sentence, detail
def get_sub_sent(idx, sentence):
"""
    Get the longest sub-sentence that the target char (sentence[idx]) belongs to
    and that does not contain any non-character symbol (punctuation).
"""
begin_id = 0
end_id = 0
for i in range(idx,-1,-1):
if not is_chinese(sentence[i]):
begin_id = i
break
for i in range(idx, len(sentence)):
if not is_chinese(sentence[i]):
end_id = i
break
return [begin_id, end_id]
def correct_rule(sentence):
"""
    rule-based correction (strongly dependent on POS tagging)
input
sentence : error sentence
output
sentence : corrected sentence
detail : correction detail(exactly same form as that of correct_stat())
"""
detail = []
old_sentence = sentence
    # # the rule for '他她它' here is too simple to apply at present; improvement needed!
# # rule for '他她它'('he, she, it')
# dict_hsi = {
# '他' : {'爸','父','爷','哥','弟','兄','子','叔','伯','他','爹','先生'},
# '她' : {'妈','母','奶','姐','妹','姑姑','婶','姊','妯','娌','她','婆','姨','太太','夫人','娘'},
# '它' : {'它'}
# }
# for i in range(len(sentence)):
# if sentence[i] in dict_hsi.keys():
# for key in dict_hsi.keys():
| |
import os
import time
import logging
import numpy as np
from scipy.interpolate import interp1d
from scipy.optimize import fsolve
from scipy.integrate import solve_ivp, trapz, quad
from .utils import InvalidJumpError
from .utils import GRAV_ACC, EPS
from .utils import compute_dist_from_flat, vel2speed
if 'ONHEROKU' in os.environ:
plt = None
else:
import matplotlib.pyplot as plt
class Surface(object):
"""Base class for a 2D curve that represents the cross section of a surface
expressed in a standard Cartesian coordinate system."""
# If a user provides x,y data to create the surface that has any x spacings
# greater than this value, then the data will be interpolated before the
# slope and curvature derivatives are calculated.
max_x_spacing = 0.3 # meters
def __init__(self, x, y):
"""Instantiates an arbitrary 2D surface.
Parameters
==========
x : array_like, shape(n,)
The horizontal, x, coordinates of the slope. x[0] should be the
left most horizontal position and corresponds to the start of the
surface. This should be monotonically increasing and ideally have
            no adjacent spacings larger than 0.3 meter.
y : array_like, shape(n,)
The vertical, y, coordinates of the slope. y[0] corresponds to the
start of the surface.
Warns
=====
x and y values that have any x spacings larger than 0.3 meters will be
resampled at x spacings of approximately 0.3 meters.
"""
self.x = np.asarray(x)
self.y = np.asarray(y)
self._initialize_surface()
def _initialize_surface(self):
self._check_monotonic()
self._check_x_spacing()
self._initialize_gradients()
self._initialize_interpolators()
def _check_x_spacing(self):
"""Resamples x and y at an approximately 0.3 linear spacing if any x
spacings are too large."""
if any(np.diff(self.x) > self.max_x_spacing):
msg = ('The x values have at least one spacing larger than '
                   '{:1.1f} meters and will be replaced with a finer x spacing '
'and the y values linearly interpolated at this new '
'spacing.')
logging.warning(msg.format(self.max_x_spacing))
# ensure spacing is less than max_x_spacing
total_x = self.x[-1] - self.x[0]
num = round(np.ceil(total_x / self.max_x_spacing)) + 1
x = np.linspace(self.x[0], self.x[-1], num=num)
kwargs = {'fill_value': 'extrapolate'}
interp_y = interp1d(self.x, self.y, **kwargs)
y = interp_y(x)
self.x = x
self.y = y
def _initialize_gradients(self):
self.slope = np.gradient(self.y, self.x, edge_order=2)
slope_deriv = np.gradient(self.slope, self.x, edge_order=2)
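        # Signed curvature of y(x): kappa = y'' / (1 + y'**2)**(3/2)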
self.curvature = slope_deriv / (1 + self.slope**2)**1.5
def _initialize_interpolators(self):
kwargs = {'fill_value': 'extrapolate'}
self.interp_y = interp1d(self.x, self.y, **kwargs)
self.interp_slope = interp1d(self.x, self.slope, **kwargs)
self.interp_curvature = interp1d(self.x, self.curvature, **kwargs)
def _check_monotonic(self):
# NOTE: eps solution only works when adding to 0.
eps = np.finfo(float).eps
count = 0
while any(np.diff(self.x) == 0):
idx = np.array(np.where(np.diff(self.x) == 0), dtype=np.int32)
self.x[idx+1] += 20*eps
count += 1
if count > 10:
msg = ('While loop ran for too long: epsilon error')
raise InvalidJumpError(msg)
if any(np.diff(self.x) < 0):
msg = ('x-coordinates are not monotonically increasing.')
raise InvalidJumpError(msg)
@property
def start(self):
"""Returns the x and y coordinates at the start point of the
surface."""
return self.x[0], self.y[0]
@property
def end(self):
"""Returns the x and y coordinates at the end point of the surface."""
return self.x[-1], self.y[-1]
def shift_coordinates(self, delx, dely):
"""Shifts the x and y coordinates by delx and dely respectively. This
modifies the surface in place."""
self.x += delx
self.y += dely
# NOTE : Only the interpolators have to be reinitialized, the gradients
# don't have to be computed again. For now, this method is here for
# consistency among *Surface classes.
self._initialize_surface()
def distance_from(self, xp, yp):
"""Returns the shortest distance from point (xp, yp) to the surface.
Parameters
==========
xp : float
The horizontal, x, coordinate of the point.
yp : float
The vertical, y, coordinate of the point.
Returns
=======
distance : float
The shortest distance from the point to the surface. If the point
is above the surface a positive distance is returned, else a
negative distance.
Note
====
This general implementation can be slow, so implement overloaded
``distance_from()`` methods in subclasses when you can.
"""
def distance_squared(x):
return (xp - x)**2 + (yp - self.interp_y(x))**2
distances = np.sqrt((self.x - xp)**2 + (self.y - yp)**2)
x = fsolve(distance_squared, self.x[np.argmin(distances)])
return np.sign(yp - self.interp_y(x)) * np.sqrt(distance_squared(x))
def length(self):
"""Returns the length of the surface in meters via a numerical line
integral."""
def func(x):
return np.sqrt(1.0 + self.interp_slope(x)**2)
return quad(func, self.x[0], self.x[-1])[0]
def area_under(self, x_start=None, x_end=None, interval=0.05):
"""Returns the area under the curve integrating wrt to the x axis at
0.05 m intervals using the trapezoidal rule."""
if x_start is not None:
if x_start < self.start[0] or x_start > self.end[0]:
raise ValueError('x_start has to be between start and end.')
else:
x_start = self.start[0]
if x_end is not None:
if x_end < self.start[0] or x_end > self.end[0]:
raise ValueError('x_end has to be between start and end.')
else:
x_end = self.end[0]
x = np.linspace(x_start, x_end, num=int((x_end - x_start) / interval))
y = self.interp_y(x)
return trapz(y, x)
def height_above(self, surface):
"""Returns an array of values giving the height each point in this
surface is above the provided surface."""
return self.y - surface.interp_y(self.x)
def calculate_efh(self, takeoff_angle, takeoff_point, skier, increment=0.2):
"""Returns the equivalent fall height for the surface at the specified
constant intervals relative to the provided takeoff point or the start
of the surface.
Parameters
==========
takeoff_angle : float
Takeoff angle in radians.
takeoff_point : 2-tuple of floats
x and y coordinates of the point at which the skier leaves the
takeoff ramp.
skier : Skier
A skier instance.
increment : float, optional
x increment in meters between each calculated landing location.
Returns
=======
distance_x : ndarray, shape(n,)
Horizontal x locations of the equivalent fall height measures
spaced at the specified meter intervals relative to leftmost point
on the surface or the takeoff point, whichever is greater.
efh : ndarray, shape(n,)
The equivalent fall height corresponding to each value in
``distance_x``.
takeoff_speeds : ndarray, shape(n,)
The takeoff speed required to land the corresponding x coordinate.
"""
if abs(takeoff_angle) > np.pi/2:
msg = ('Takeoff angle must be between -pi/2 and pi/2.')
raise InvalidJumpError(msg)
if self.x[0] < takeoff_point[0] < self.x[-1]:
check_takeoff = self.interp_y(takeoff_point[0])
if takeoff_point[1] - check_takeoff < 0:
msg = ('Takeoff point cannot be under the surface.')
raise InvalidJumpError(msg)
elif self.end[0] <= takeoff_point[0]:
msg = ('Takeoff point cannot be downhill from surface.')
raise InvalidJumpError(msg)
# NOTE : If the takeoff point is before the start of the surface and below the
# height of the first surface point, the slope between the takeoff point
# and the left-most surface point must be less than the takeoff angle.
elif (takeoff_point[0] < self.start[0]):
slope = (self.start[1] - takeoff_point[1])/(self.start[0] - takeoff_point[0])
if takeoff_angle < np.arctan(slope):
msg = ('Takeoff angle does not allow impact on the surface '
'from above.')
raise InvalidJumpError(msg)
isGreaterTakeoff = self.x >= takeoff_point[0]
x = self.x[isGreaterTakeoff]
y = self.y[isGreaterTakeoff]
# NOTE : intervals are desired but the x distance is not necessarily
# divisible by the increment, so we drop the remainder so it is
# divisible and make the range inclusive.
remainder = (x[-1] - x[0]) % increment
rnge = (x[0], x[-1] - remainder)
num_points = int((x[-1] - x[0] - remainder) / increment) + 1
distance_x = np.linspace(*rnge, num=num_points)
slope = self.interp_slope(distance_x)
slope_angle = np.arctan(slope)
kwargs = {'fill_value': 'extrapolate'}
interp_y_efh = interp1d(x, y, **kwargs)
height_y = interp_y_efh(distance_x)
# NOTE : Create a surface under the surface that the skier will impact
# if they pass over the primary surface (self).
catch_surf = HorizontalSurface(np.min(height_y) - 0.1,
abs(distance_x[0] - distance_x[-1] + 2.0),
start=distance_x[-1] - 1.0)
efh = np.empty(len(distance_x))
efh[:] = np.nan
takeoff_speeds = np.full(len(distance_x), np.nan)
for i, (x, y, m) in enumerate(zip(distance_x, height_y, slope_angle)):
takeoff_speed, impact_vel = \
skier.speed_to_land_at((x, y), takeoff_point, takeoff_angle,
catch_surf)
# TODO: Use fly to check that it hits the x,y
impact_speed, impact_angle = vel2speed(*impact_vel)
# NOTE : A nan is inserted if skier surpasses 100 miles per hour
if takeoff_speed > 44:
msg = ('Impact of the surface from above is only possible until'
' {:.2f} meters. Calculation aborted.')
logging.warning(msg.format(x))
break
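            # Equivalent fall height: the impact-speed component normal to the
            # local slope expressed as a free-fall drop height,
            # efh = v_impact**2 * sin(slope_angle - impact_angle)**2 / (2 * g).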
efh[i] = (impact_speed ** 2 * np.sin(m - impact_angle) ** 2 /
(2 * GRAV_ACC))
takeoff_speeds[i] = takeoff_speed
return distance_x, efh, takeoff_speeds
def plot(self, ax=None, **plot_kwargs):
"""Returns a | |
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
fill
Sets the area to fill with a solid color. Use with
`fillcolor` if not "none". "toself" connects the
endpoints of the trace (or each segment of the trace if
it has gaps) into a closed shape.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.scattermapbox.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each (lon,lat)
pair If a single string, the same string appears over
all the data points. If an array of string, the items
            are mapped in order to this trace's (lon,lat)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids are used for object
            constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
lat
Sets the latitude coordinates (in degrees North).
latsrc
Sets the source reference on Chart Studio Cloud for
lat .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
:class:`plotly.graph_objects.scattermapbox.Line`
instance or dict with compatible properties
lon
Sets the longitude coordinates (in degrees East).
lonsrc
Sets the source reference on Chart Studio Cloud for
lon .
marker
:class:`plotly.graph_objects.scattermapbox.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
mode
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover.
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.scattermapbox.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.scattermapbox.Stream`
instance or dict with compatible properties
subplot
Sets a reference between this trace's data coordinates
and a mapbox subplot. If "mapbox" (the default value),
the data refer to `layout.mapbox`. If "mapbox2", the
data refer to `layout.mapbox2`, and so on.
text
Sets text elements associated with each (lon,lat) pair
If a single string, the same string appears over all
the data points. If an array of string, the items are
            mapped in order to this trace's (lon,lat)
coordinates. If trace `hoverinfo` contains a "text"
flag and "hovertext" is not set, these elements will be
seen in the hover labels.
textfont
Sets the icon text font (color=mapbox.layer.paint.text-
color, size=mapbox.layer.layout.text-size). Has an
effect only when `type` is set to "symbol".
textposition
            Sets the positions of the `text` elements with respect
to the (x,y) coordinates.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
that appear on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
            details on the date formatting syntax. Every attribute
            that can be specified per-point (the ones that are
            `arrayOk: true`) is available. Additionally, the variables
            `lat`, `lon` and `text` are available.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.scattermapbox.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
row : 'all', int or None (default)
Subplot row index (starting from 1) for the trace to be
added. Only valid if figure was created using
`plotly.tools.make_subplots`.If 'all', addresses all
rows in the specified column(s).
col : 'all', int or None (default)
Subplot col index (starting from 1) for the trace to be
added. Only valid if figure was created using
`plotly.tools.make_subplots`.If 'all', addresses all
columns in the specified row(s).
Returns
-------
Figure
"""
from plotly.graph_objs import Scattermapbox
new_trace = Scattermapbox(
below=below,
connectgaps=connectgaps,
customdata=customdata,
customdatasrc=customdatasrc,
fill=fill,
fillcolor=fillcolor,
hoverinfo=hoverinfo,
hoverinfosrc=hoverinfosrc,
hoverlabel=hoverlabel,
hovertemplate=hovertemplate,
hovertemplatesrc=hovertemplatesrc,
hovertext=hovertext,
hovertextsrc=hovertextsrc,
ids=ids,
idssrc=idssrc,
lat=lat,
latsrc=latsrc,
legendgroup=legendgroup,
line=line,
lon=lon,
lonsrc=lonsrc,
marker=marker,
meta=meta,
metasrc=metasrc,
mode=mode,
name=name,
opacity=opacity,
selected=selected,
selectedpoints=selectedpoints,
showlegend=showlegend,
stream=stream,
subplot=subplot,
text=text,
textfont=textfont,
textposition=textposition,
textsrc=textsrc,
texttemplate=texttemplate,
texttemplatesrc=texttemplatesrc,
uid=uid,
uirevision=uirevision,
unselected=unselected,
visible=visible,
**kwargs
)
return self.add_trace(new_trace, row=row, col=col)
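    # Usage sketch (illustrative; mirrors the parameters documented above):
    #
    #   import plotly.graph_objects as go
    #   fig = go.Figure()
    #   fig.add_scattermapbox(lat=[45.5017], lon=[-73.5673], mode="markers",
    #                         text=["Montreal"])
    #
    # Every keyword is forwarded unchanged into the Scattermapbox constructor
    # and the resulting trace is attached via add_trace(), optionally into the
    # subplot selected by `row`/`col`.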
def add_scatterpolar(
self,
cliponaxis=None,
connectgaps=None,
customdata=None,
customdatasrc=None,
dr=None,
dtheta=None,
fill=None,
fillcolor=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hoveron=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legendgroup=None,
line=None,
marker=None,
meta=None,
metasrc=None,
mode=None,
name=None,
opacity=None,
r=None,
r0=None,
rsrc=None,
selected=None,
selectedpoints=None,
showlegend=None,
stream=None,
subplot=None,
text=None,
textfont=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
theta=None,
theta0=None,
thetasrc=None,
| |
will be set if the application can process the
# policyConstraints extension. If the application can process the
# policyConstraints extension, then the path should not validate
# successfully. If the application can not process the policyConstraints
# extension, then the path should validate successfully.
TestInfo(False, user_constrained_policy_set=[]),
],
'4.8.8': [ # Different Policies Test8
# Procedure: Validate Different Policies Test8 EE using the default
# settings or open and verify Signed Test Message 192.168.127.12 using the
# default settings.
#
# Expected Result: The authorities-constrained-policy-set and the
# user-constrained-policy-set will be empty. The explicit-policy-indicator
# will be set if the application can process the policyConstraints
# extension. If the application can process the policyConstraints extension
# then the path should not validate successfully. If the application can
# not process the policyConstraints extension, then the path should
# validate successfully.
TestInfo(False, user_constrained_policy_set=[]),
],
'4.8.9': [ # Different Policies Test9
# Procedure: Validate Different Policies Test9 EE using the default
# settings or open and verify Signed Test Message 192.168.3.114 using the
# default settings.
#
# Expected Result: The authorities-constrained-policy-set and the
# user-constrained-policy-set will be empty. The explicit-policy-indicator
# will be set if the application can process the policyConstraints
# extension. If the application can process the policyConstraints
# extension, then the path should not validate successfully. If the
# application can not process the policyConstraints extension, then the
# path should validate successfully.
TestInfo(False, user_constrained_policy_set=[]),
],
'4.8.10': [ # All Certificates Same Policies Test10
# 1. default settings. The path should validate successfully.
TestInfo(True, user_constrained_policy_set=[TEST_POLICY_1, TEST_POLICY_2]),
# 2. default settings, but with initial-policy-set = {NIST-test-policy-1}.
# The path should validate successfully.
TestInfo(True, initial_policy_set=[TEST_POLICY_1],
user_constrained_policy_set=[TEST_POLICY_1]),
# 3. default settings, but with initial-policy-set = {NIST-test-policy-2}.
# The path should validate successfully.
TestInfo(True, initial_policy_set=[TEST_POLICY_2],
user_constrained_policy_set=[TEST_POLICY_2]),
],
'4.8.11': [ # All Certificates AnyPolicy Test11
# 1. default settings. The path should validate successfully.
TestInfo(True, user_constrained_policy_set=[ANY_POLICY]),
# 2. default settings, but with initial-policy-set = {NIST-test-policy-1}.
# The path should validate successfully.
TestInfo(True, initial_policy_set=[TEST_POLICY_1],
user_constrained_policy_set=[TEST_POLICY_1]),
],
'4.8.12': [ # Different Policies Test12
# Procedure: Validate Different Policies Test12 EE using the default
# settings or open and verify Signed Test Message 172.16.58.3 using the
# default settings.
#
# Expected Result: The authorities-constrained-policy-set and the
# user-constrained-policy-set will be empty. The explicit-policy-indicator
# will be set if the application can process the policyConstraints
# extension. If the application can process the policyConstraints
# extension, then the path should not validate successfully. If the
# application can not process the policyConstraints extension, then the
# path should validate successfully.
TestInfo(False, user_constrained_policy_set=[]),
],
'4.8.13': [ # All Certificates Same Policies Test13
# 1. default settings, but with initial-policy-set = {NIST-test-policy-1}.
# The path should validate successfully.
TestInfo(True, initial_policy_set=[TEST_POLICY_1],
user_constrained_policy_set=[TEST_POLICY_1]),
# 2. default settings, but with initial-policy-set = {NIST-test-policy-2}.
# The path should validate successfully.
TestInfo(True, initial_policy_set=[TEST_POLICY_2],
user_constrained_policy_set=[TEST_POLICY_2]),
# 3. default settings, but with initial-policy-set = {NIST-test-policy-3}.
# The path should validate successfully.
TestInfo(True, initial_policy_set=[TEST_POLICY_3],
user_constrained_policy_set=[TEST_POLICY_3]),
],
'4.8.14': [ # AnyPolicy Test14
# 1. default settings, but with initial-policy-set = {NIST-test-policy-1}.
# The path should validate successfully.
TestInfo(True, initial_policy_set=[TEST_POLICY_1],
user_constrained_policy_set=[TEST_POLICY_1]),
# 2. default settings, but with initial-policy-set = {NIST-test-policy-2}.
# The path should not validate successfully.
TestInfo(False, initial_policy_set=[TEST_POLICY_2],
user_constrained_policy_set=[]),
],
'4.8.15': [ # User Notice Qualifier Test15
# Procedure: Validate User Notice Qualifier Test15 EE using the default
# settings or open and verify Signed Test Message 172.16.58.3 using the
# default settings.
#
# Expected Result: The authorities-constrained-policy-set will be
# {NIST-test-policy-1} and the explicit-policy-indicator will be the same
# as the initial-explicit-policy indicator. If the initial-policy-set is
# any-policy or otherwise includes NIST-test-policy-1, then the
# user-constrained-policy-set will be {NIST-test-policy-1}. If not, the
# user-constrained-policy-set will be empty. If the initial-explicit-policy
# indicator is set and the initial-policy-set does not include
# NIST-test-policy-1, then the path should be rejected, otherwise it should
# validate successfully. If the path validates successfully, then the
# application should display the user notice.
TestInfo(True, user_constrained_policy_set=[TEST_POLICY_1]),
],
'4.8.16': [ # User Notice Qualifier Test16
# Procedure: Validate User Notice Qualifier Test16 EE using the default
# settings or open and verify Signed Test Message 172.16.58.3 using the
# default settings.
#
# Expected Result: The authorities-constrained-policy-set will be
# {NIST-test-policy-1} and the explicit-policy-indicator will be the same
# as the initial-explicit-policy indicator. If the initial-policy-set is
# any-policy or otherwise includes NIST-test-policy-1, then the
# user-constrained-policy-set will be {NIST-test-policy-1}. If not, the
# user-constrained-policy-set will be empty. If the initial-explicit-policy
# indicator is set and the initial-policy-set does not include
# NIST-test-policy-1, then the path should be rejected, otherwise it should
# validate successfully. If the path validates successfully, then the
# application should display the user notice associated with
# NIST-test-policy-1. The user notice associated with NIST-test-policy-2
# should not be displayed.
TestInfo(True, user_constrained_policy_set=[TEST_POLICY_1]),
],
'4.8.17': [ # User Notice Qualifier Test17
# Procedure: Validate User Notice Qualifier Test17 EE using the default
# settings or open and verify Signed Test Message 172.16.58.3 using the
# default settings.
#
# Expected Result: The authorities-constrained-policy-set will be
# {NIST-test-policy-1} and the explicit-policy-indicator will be the same
# as the initial-explicit-policy indicator. If the initial-policy-set is
# any-policy or otherwise includes NIST-test-policy-1, then the
# user-constrained-policy-set will be {NIST-test-policy-1}. If not, the
# user-constrained-policy-set will be empty. If the initial-explicit-policy
# indicator is set and the initial-policy-set does not include
# NIST-test-policy-1, then the path should be rejected, otherwise it should
# validate successfully. If the path validates successfully, then the
# application should display the user notice associated with anyPolicy.
TestInfo(True, user_constrained_policy_set=[TEST_POLICY_1]),
],
'4.8.18': [ # User Notice Qualifier Test18
# 1. default settings, but with initial-policy-set = {NIST-test-policy-1}.
# The path should validate successfully and the qualifier associated with
# NIST-test-policy-1 in the end entity certificate should be displayed.
TestInfo(True, initial_policy_set=[TEST_POLICY_1],
user_constrained_policy_set=[TEST_POLICY_1]),
# 2. default settings, but with initial-policy-set = {NIST-test-policy-2}.
# The path should validate successfully and the qualifier associated with
# anyPolicy in the end entity certificate should be displayed.
TestInfo(True, initial_policy_set=[TEST_POLICY_2],
user_constrained_policy_set=[TEST_POLICY_2]),
],
'4.8.19': [ # User Notice Qualifier Test19
# Procedure: Validate User Notice Qualifier Test19 EE using the default
# settings or open and verify Signed Test Message 192.168.3.11 using the
# default settings.
#
# Expected Result: The authorities-constrained-policy-set will be
# {NIST-test-policy-1} and the explicit-policy-indicator will be the same
# as the initial-explicit-policy indicator. If the initial-policy-set is
# any-policy or otherwise includes NIST-test-policy-1, then the
# user-constrained-policy-set will be {NIST-test-policy-1}. If not, the
# user-constrained-policy-set will be empty. If the initial-explicit-policy
# indicator is set and the initial-policy-set does not include
# NIST-test-policy-1, then the path should be rejected, otherwise it should
# validate successfully. Since the explicitText exceeds the maximum size
# of 200 characters, the application may choose to reject the certificate.
# If the application accepts the certificate, display of the user notice is
# optional.
TestInfo(True, user_constrained_policy_set=[TEST_POLICY_1]),
],
'4.8.20': [ # CPS Pointer Qualifier Test20
# Procedure: Validate CPS Pointer Qualifier Test20 EE using the default
# settings or open and verify Signed Test Message 6.2.2.85 using the
# default settings. (If possible, it is recommended that this test be run
# with the initial-explicit-policy indicator set. If this can not be done,
# manually check that the authorities-constrained-policy-set and
# user-constrained-policy-set are correct.)
#
# Expected Result: The authorities-constrained-policy-set will be
# {NIST-test-policy-1} and the explicit-policy-indicator will be the same
# as the initial-explicit-policy indicator. If the initial-policy-set is
# any-policy or otherwise includes NIST-test-policy-1, then the
# user-constrained-policy-set will be {NIST-test-policy-1}. If not, the
# user-constrained-policy-set will be empty. If the initial-explicit-policy
# indicator is set and the initial-policy-set does not include
# NIST-test-policy-1, then the path should be rejected, otherwise it should
    # validate successfully.
numbers)
"""
# Note: can also get usd_attr.GetTimeSamples()
prim = stage.GetPrimAtPath(scene_path)
if UsdGeom.Points(prim):
geom_points = UsdGeom.Points(prim)
result = geom_points.GetPointsAttr().GetBracketingTimeSamples(target_time)
elif UsdGeom.PointInstancer(prim):
instancer = UsdGeom.PointInstancer(prim)
result = instancer.GetPositionsAttr().GetBracketingTimeSamples(target_time)
else:
raise TypeError('The prim is neither UsdGeomPoints nor UsdGeomPointInstancer.')
return result
def add_pointcloud(stage, points, scene_path, colors=None, time=None, points_type='point_instancer'):
r"""Add a pointcloud to an existing USD stage.
Create a pointcloud represented by point instances of a sphere centered at each point coordinate.
The stage is modified but not saved to disk.
Args:
stage (Usd.Stage): Stage onto which to add the pointcloud.
points (torch.FloatTensor): Pointcloud tensor containing ``N`` points of shape ``(N, 3)``.
scene_path (str): Absolute path of pointcloud within the USD file scene. Must be a valid Sdf.Path.
        colors (torch.FloatTensor, optional): Color tensor corresponding to each point in the pointcloud
tensor of shape ``(N, 3)``. colors only works if points_type is 'usd_geom_points'.
        time (convertible to float, optional): Positive integer defining the time to which the supplied
            parameters correspond.
points_type (str): String that indicates whether to save pointcloud as UsdGeomPoints or PointInstancer.
'usd_geom_points' indicates UsdGeomPoints and 'point_instancer' indicates PointInstancer.
Please refer here for UsdGeomPoints:
https://graphics.pixar.com/usd/docs/api/class_usd_geom_points.html and here for PointInstancer
https://graphics.pixar.com/usd/docs/api/class_usd_geom_point_instancer.html. Default: 'point_instancer'.
Returns:
(Usd.Stage)
Example:
>>> stage = create_stage('./new_stage.usd')
>>> points = torch.rand(100, 3)
>>> stage = add_pointcloud(stage, points, '/World/PointClouds/pointcloud_0')
>>> stage.Save()
"""
scene_path = Sdf.Path(scene_path)
if time is None:
time = Usd.TimeCode.Default()
if stage.GetPrimAtPath(scene_path):
points_prim = stage.GetPrimAtPath(scene_path)
else:
if points_type == 'point_instancer':
points_prim = stage.DefinePrim(scene_path, 'PointInstancer')
elif points_type == 'usd_geom_points':
points_prim = stage.DefinePrim(scene_path, 'Points')
else:
raise ValueError('Expected points_type to be "usd_geom_points" or "point_instancer", '
f'but got "{points_type}".')
if points_type == 'point_instancer':
geom_points = UsdGeom.PointInstancer(points_prim)
sphere = UsdGeom.Sphere.Define(stage, f'{scene_path}/sphere')
sphere.GetRadiusAttr().Set(0.5)
geom_points.CreatePrototypesRel().SetTargets([sphere.GetPath()])
elif points_type == 'usd_geom_points':
geom_points = UsdGeom.Points(points_prim)
# Calculate default point scale
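    # (Heuristic: shortest bounding-box edge divided by the cube root of the
    # point count, so the prototype spheres are roughly point-sized.)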
bounds = points.max(dim=0)[0] - points.min(dim=0)[0]
min_bound = min(bounds)
scale = (min_bound / points.size(0) ** (1 / 3)).item()
# Generate instancer parameters
positions = points.detach().cpu().tolist()
scales = np.asarray([scale, ] * points.size(0))
if points_type == 'point_instancer':
indices = [0] * points.size(0)
# Populate point instancer
geom_points.GetProtoIndicesAttr().Set(indices, time=time)
geom_points.GetPositionsAttr().Set(positions, time=time)
scales = [(scale,) * 3] * points.size(0)
geom_points.GetScalesAttr().Set(scales, time=time)
elif points_type == 'usd_geom_points':
# Populate UsdGeomPoints
geom_points.GetPointsAttr().Set(points.numpy(), time=time)
geom_points.GetWidthsAttr().Set(Vt.FloatArray.FromNumpy(scales), time=time)
if colors is not None and points_type == 'usd_geom_points':
assert colors.shape == points.shape, 'Colors and points must have the same shape.'
geom_points.GetDisplayColorAttr().Set(colors.numpy(), time=time)
return stage
def export_pointcloud(file_path, pointcloud, scene_path='/World/PointClouds/pointcloud_0',
color=None, time=None, points_type='point_instancer'):
r"""Export a single pointcloud to a USD scene.
Export a single pointclouds to USD. The pointcloud will be added to the USD stage and represented
by point instances of a sphere centered at each point coordinate. The stage is then saved to disk.
Args:
file_path (str): Path to usd file (\*.usd, \*.usda).
pointcloud (torch.FloatTensor): Pointcloud tensor containing ``N`` points of shape ``(N, 3)``.
scene_path (str, optional): Absolute path of pointcloud within the USD file scene. Must be a valid Sdf.Path.
If no path is provided, a default path is used.
        color (torch.FloatTensor, optional): Color tensor corresponding to each point in the pointcloud
tensor of shape ``(N, 3)``. colors only works if points_type is 'usd_geom_points'.
        time (convertible to float): Positive integer defining the time to which the supplied parameters correspond.
points_type (str): String that indicates whether to save pointcloud as UsdGeomPoints or PointInstancer.
'usd_geom_points' indicates UsdGeomPoints and 'point_instancer' indicates PointInstancer.
Please refer here for UsdGeomPoints:
https://graphics.pixar.com/usd/docs/api/class_usd_geom_points.html and here for PointInstancer
https://graphics.pixar.com/usd/docs/api/class_usd_geom_point_instancer.html. Default: 'point_instancer'.
Returns:
(Usd.Stage)
Example:
>>> points = torch.rand(100, 3)
>>> stage = export_pointcloud('./new_stage.usd', points)
"""
stage = export_pointclouds(file_path, [pointcloud], [scene_path], colors=[color], times=[time],
points_type=points_type)
return stage
def export_pointclouds(file_path, pointclouds, scene_paths=None, colors=None, times=None,
points_type='point_instancer'):
r"""Export one or more pointclouds to a USD scene.
Export one or more pointclouds to USD. The pointclouds will be added to the USD stage and represented
by point instances of a sphere centered at each point coordinate. The stage is then saved to disk.
Args:
file_path (str): Path to usd file (\*.usd, \*.usda).
pointclouds (list of torch.FloatTensor): List of pointcloud tensors of length ``N`` defining N pointclouds.
scene_paths (list of str, optional): Absolute path(s) of pointcloud(s) within the USD file scene.
Must be a valid Sdf.Path. If no path is provided, a default path is used.
        times (list of int): Positive integers defining the times to which the supplied parameters correspond.
        colors (list of tensors, optional): List of RGB colors of length ``N``, each corresponding to a pointcloud
in the pointcloud list. colors only works if points_type is 'usd_geom_points'.
points_type (str): String that indicates whether to save pointcloud as UsdGeomPoints or PointInstancer.
'usd_geom_points' indicates UsdGeomPoints and 'point_instancer' indicates PointInstancer.
Please refer here for UsdGeomPoints:
https://graphics.pixar.com/usd/docs/api/class_usd_geom_points.html and here for PointInstancer
https://graphics.pixar.com/usd/docs/api/class_usd_geom_point_instancer.html. Default: 'point_instancer'.
Returns:
(Usd.Stage)
Example:
>>> points = torch.rand(100, 3)
>>> stage = export_pointcloud('./new_stage.usd', points)
"""
if scene_paths is None:
scene_paths = [f'/World/PointClouds/pointcloud_{i}' for i in range(len(pointclouds))]
if times is None:
times = [Usd.TimeCode.Default()] * len(scene_paths)
if colors is None:
colors = [None] * len(scene_paths)
assert len(pointclouds) == len(scene_paths)
stage = create_stage(file_path)
for scene_path, points, color, time in zip(scene_paths, pointclouds, colors, times):
add_pointcloud(stage, points, scene_path, color, time=time, points_type=points_type)
stage.Save()
return stage
# VoxelGrid Functions
def import_voxelgrid(file_path, scene_path, time=None):
r"""Import a single voxelgrid from a USD file.
Assumes that the USD voxelgrid is defined by a point instancer. Converts the coordinates
of each point instance to an occupied voxel. The output grid size is determined by the `grid_size`
primvar. If not specified, grid size will be determined by the axis with the largest number of occupied
voxels. The output voxelgrid will be of shape ``[grid_size, grid_size, grid_size]``.
Args:
file_path (str): Path to usd file (\*.usd, \*.usda).
scene_path (str): Scene path within the USD file indicating which PointInstancer primitive
to import as a voxelgrid.
time (convertible to float, optional): Positive integer indicating the time at which to retrieve parameters.
Returns:
torch.BoolTensor
Example:
>>> voxelgrid = torch.rand(32, 32, 32) > 0.5
>>> stage = export_voxelgrid('./new_stage.usd', voxelgrid, scene_path='/World/voxelgrid')
>>> voxelgrid_imp = import_voxelgrid(file_path='./new_stage.usd',
... scene_path='/World/voxelgrid')
>>> voxelgrid_imp.shape
torch.Size([32, 32, 32])
"""
if time is None:
time = Usd.TimeCode.Default()
voxelgrid_list = import_voxelgrids(file_path, [scene_path], times=[time])
return voxelgrid_list[0]
def import_voxelgrids(file_path, scene_paths=None, times=None):
r"""Import one or more voxelgrids from a USD file.
Assumes that the USD voxelgrid is defined by a point instancer. Converts the coordinates
of each point instance to an occupied voxel. The output grid size is determined from the `grid_size`
primvar. If not specified, grid size will be determined by the axis with the largest number of occupied
voxels. The output voxelgrid will be of shape ``[grid_size, grid_size, grid_size]``.
Args:
file_path (str): Path to usd file (\*.usd, \*.usda).
scene_paths (list of str, optional): Scene path(s) within the USD file indicating which PointInstancer
primitive(s) to import. If None, will return all pointclouds found based on PointInstancer
prims with `kaolin_type` primvar set to `VoxelGrid`.
times (list of int): Positive integers indicating the time at which to retrieve parameters.
Returns:
(list of torch.BoolTensor)
Example:
>>> voxelgrid_1 = torch.rand(32, 32, 32) > 0.5
>>> voxelgrid_2 = torch.rand(32, 32, 32) > 0.5
>>> stage = export_voxelgrids('./new_stage.usd', [voxelgrid_1, voxelgrid_2])
>>> voxelgrid_imp = import_voxelgrids(file_path='./new_stage.usd')
>>> len(voxelgrid_imp)
2
>>> voxelgrid_imp[0].shape
torch.Size([32, 32, 32])
"""
assert os.path.exists(file_path)
stage = Usd.Stage.Open(file_path)
# If scene path not specified, find all point clouds
if scene_paths is None:
scene_paths = []
for p in stage.Traverse():
is_point_instancer = UsdGeom.PointInstancer(p)
if is_point_instancer and p.GetAttribute('primvars:kaolin_type').Get() == 'VoxelGrid':
scene_paths.append(p.GetPath())
if times is None:
times = [Usd.TimeCode.Default()] * len(scene_paths)
voxelgrids = []
for scene_path, time in zip(scene_paths, times):
prim = stage.GetPrimAtPath(scene_path)
assert prim, f'The prim at {scene_path} does not exist.'
instancer = UsdGeom.PointInstancer(prim)
assert instancer # Currently only support pointclouds from point instancers
voxel_indices = torch.tensor(instancer.GetPositionsAttr().Get(time=time), dtype=torch.long)
bounds = voxel_indices.max(dim=0)[0]
max_bound = bounds.max()
grid_size = prim.GetAttribute('primvars:grid_size').Get(time=time)
if grid_size is not None:
assert max_bound < grid_size
else:
grid_size = max_bound
voxelgrid = torch.zeros([grid_size, grid_size, grid_size], dtype=torch.bool)
voxelgrid[voxel_indices[:, 0], voxel_indices[:, 1], voxel_indices[:, 2]] = 1.
voxelgrids.append(voxelgrid)
return voxelgrids
def add_voxelgrid(stage, voxelgrid, scene_path, time=None):
r"""Add a voxelgrid to an existing USD stage.
    Add a voxelgrid where occupied voxels are defined by non-zero values.
EXACT same) :
>>> model = nn.models.NeuralNetwork()
>>> model.add(nn.layers.Flatten())
>>> model.add(nn.layers.Dense(784, 64, activation = nn.layers.ReLU()))
>>> model.add(nn.layers.Dense(64, 32, activation = nn.layers.ReLU()))
>>> model.add(nn.layers.Dense(32, 10, activation = nn.layers.ReLU()))
Obviously we want to finalize our model for good practice :
>>> model.finalize(loss = nn.loss.CrossEntropy(), optimizer = nn.optimizers.Adam())
and we can enter in the weights & biases using the enter_parameters() :
>>> model.enter_parameters(params) #must be params given from the give_parameters() method
and train! :
>>> model.train(X_train, y_train, epochs = 100) #let's try 100 epochs this time
The reason this is such a big deal is because we don't have to start training from scratch all over again. The model
will not have to start from 0% accuracy, but may start with 80% given the params belonging to our partially-trained model
we loaded from the pickle file. This is of course not a big deal for something like MNIST but will be for bigger model architectures, or datasets.
Of course there's a lot more things we could've changed, but I think that's pretty good for now!
"""
def __init__(self, layers=None):
from .layers import Dense, Flatten, Softmax, Dropout
from .loss import CrossEntropy, softmax
from .optimizers import Adam, SGD
from .utils_nn import revert_one_hot_nn, revert_softmax_nn
self.Dense = Dense
self.Flatten = Flatten
self.Softmax = Softmax
self.Dropout = Dropout
self.CrossEntropy = CrossEntropy
self.softmax = softmax
self.Adam = Adam
self.SGD = SGD
self.revert_one_hot = revert_one_hot_nn
self.revert_softmax = revert_softmax_nn
self.layers = []
if layers: # self.layers = layers
for layer in layers:
self.layers.append(layer)
if layer.activation:
self.layers.append(layer.activation)
self.finalized = False
self.sgd_indices = ...
self.adam_t = 0
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=np.ComplexWarning)
def num_parameters(self):
num_params = 0
for layer in self.layers:
if isinstance(layer, self.Dense):
                num_params += (
                    np.prod(layer.parameters["weights"].shape)
                    + np.prod(layer.parameters["bias"].shape)
                )
return num_params
def enter_parameters(self, parameters):
for layer_num in range(len(self.layers)):
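            # Note on the try/except below: placeholder entries (activation and
            # Flatten layers) store None for both keys, so the comparison is a
            # plain bool and we skip them; for real numpy arrays the
            # elementwise `== None` check makes `and` raise, and the except
            # branch is where the weights and bias actually get copied in.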
try:
if (
parameters[layer_num]["weights"] == None
and parameters[layer_num]["bias"] == None
):
continue
except Exception:
self.layers[layer_num].parameters["weights"] = parameters[
layer_num
]["weights"]
self.layers[layer_num].parameters["bias"] = parameters[
layer_num
]["bias"]
def give_parameters(self):
parameters = []
for layer in self.layers:
try:
parameters.append(
{
"weights": layer.parameters["weights"],
"bias": layer.parameters["bias"],
}
)
except Exception:
parameters.append({"weights": None, "bias": None})
return parameters
def pickle_params(self, FILE_NAME="NeuralNetwork_learnt_parameters"):
parameters = NeuralNetwork.give_parameters(self)
import pickle
with open(FILE_NAME + ".pickle", "wb") as f:
pickle.dump(parameters, f, protocol=pickle.HIGHEST_PROTOCOL)
def add(self, layer):
self.layers.append(layer)
if layer.activation:
self.layers.append(layer.activation)
def forward(self, inputs):
for layer in self.layers:
if isinstance(layer, self.Softmax):
continue
inputs = layer.forward(inputs)
return inputs
def backward(self, grad):
for layer in reversed(self.layers):
if isinstance(layer, self.Softmax):
continue
grad = layer.backward(grad)
return grad
def finalize(self, loss, optimizer):
"""Both of these have to be the classes for the loss and optimizations"""
self.loss, self.optimizer = loss, optimizer
self.finalized = True
def _sgd_updating_indices(self):
"""only call if the loss is SGD"""
for layer_num in range(len(self.layers)):
layer = self.layers[layer_num]
layer.indices = self.sgd_indices
self.layers[layer_num] = layer
def _chunk_train(self, data_chunk, label_chunk):
"""the actual training code!"""
predicted = NeuralNetwork.forward(self, data_chunk)
for layer_num in range(len(self.layers)):
if self.layers[layer_num].nesterov:
"""nesterov accel"""
try:
self.layers[layer_num].parameters["weights"] += (
self.optimizer.momentum
* self.optimizer.momentum_params[
"weights" + str(layer_num)
]
)
self.layers[layer_num].parameters["bias"] += (
self.optimizer.momentum
* self.optimizer.momentum_params[
"bias" + str(layer_num)
]
)
except Exception:
pass
if isinstance(self.optimizer, self.Adam):
self.adam_t += 1
self.optimizer.t = self.adam_t
if isinstance(self.optimizer, self.SGD):
"""select indices for the gradients and update all layers"""
start_index = np.random.randint(len(predicted) - 3)
self.sgd_indices = slice(start_index, len(predicted) - 1)
NeuralNetwork._sgd_updating_indices(self)
grad = self.loss.grad(
label_chunk[self.sgd_indices], predicted[self.sgd_indices]
) # calculate the dL/dY
NeuralNetwork.backward(self, grad)
self.optimizer.update(self.layers)
def train(self, x_train, y_train, epochs=1, batch_size=32, show_loop=True):
if not self.finalized:
raise ValueError(
"This model isn't finalized. Call it through model.finalize()."
)
if isinstance(self.layers[-1], self.Softmax) and not isinstance(
self.loss, self.CrossEntropy
):
raise ValueError(
"If the last layer activation is softmax, you must be using CrossEntropy loss."
)
if isinstance(self.loss, self.CrossEntropy) and not isinstance(
self.layers[-1], self.Softmax
):
raise ValueError(
"If the loss function is crossentropy, you must be using Softmax as your last layer."
)
x_train, y_train = np.array(x_train), np.array(y_train)
if len(y_train.shape) != 2:
raise ValueError("y_train must be 2D.")
if (
not isinstance(self.layers[0], self.Flatten)
and len(x_train.shape) > 2
):
raise ValueError(
"There must be a Flatten layer in the beginning if you are working with data that is not "
"a matrix (e.g. something that is 60k * 28 * 28 not 60k * 784)"
)
# perform batch operations
if batch_size > x_train.shape[0]:
warnings.warn(
"Batch size is more than the number of samples, so we have set batch_size = 1 and are just performing full batch gradient descent."
)
batch_size = 1
x_train = np.array_split(x_train, batch_size)
y_train = np.array_split(y_train, batch_size)
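# note: np.array_split(a, n) returns n chunks (e.g. np.array_split(np.arange(10), 3)
# yields chunks of sizes 4, 3 and 3), so batch_size here sets the number of chunks
# rather than the number of samples per chunk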
self.optimizer.setup(self.layers)
epochs = (
tqdm(range(epochs), position=0, ncols=100)
if show_loop
else range(epochs)
)
evaluation_batch = random.randint(0, len(x_train) - 1)
evaluation_method = (
NeuralNetwork.regression_evaluate
if self.loss.type_regression
else NeuralNetwork.categorical_evaluate
)
for epoch in epochs:
Parallel(prefer="threads")(
delayed(NeuralNetwork._chunk_train)(
self, data_chunk, label_chunk
)
for data_chunk, label_chunk in zip(x_train, y_train)
)
if show_loop:
epochs.set_description(
"Acc : "
+ str(
round(
evaluation_method(
self,
x_train[evaluation_batch],
y_train[evaluation_batch],
)
* 100,
2,
)
)
+ "%"
)
# adjust dropout
for layer_num in range(len(self.layers)):
"""adjust the weights and biases if there is dropout"""
try:
layer = self.layers[layer_num]
if isinstance(layer, self.Dropout):
receiving_layer = self.layers[
layer_num + 1
] # this is the layer that receives the dropout
receiving_layer.parameters["weights"] *= 1 - layer.dr
receiving_layer.parameters["bias"] *= 1 - layer.dr
self.layers[layer_num + 1] = receiving_layer
except Exception:
pass
def full_batch_train(self, x_train, y_train, epochs=1, show_loop=True):
if not self.finalized:
raise ValueError(
"This model isn't finalized. Call it through model.finalize()."
)
if isinstance(self.layers[-1], self.Softmax) and not isinstance(
self.loss, self.CrossEntropy
):
raise ValueError(
"If the last layer activation is softmax, you must be using CrossEntropy loss."
)
if isinstance(self.loss, self.CrossEntropy) and not isinstance(
self.layers[-1], self.Softmax
):
raise ValueError(
"If the loss function is crossentropy, you must be using Softmax as your last layer."
)
x_train, y_train = np.array(x_train), np.array(y_train)
if len(y_train.shape) != 2:
raise ValueError("y_train must be 2D.")
if (
not isinstance(self.layers[0], self.Flatten)
and len(x_train.shape) > 2
):
raise ValueError(
"There must be a Flatten layer in the beginning if you are working with data that is not "
"a matrix (e.g. something that is 60k * 28 * 28 not 60k * 784)"
)
epochs = (
tqdm(range(epochs), position=0, ncols=100)
if show_loop
else range(epochs)
)
evaluation_method = (
NeuralNetwork.regression_evaluate
if self.loss.type_regression
else NeuralNetwork.categorical_evaluate
)
self.optimizer.setup(self.layers) # setup the optimizer
for epoch in epochs:
NeuralNetwork._chunk_train(self, x_train, y_train)
if show_loop == True:
epochs.set_description(
"Acc : "
+ str(
round(
evaluation_method(self, x_train, y_train) * 100, 2
)
)
+ "%"
)
# adjust dropout
for layer_num in range(len(self.layers)):
"""adjust the weights and biases if there is dropout"""
try:
layer = self.layers[layer_num]
if isinstance(layer, self.Dropout):
receiving_layer = self.layers[
layer_num + 1
] # this is the layer that receives the dropout
receiving_layer.parameters["weights"] *= 1 - layer.dr
receiving_layer.parameters["bias"] *= 1 - layer.dr
self.layers[layer_num + 1] = receiving_layer
except Exception:
pass
def mini_batch_train(
self, x_train, y_train, epochs=1, N=0.2, show_loop=True
):
if not self.finalized:
raise ValueError(
"This model isn't finalized. Call it through model.finalize()."
)
if isinstance(self.layers[-1], self.Softmax) and not isinstance(
self.loss, self.CrossEntropy
):
raise ValueError(
"If the last layer activation is softmax, you must be using CrossEntropy loss."
)
if isinstance(self.loss, self.CrossEntropy) and not isinstance(
self.layers[-1], self.Softmax
):
raise ValueError(
"If the loss function is crossentropy, you must be using Softmax as your last layer."
)
x_train, y_train = np.array(x_train), np.array(y_train)
if len(y_train.shape) != 2:
raise ValueError("y_train must be 2D.")
if (
not isinstance(self.layers[0], self.Flatten)
and len(x_train.shape) > 2
):
raise ValueError(
"There must be a Flatten layer in the beginning if you are working with data that is not "
"a matrix (e.g. something that is 60k * 28 * 28 not 60k * 784)"
)
if N == 1:
raise ValueError(
"N cannot be equal to 1, has to be between 0 and 1."
)
N *= len(x_train)
N = round(N)
epochs = (
tqdm(range(epochs), position=0, ncols=100)
if show_loop
else range(epochs)
)
evaluation_method = (
NeuralNetwork.regression_evaluate
if self.loss.type_regression
else NeuralNetwork.categorical_evaluate
)
self.optimizer.setup(self.layers)
for epoch in epochs:
# set up the mini-batch
start_index = random.randint(0, len(x_train) - N - 1)
end_index = start_index + N
#DEVELOPMENT SETTINGS - all need looped over... - settings_inputs to be moved to switchboard
print Dir[l].split('/')[-2]
ntor = setting_ntor[1] #requested ntor mode number !!! NEEDS A FUNCTION !!!
#Create global 2D diagnostics folder and extract current simulation name
DirEquil2D = CreateNewFolder(Dir[l],'2DEquil_Plots/')
DirString = Dir[l].split('/')[-2]
SubString = DirString.split('_')[-1]
#Extract Kstep [-] & Time [ms] arrays from SEQ.harmonics & toroidal harmonics from energy_n.txt
SEQArray, KStepArray, TimeArray, ntorArray = ExtractMEGA_DataRanges(Dir[l], DataFile='harmonics')
try: DeltaKstep = KStepArray[1]-KStepArray[0] #KStep Interval [-]
except: DeltaKstep = KStepArray[0]
try: DeltaTime = TimeArray[1]-TimeArray[0] #Time Interval [ms]
except: DeltaTime = TimeArray[0]
KStepMod = len(KStepArray)/len(SEQArray) #KStep indices per SEQ [-]
ntor_tot = ntorArray[2] #Total number of positive & negative modes (Inc n=0)
ntor_pos = ntorArray[1] #Number of positive modes (Ignoring n=0)
ntor0Idx = ntorArray[0] #ntor = 0 index, contains (var0 + dvar) data
#Extract toroidal mode number array index (ntorIdx) from requested mode number (ntor)
ntorIdx = Set_ntorIdx(ntor,ntorArray)
#Set SEQ and Kstep ranges as requested - else default to max range
KStepRange,KStepStep = Set_KStepRange(KStepArray,setting_kstep)
SEQRange = Set_SEQRange(setting_SEQ)
#Extract relevant spatial normalisation factors
NormVariables,NormValues,NormUnits = ExtractMEGA_Normalisations(Dir[l])
ZMin = NormValues[NormVariables.index('bottom_sim')]; ZMax = NormValues[NormVariables.index('top_sim')]
Zgeo = NormValues[NormVariables.index('zaxis')]; Rgeo = NormValues[NormVariables.index('raxis')]
for j in range(SEQRange[0],SEQRange[1]):
#Set SEQIndex for current simulation folder
SEQ = j
#Extract and plot data for each timestep
for i in tqdm( range(KStepRange[0],KStepRange[1],KStepStep) ):
#Set TimeIndex and employ to extract KStep and Time
KStepIdx = i; #[-]
IdxOffset = SEQ*KStepMod #[-]
KStep = KStepArray[KStepIdx+IdxOffset] #[-]
Time = TimeArray[KStepIdx+IdxOffset] #[ms]
#Extract ALL VARIABLES FOR SINGLE KSTEP from Harmonics, it contains:
#HarmonicsData.rho_pol [1D array] :: HarmonicsData.q_psi [1D array]
#HarmonicsData.kst [1D array] :: HarmonicsData.time [1D array]
#HarmonicsData.Variables[i]: [3D Array] of shape [mpol][ntor][lpsi][A/B] for a single kstep
HarmonicsData = ExtractMEGA_Harmonics(Dir[l],'All',ntor_tot,KStepIdx,SEQ,'3D')
#Extract data resolution and poloidal axes from repository .dat files
#DataShape contains data resolution of form: [mpol,ntor,lpsi,ltheta]
Crdr,Crdz = ExtractMEGA_PoloidalGrid(DirRepository,HarmonicsData)
Crdz = [Crdz[x]+ZMin for x in range(0,len(Crdz))] #Offset vertical axis such that Z0 = Zgeo
DataShape = ExtractMEGA_DataShape(HarmonicsData)
mpol_res = DataShape[0]; ntor_res = DataShape[1]
lpsi_res = DataShape[2]; ltheta_res = DataShape[3]
#For each requested variable at the current Kstep
for j in range(0,len(variables)):
#Create global 2D diagnostics folder and extract current simulation name
DirMovie = CreateNewFolder(DirEquil2D,variables[j]+'_n'+str(ntor))
#Select variable and Merge 3D Data into 2D poloidal slice
PoloidalImage = Extract_PoloidalImage(HarmonicsData,variables[j],ntorIdx)
#==========#
#Create figure and define Title, Legend, Axis Labels etc...
fig,ax = figure(subplots=[1,1], aspectratio=image_aspectratio)
#Extract Variablelabel and define figure texts
VariableLabel = VariableLabelMaker(variables[j])
Title = VariableLabel+', n='+str(ntor)+', t='+str(round(Time,3))+' ms \n Simulation: '+DirString
Xlabel,Ylabel = 'Radius $R$ [m]', 'Height $Z$ [m]'
Legend = list()
#Plot 2D poloidally resolved figure and beautify
im = ax.contourf(Crdr, Crdz, PoloidalImage, 100); plt.axis('scaled')
im2 = ax.contour(Crdr, Crdz, PoloidalImage, 20); plt.axis('scaled')
cbar = Colourbar(ax,im,VariableLabel,5)
ImageOptions(fig,ax,Xlabel,Ylabel,Title,Legend)
#Save 2D poloidally resolved figure for current simulation
SaveString = variables[j]+'_n'+str(ntor)+'_kstep'+str('%07.f'%KStep)+ext
plt.savefig(DirMovie+SaveString)
# plt.show()
plt.close('all')
#==========#
if write_ASCII == True:
#Create directory to hold ASCII data
DirASCII = CreateNewFolder(DirEquil2D,'2DEquil_Data/')
DirASCII_Var = CreateNewFolder(DirASCII,variables[j]+'/')
#Set ASCII data file name string and header
SaveString = variables[j]+'_n'+str(ntor)+'_t='+str(round(Time,3))+'.dat'
Header = [VariableLabel,' ', 'R',lpsi_res, 'theta', ltheta_res, '\n']
#Write 1D data header, then 2D PoloidalImage
WriteFile_ASCII(Header, DirASCII_Var+SaveString, 'w', 'RSV')
WriteFile_ASCII(PoloidalImage, DirASCII_Var+SaveString, 'a', write_ASCIIFormat)
#endif
#endfor - Variable loop
#!!! AUTO CREATE MOVIES FOR EACH VARIABLE HERE !!!
#!!! AUTO CREATE MOVIES FOR EACH VARIABLE HERE !!!
#endfor - Kstep loop
#endfor - SEQ loop
#endfor - dir loop
#endif
#==========##==========##==========#
#==========##==========##==========#
if any([savefig_2Dequilibrium,savefig_2Dequilmovie]) == True:
print '--------------------------------'
print '2D Equilibrium Analysis Complete'
print '--------------------------------'
#endif
#====================================================================#
#====================================================================#
#====================================================================#
#TEMPORALLY/SPECTRALLY RESOLVED DIAGNOSTICS#
#====================================================================#
#====================================================================#
#1D POLOIDAL SPECTRUM ANALYSIS#
#====================================================================#
if savefig_1Dpolspectrum == True:
#For each detected simulation folder
for l in range(0,len(Dir)):
#DEVELOPMENT SETTINGS - all need looped over... - settings_inputs to be moved to switchboard
print Dir[l].split('/')[-2]
ntor = setting_ntor[1] #requested ntor mode number !!! NEEDS A FUNCTION !!!
variable = SpectralVariable #requested response variable !!! Need to implement vrad etc...
# ~~~ TESTING PURPOSES TO BE MOVED TO SWITCHBOARD TESTING PURPOSES ~~~~ #
ntor = -2
setting_mpol = [3,10,1] #[5,9,1] #[-9,-5,1]
# ~~~ TESTING PURPOSES TO BE MOVED TO SWITCHBOARD TESTING PURPOSES ~~~~ #
#Create global 2D diagnostics folder and extract current simulation name
DirSpectral = CreateNewFolder(Dir[l],'1DSpectral_Plots/') #Spatio-Temporal Folder
DirSpectral_ntor = CreateNewFolder(DirSpectral,variable+'_ntor='+str(ntor)) #Spatio-Temporal Images Folder
DirString = Dir[l].split('/')[-2] #Full Simulation Name
SubString = DirString.split('_')[-1] #Simulation Nickname
#Extract Kstep [-] & Time [ms] arrays from SEQ.harmonics & toroidal harmonics from energy_n.txt
SEQArray, KStepArray, TimeArray, ntorArray = ExtractMEGA_DataRanges(Dir[l], DataFile='harmonics')
DeltaKstep = KStepArray[1]-KStepArray[0] #KStep Interval [-]
DeltaTime = TimeArray[1]-TimeArray[0] #Time Interval [ms]
KStepMod = len(KStepArray)/len(SEQArray) #KStep indices per SEQ [-]
ntor_tot = ntorArray[2] #Total number of positive & negative modes (Inc n=0)
ntor_pos = ntorArray[1] #Number of positive modes (Ignoring n=0)
ntor0 = ntorArray[0] #ntor = 0, baseline equilibrium data
#Extract Energy_n outputs and header for plotting
#energy_n: [ntor][timestep]
Energy_n,Header_n = ExtractMEGA_Energy(Dir[l],'energy_n')
Energy_TimeArray = Energy_n[1] #Extract full time array [ms] for plotting
Energy_n = Energy_n[2::] #Remove KStep and Time arrays from array
#Extract toroidal mode number array index (ntorIdx) from requested mode number (ntor)
ntorIdx = Set_ntorIdx(ntor,ntorArray)
#Set Kstep ranges as requested - else default to max range
KStepRange,KStepStep = Set_KStepRange(KStepArray,setting_kstep)
SEQRange = Set_SEQRange(setting_SEQ)
#Extract Variablelabel for chosen variable
VariableLabel = VariableLabelMaker(variable) #Units='Perturbation [-]'
for j in tqdm( range(SEQRange[0],SEQRange[1]) ):
#Set SEQIndex for current simulation folder
SEQ = j
#Extract and plot data for each timestep
for i in range(KStepRange[0],KStepRange[1],KStepStep):
#Set TimeIndex and employ to extract KStep and Time
KStepIdx = i #[-]
IdxOffset = SEQ*KStepMod #[-]
KStep = KStepArray[KStepIdx+IdxOffset] #[-]
Time = TimeArray[KStepIdx+IdxOffset] #[ms]
#Extract ALL VARIABLES FOR SINGLE KSTEP from Harmonics, it contains:
#HarmonicsData.rho_pol [1D array] :: HarmonicsData.q_psi [1D array]
#HarmonicsData.kst [1D array] :: HarmonicsData.time [1D array]
#HarmonicsData.Variables[i]: [3D Array] of shape [mpol][ntor][lpsi][A/B] for a single kstep
HarmonicsData = ExtractMEGA_Harmonics(Dir[l],'All',ntor_tot,KStepIdx,SEQ,'3D')
rho_pol = HarmonicsData.rho_pol; q_psi = HarmonicsData.q_psi
#DataShape contains data resolution of form: [mpol,ntor,lpsi,ltheta]
DataShape = ExtractMEGA_DataShape(HarmonicsData)#; print DataShape
mpol_res = DataShape[0]; ntor_res = DataShape[1]
lpsi_res = DataShape[2]; ltheta_res = DataShape[3]
#Extract radial magnetic field (brad) from SEQ.harmonic object
#Data is of shape: Data[mpol,ntor,lpsi,A/B]
Data = getattr(HarmonicsData, variable)
#Combine spectral components A and B in quadrature to obtain variable amplitude
#Pos corresponds to resonant poloidal modes i.e. +m on RHS of image
#Neg corresponds to non-resonant poloidal modes i.e. -m on LHS of image
#One of ntor_pos-ntor or ntor_pos+ntor will equal 0, representing the equilibrium values.
DataAmpPos = np.sqrt( (Data[:, ntor_pos-ntor,:,0]**2) + (Data[:, ntor_pos-ntor,:,1]**2) )
DataAmpNeg = np.sqrt( (Data[:, ntor_pos+ntor,:,0]**2) + (Data[:, ntor_pos+ntor,:,1]**2) )
DataAmpNeg = np.flip( DataAmpNeg,axis=0) #Flip LHS of image for plotting
#Concat positive and negative ntor to obtain full poloidal harmonic spectrum
#DataAmp is of shape: [2*mpol-1][lpsi]
DataAmp = np.concatenate((DataAmpNeg,DataAmpPos[1:,:]),axis=0)
#Define mpol data index and true value ranges applying user settings
#mpol index ranges :: 0 to (2*mpol_res)-2, starting at zero
#mpol harmonic ranges :: -mpol_res to mpol_res, including zero
mpol_idxmin = setting_mpol[0] + (mpol_res-1)
mpol_idxmax = setting_mpol[1] + (mpol_res)
mpol_idxrange = [mpol_idxmin, mpol_idxmax]
mpol_valrange = [mpol_idxmin-mpol_res+1, mpol_idxmax-mpol_res]
mpol_step = setting_mpol[2]
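#Worked example with hypothetical values: for mpol_res = 16 and setting_mpol = [3,10,1],
#mpol_idxmin = 3+15 = 18 and mpol_idxmax = 10+16 = 26, so the loop below plots DataAmp
#rows 18 to 25, i.e. poloidal harmonics m = +3 to +10 (mpol_valrange = [3,10]) in steps of 1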
#==========##==========#
#Create figure
fig,ax = figure(subplots=[1,1], aspectratio=image_aspectratio)
#Plot 1D radial profiles (rho_pol) of poloidal perturbation amplitude over mpol_range
Legend = list()
for k in range(mpol_idxrange[0],mpol_idxrange[1],mpol_step):
mpol_idx = k; #mpol HarmonicsData/DataAmp array index
mpol = k-mpol_res+1 #mpol real harmonic number (+1 accounts for mpol=0)
#Structure: DataAmp[mpol][rho_pol] - row mpol_idx holds the radial profile of harmonic mpol
ax.plot(rho_pol,DataAmp[mpol_idx,:], lw=2)
Legend.append('$m_{pol}$: '+str(mpol))
#Alternative idea for plotting - plot difference between Pure-MHD and total perturbations
# k=2 #Overwrite k for testing purposes
# DataAmpNeg = np.flip( DataAmpNeg,axis=0) #Flip it back so 'k' indices align
# AmpDiff = DataAmpPos[:][k] - DataAmpNeg[:][k] #Amplitude difference for mpol == k
# ax.plot(rho_pol,DataAmpNeg[:][k], 'k--', lw=1) #negative mpol = pure MHD perturbation
# ax.plot(rho_pol,DataAmpPos[:][k], 'r--', lw=1) #positive mpol = total MHD perturbation
# ax.plot(rho_pol,AmpDiff, 'b-', lw=2) #Plasma response portion of MHD pert.
#endif
#endfor
#####
Title = 'Poloidal Spectrum: n='+str(ntor)+', m='+str(setting_mpol[0])+','+str(setting_mpol[1])+', t='+str(round(Time,3))+' [ms] \n Simulation: '+DirString
Xlabel,Ylabel = 'Normalised Minor Radius $\\rho_{pol}$ [-]', VariableLabel
ImageOptions(fig,ax,Xlabel,Ylabel,Title,'')
# ax.set_xlim(image_rhopolcrop[0],image_rhopolcrop[1])
ax.set_ylim(0,0.8)
ax.legend(Legend, fontsize=18, frameon=False)
ax.set_xlabel('Normalised Minor Radius $\\rho_{pol}$ [-]', fontsize=18)
ax.set_ylabel(VariableLabel, fontsize=18)
#Save poloidal spectrum figure for current SEQ and Kstep
SaveString = 'PoloidalSpectrum_'+variable+'_n'+str(ntor)+'_kstep'+str('%07.f'%KStep)+ext
plt.savefig(DirSpectral_ntor+SaveString)
# plt.show()
plt.close('all')
#==========##==========#
#==========##==========#
if write_ASCII == True:
DirASCII = CreateNewFolder(DirSpectral_ntor,"ASCII_Data") #Spatio-Temporal Data Folder
#Save Yaxis (rho_pol) and safety factor for future plotting
WriteFile_ASCII(rho_pol, DirASCII+'rho_pol', 'w', write_ASCIIFormat)
WriteFile_ASCII(q_psi, DirASCII+'q_psi', 'w', write_ASCIIFormat)
#Write 1D data header, then 1D radially resolved poloidal perturbation amplitude array
mpolString = '_m'+str(mpol_valrange[0])+':'+str(mpol_valrange[1])
ntorString = '_n'+str(ntor)
TimeString = '_t='+str(round(Time,3))
SaveString = '1DPolSpectrum_'+variable+ntorString+mpolString+TimeString+'.dat'
Header = [VariableLabel,' ', 'mpol',mpol_valrange[0],mpol_valrange[-1], 'rhopol',rho_pol[0],rho_pol[-1], '\n']
WriteFile_ASCII(Header, DirASCII+SaveString, 'w', 'RSV')
# DataAmp is of shape: [2*mpol-1][lpsi]
ASCII_Output = DataAmp[mpol_idxrange[0]:mpol_idxrange[1],:]
WriteFile_ASCII(ASCII_Output, DirASCII+SaveString, 'a', write_ASCIIFormat)
#endif - Write_ASCII
#endfor - KStep loop
#endfor - SEQ loop
#endfor - Dir loop
#endif - Diag loop
#==========##==========##==========#
#==========##==========##==========#
if any([savefig_1Dpolspectrum]) == True:
print '--------------------------------------'
print '1D Poloidal Spectrum Analysis Complete'
print '--------------------------------------'
#endif
#====================================================================#
#====================================================================#
#====================================================================#
#2D POLOIDAL SPECTRUM ANALYSIS#
#====================================================================#
if savefig_2Dpolspectrum == True:
#For each detected simulation folder
for l in range(0,len(Dir)):
#DEVELOPMENT SETTINGS - all need looped over... - settings_inputs to be moved to switchboard
print Dir[l].split('/')[-2]
ntor = setting_ntor[1] #requested ntor mode number !!! NEEDS A FUNCTION !!!
variable = SpectralVariable #requested response variable !!! Need to implement vrad etc...
#Initiate any required lists
DataAmpPROES_pol,DataAmpPROES_rad = list(),list()
XaxisPROES = list()
#Create global 2D diagnostics folder and extract current simulation name
DirSpectral = CreateNewFolder(Dir[l],'2DSpectral_Plots/') #Spatio-Temporal Folder
DirSpectral_ntor = CreateNewFolder(DirSpectral,variable+'_ntor='+str(ntor)) #Spatio-Temporal Images Folder
DirString = Dir[l].split('/')[-2] #Full Simulation Name
SubString = DirString.split('_')[-1] #Simulation Nickname
#Extract Kstep [-] & Time [ms] arrays from SEQ.harmonics & toroidal harmonics from energy_n.txt
SEQArray, KStepArray, TimeArray, ntorArray = ExtractMEGA_DataRanges(Dir[l], DataFile='harmonics')
DeltaKstep = KStepArray[1]-KStepArray[0] #KStep Interval [-]
DeltaTime = TimeArray[1]-TimeArray[0] #Time Interval [ms]
KStepMod = len(KStepArray)/len(SEQArray) #KStep indices per SEQ [-]
ntor_tot = ntorArray[2] #Total number of positive & negative modes (Inc n=0)
ntor_pos = ntorArray[1] #Number of positive modes (Ignoring n=0)
ntor0 = ntorArray[0] #ntor = 0, baseline equilibrium data
#Extract Energy_n outputs and header for plotting
#energy_n: [ntor][timestep]
Energy_n,Header_n = ExtractMEGA_Energy(Dir[l],'energy_n')
Energy_TimeArray = Energy_n[1] #Extract full time array [ms] for plotting
Energy_n = Energy_n[2::] #Remove KStep and Time arrays from array
#Extract toroidal mode number array index (ntorIdx) from requested mode number (ntor)
ntorIdx = Set_ntorIdx(ntor,ntorArray)
#Set Kstep ranges as requested - else default to max range
KStepRange,KStepStep = Set_KStepRange(KStepArray,setting_kstep)
SEQRange = Set_SEQRange(setting_SEQ)
#Extract Variablelabel for chosen variable
VariableLabel = VariableLabelMaker(variable) #Units='Perturbation [-]'
for j in tqdm( range(SEQRange[0],SEQRange[1]) ):
import logging
from typing import Dict, List, Optional, Union
import copy
import torch
import torch.nn.functional as F
from overrides import overrides
from allennlp.data import Vocabulary
from allennlp.common.params import Params
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, FeedForward, TimeDistributed
from allennlp.modules.span_extractors import EndpointSpanExtractor, SelfAttentiveSpanExtractor
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
# Import submodules.
from spanmb.models.ner import NERTagger
from spanmb.models.relation import RelationExtractor
from spanmb.data.dataset_readers import document
from spanmb.models.span_extractor import MaxSpanExtractor
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("spanmb")
class SpanMB(Model):
"""
TODO(dwadden) document me.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``text`` ``TextField`` we get as input to the model.
context_layer : ``Seq2SeqEncoder``
This layer incorporates contextual information for each word in the document.
feature_size: ``int``
The embedding size for all the embedded features, such as distances or span widths.
submodule_params: ``TODO(dwadden)``
A nested dictionary specifying parameters to be passed on to initialize submodules.
max_span_width: ``int``
The maximum width of candidate spans.
target_task: ``str``:
The task used to make early stopping decisions.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
module_initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the individual modules.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
display_metrics: ``List[str]``. A list of the metrics that should be printed out during model
training.
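For illustration only (hypothetical values): ``feedforward_params`` is expected to provide the
keys ``num_layers``, ``hidden_dims`` and ``dropout``, e.g. ``{"num_layers": 2, "hidden_dims": 150,
"dropout": 0.4}``, and ``loss_weights`` provides one weight per module, e.g.
``{"ner": 1.0, "relation": 1.0}``.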
"""
def __init__(self,
vocab: Vocabulary,
embedder: TextFieldEmbedder,
modules, # TODO(dwadden) Add type.
feature_size: int,
max_span_width: int,
target_task: str,
feedforward_params: Dict[str, Union[int, float]],
loss_weights: Dict[str, float],
initializer: InitializerApplicator = InitializerApplicator(),
module_initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
display_metrics: List[str] = None) -> None:
super(SpanMB, self).__init__(vocab, regularizer)
####################
# Create span extractor.
self._endpoint_span_extractor = EndpointSpanExtractor(
embedder.get_output_dim(),
combination="x,y",
num_width_embeddings=max_span_width,
span_width_embedding_dim=feature_size,
bucket_widths=False)
self._max_span_extractor = MaxSpanExtractor(
embedder.get_output_dim()
)
# max pooling on span and span width feature
# self._max_span_extractor = MaxSpanExtractor(
# embedder.get_output_dim(),
# num_width_embeddings=max_span_width,
# span_width_embedding_dim=feature_size,
# bucket_widths=False
# )
####################
# Set parameters.
self._embedder = embedder
self._loss_weights = loss_weights
self._max_span_width = max_span_width
self._display_metrics = self._get_display_metrics(target_task)
token_emb_dim = self._embedder.get_output_dim()
span_emb_dim = self._endpoint_span_extractor.get_output_dim() + self._max_span_extractor.get_output_dim()
####################
# Create submodules.
modules = Params(modules)
# Helper function to create feedforward networks.
def make_feedforward(input_dim):
return FeedForward(input_dim=input_dim,
num_layers=feedforward_params["num_layers"],
hidden_dims=feedforward_params["hidden_dims"],
activations=torch.nn.ReLU(),
dropout=feedforward_params["dropout"])
# Submodules
self._ner = NERTagger.from_params(vocab=vocab,
make_feedforward=make_feedforward,
span_emb_dim=span_emb_dim,
params=modules.pop("ner"))
self._relation = RelationExtractor.from_params(vocab=vocab,
make_feedforward=make_feedforward,
token_emb_dim=token_emb_dim,
span_emb_dim=span_emb_dim,
feature_size=feature_size,
params=modules.pop("relation"))
####################
# Initialize text embedder and all submodules
for module in [self._ner, self._relation]:
module_initializer(module)
initializer(self)
@staticmethod
def _get_display_metrics(target_task):
"""
The `target` is the name of the task used to make early stopping decisions. Show metrics
related to this task.
"""
lookup = {
"ner": [f"MEAN__{name}" for name in
["ner_precision", "ner_recall", "ner_f1"]],
"relation": [f"MEAN__{name}" for name in
["relation_precision", "relation_recall", "relation_f1"]]}
if target_task not in lookup:
raise ValueError(f"Invalied value {target_task} has been given as the target task.")
return lookup[target_task]
@staticmethod
def _debatch(x):
# TODO(dwadden) Get rid of this when I find a better way to do it.
return x if x is None else x.squeeze(0)
@overrides
def forward(self,
text,
spans,
metadata,
ner_labels=None,
ner_nested=None,
relation_labels=None):
"""
TODO(dwadden) change this.
"""
# In AllenNLP, AdjacencyFields are passed in as floats. This fixes it.
if relation_labels is not None:
relation_labels = relation_labels.long()
# TODO(dwadden) Multi-document minibatching isn't supported yet. For now, get rid of the
# extra dimension in the input tensors. Will return to this once the model runs.
if len(metadata) > 1:
raise NotImplementedError("Multi-document minibatching not yet supported.")
metadata = metadata[0]
spans = self._debatch(spans) # (n_sents, max_n_spans, 2)
ner_labels = self._debatch(ner_labels) # (n_sents, max_n_spans)
if ner_nested is not None:
ner_nested = self._debatch(ner_nested)
relation_labels = self._debatch(relation_labels) # (n_sents, max_n_spans, max_n_spans)
# print("batch_si=ze: ", spans.size(0))
# Encode using BERT, then debatch.
# Since the data are batched, we use `num_wrapping_dims=1` to unwrap the document dimension.
# (1, n_sents, max_sententence_length, embedding_dim)
# TODO(dwadden) Deal with the case where the input is longer than 512.
text_embeddings = self._embedder(text, num_wrapping_dims=1)
# (n_sents, max_n_wordpieces, embedding_dim)
text_embeddings = self._debatch(text_embeddings)
# (n_sents, max_sentence_length)
text_mask = self._debatch(util.get_text_field_mask(text, num_wrapping_dims=1).float())
sentence_lengths = text_mask.sum(dim=1).long() # (n_sents)
span_mask = (spans[:, :, 0] >= 0).float() # (n_sents, max_n_spans)
# SpanFields return -1 when they are used as padding. As we do some comparisons based on
# span widths when we attend over the span representations that we generate from these
# indices, we need them to be <= 0. This is only relevant in edge cases where the number of
# spans we consider after the pruning stage is >= the total number of spans, because in this
# case, it is possible we might consider a masked span.
spans = F.relu(spans.float()).long() # (n_sents, max_n_spans, 2)
# Shape: (batch_size, num_spans, 3 * encoding_dim + feature_size)
span_embeddings = self._endpoint_span_extractor(text_embeddings, spans)
max_span_embeddings = self._max_span_extractor(text_embeddings, spans)
span_embeddings = torch.cat([max_span_embeddings, span_embeddings], -1)
# Make calls out to the modules to get results.
output_ner = {'loss': 0}
output_relation = {'loss': 0}
# # Prune and compute span representations for relation module
# if self._loss_weights["relation"] > 0 or self._relation.rel_prop > 0:
# output_relation = self._relation.compute_representations(
# spans, span_mask, span_embeddings, text_embeddings, sentence_lengths, metadata)
# if self._relation.rel_prop > 0:
# output_relation = self._relation.relation_propagation(output_relation)
# span_embeddings = self.update_span_embeddings(span_embeddings, span_mask,
# output_relation["top_span_embeddings"],
# output_relation["top_span_mask"],
# output_relation["top_span_indices"])
# Prune and compute span representations for relation module
if self._loss_weights["relation"] > 0:
output_relation = self._relation.compute_representations(
spans, span_mask, span_embeddings, text_embeddings, sentence_lengths, metadata)
# Make predictions and compute losses for each module
if self._loss_weights['ner'] > 0:
output_ner = self._ner(
spans, span_mask, span_embeddings, sentence_lengths, ner_labels, ner_nested, metadata)
top_ner_predicted_labels, top_ner_gold_labels = self.get_top_span_ner_labels(
output_ner["predicted_ner"], output_ner["span_mask"], ner_labels,
output_relation["top_span_mask"], output_relation["top_span_indices"])
if self._loss_weights["relation"] > 0:
output_relation["top_ner_predicted_labels"] = top_ner_predicted_labels
output_relation["top_ner_gold_labels"] = top_ner_gold_labels
if self._loss_weights['relation'] > 0:
output_relation = self._relation.predict_labels(relation_labels, output_relation, metadata)
# if self._loss_weights['relation'] > 0:
# output_relation = self._relation(
# spans, span_mask, span_embeddings, sentence_lengths, relation_labels, metadata)
# Use `get` since there are some cases where the output dict won't have a loss - for
# instance, when doing prediction.
loss = (self._loss_weights['ner'] * output_ner.get("loss", 0) +
self._loss_weights['relation'] * output_relation.get("loss", 0))
# Multiply the loss by the weight multiplier for this document.
weight = metadata.weight if metadata.weight is not None else 1.0
loss *= torch.tensor(weight)
output_dict = dict(relation=output_relation,
ner=output_ner)
output_dict['loss'] = loss
output_dict["metadata"] = metadata
# torch.cuda.empty_cache()
return output_dict
def update_span_embeddings(self, span_embeddings, span_mask, top_span_embeddings,
top_span_mask, top_span_indices):
# TODO(Ulme) Speed this up by tensorizing
new_span_embeddings = span_embeddings.clone()
for sample_nr in range(len(top_span_mask)):
for top_span_nr, span_nr in enumerate(top_span_indices[sample_nr]):
if top_span_mask[sample_nr, top_span_nr] == 0 or span_mask[sample_nr, span_nr] == 0:
break
new_span_embeddings[sample_nr,
span_nr] = top_span_embeddings[sample_nr, top_span_nr]
return new_span_embeddings
def get_top_span_ner_labels(self, predicted_ner, span_mask, gold_ner_labels, top_span_mask, top_span_indices):
top_predicted_ner = top_span_mask.new_zeros([top_span_mask.size(0), top_span_mask.size(1)])
top_gold_ner = top_span_mask.new_zeros([top_span_mask.size(0), top_span_mask.size(1)])
for sample_nr in range(len(top_span_mask)):
for top_span_nr, span_nr in enumerate(top_span_indices[sample_nr]):
if top_span_mask[sample_nr, top_span_nr] == 0 or span_mask[sample_nr, span_nr] == 0:
break
top_predicted_ner[sample_nr, top_span_nr] = predicted_ner[sample_nr, span_nr]
if gold_ner_labels is not None:
top_gold_ner[sample_nr, top_span_nr] = gold_ner_labels[sample_nr, span_nr]
return top_predicted_ner, top_gold_ner
@overrides
def make_output_human_readable(self, output_dict: Dict[str, torch.Tensor]):
"""
Converts the list of spans and predicted antecedent indices into clusters
of spans for each element in the batch.
Parameters
----------
output_dict : ``Dict[str, torch.Tensor]``, required.
The result of calling :func:`forward` on an instance or batch of instances.
Returns
-------
The same output dictionary, but with an additional ``clusters`` key:
clusters : ``List[List[List[Tuple[int, int]]]]``
A nested list, representing, for each instance in the batch, the list of clusters,
which are in turn comprised of a list of (start, end) inclusive spans into the
original document.
"""
doc = copy.deepcopy(output_dict["metadata"])
if self._loss_weights["ner"] > 0:
for predictions, sentence in zip(output_dict["ner"]["predictions"], doc):
sentence.predicted_ner = predictions
if self._loss_weights["relation"] > 0:
for predictions, sentence in zip(output_dict["relation"]["predictions"], doc):
sentence.predicted_relations = predictions
return doc
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"""
Get all metrics from all modules. For the ones that shouldn't be displayed, prefix their
keys with an underscore.
"""
metrics_ner = self._ner.get_metrics(reset=reset)
metrics_relation = self._relation.get_metrics(reset=reset)
# Make sure that there aren't any conflicting names.
metric_names = (list(metrics_ner.keys()) + list(metrics_relation.keys()))
assert len(set(metric_names)) == len(metric_names)
all_metrics = dict(list(metrics_ner.items()) +
list(metrics_relation.items()))
# If no list of desired metrics given, display them all.
if self._display_metrics is None:
return all_metrics
# Otherwise only display the selected ones.
res = {}
for k, v in all_metrics.items():
if k in self._display_metrics:
res[k] = v
else:
# prefix hidden metrics with an underscore so they are not displayed
res["_" + k] = v
return res
"set_segment_records\(\) missing 2 required ",
"positional arguments: 'values' and 'file'",
)
),
self.database.set_segment_records,
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"delete_segment_records\(\) missing 2 required ",
"positional arguments: 'values' and 'file'",
)
),
self.database.delete_segment_records,
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"insert_segment_records\(\) missing 2 required ",
"positional arguments: 'values' and 'file'",
)
),
self.database.insert_segment_records,
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"recordlist_record_number\(\) takes from 2 to 4 ",
"positional arguments but 5 were given",
)
),
self.database.recordlist_record_number,
*(None, None, None, None),
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"recordlist_record_number_range\(\) takes from 2 to 5 ",
"positional arguments but 6 were given",
)
),
self.database.recordlist_record_number_range,
*(None, None, None, None, None),
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"recordlist_ebm\(\) takes from 2 to 3 ",
"positional arguments but 4 were given",
)
),
self.database.recordlist_ebm,
*(None, None, None),
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"get_table_connection\(\) missing 1 required positional ",
"argument: 'file'",
)
),
self.database.get_table_connection,
)
def test_02_get_primary_record(self):
self.assertEqual(self.database.get_primary_record("file1", None), None)
def test_03_get_primary_record(self):
self.assertEqual(self.database.get_primary_record("file1", 1), None)
def test_04_get_primary_record(self):
self.database.put("file1", None, "new value")
self.assertEqual(
self.database.get_primary_record("file1", 1), (1, "new value")
)
def test_05_remove_record_from_ebm(self):
self.assertRaisesRegex(
_sqlite.DatabaseError,
"Existence bit map for segment does not exist",
self.database.remove_record_from_ebm,
*("file1", 2),
)
def test_06_remove_record_from_ebm(self):
self.assertEqual(self.database.add_record_to_ebm("file1", 2), (0, 2))
self.assertEqual(
self.database.remove_record_from_ebm("file1", 2), (0, 2)
)
def test_07_add_record_to_ebm(self):
self.assertEqual(self.database.add_record_to_ebm("file1", 2), (0, 2))
self.assertEqual(self.database.add_record_to_ebm("file1", 4), (0, 4))
def test_08_get_high_record(self):
self.assertEqual(self.database.get_high_record("file1"), None)
def test_09_get_segment_records(self):
self.database.insert_segment_records((12,), "file1")
self.assertEqual(self.database.get_segment_records(1, "file1"), 12)
def test_10_get_segment_records(self):
self.database.insert_segment_records((12,), "file1")
self.assertRaisesRegex(
_sqlite.DatabaseError,
"Segment record 2 missing in 'file1'",
self.database.get_segment_records,
*(2, "file1"),
)
def test_11_set_segment_records(self):
self.database.insert_segment_records((12,), "file1")
self.database.set_segment_records((13, 1), "file1")
self.assertEqual(self.database.get_segment_records(1, "file1"), 13)
def test_12_delete_segment_records(self):
self.database.delete_segment_records((12,), "file1")
def test_13_insert_segment_records(self):
self.assertEqual(
self.database.insert_segment_records((12,), "file1"), 1
)
def test_14_recordset_record_number(self):
self.assertIsInstance(
self.database.recordlist_record_number("file1"),
recordset.RecordList,
)
def test_15_recordset_record_number(self):
self.assertIsInstance(
self.database.recordlist_record_number("file1", key=500),
recordset.RecordList,
)
def test_16_recordset_record_number(self):
cursor = self.database.dbenv.cursor()
statement = " ".join(
(
"insert into",
"file1",
"(",
"file1",
",",
"Value",
")",
"values ( ? , ? )",
)
)
values = 1, "Some value"
cursor.execute(statement, values)
statement = " ".join(
(
"insert into",
"file1__ebm",
"(",
"file1__ebm",
",",
"Value",
")",
"values ( ? , ? )",
)
)
values = 1, b"\x740" + b"\x00" * (
SegmentSize.db_segment_size_bytes - 1
)
cursor.execute(statement, values)
rl = self.database.recordlist_record_number("file1", key=1)
self.assertIsInstance(rl, recordset.RecordList)
self.assertEqual(rl.count_records(), 1)
def test_17_recordset_record_number_range(self):
self.assertIsInstance(
self.database.recordlist_record_number_range("file1"),
recordset.RecordList,
)
def test_18_recordset_record_number_range(self):
self.create_ebm()
rs = self.database.recordlist_record_number_range(
"file1", keystart=0, keyend=2000
)
self.assertIsInstance(rs, recordset.RecordList)
self.assertEqual(
rs[0].tobytes(),
b"\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
)
def test_19_recordset_record_number_range(self):
self.create_ebm()
rs = self.database.recordlist_record_number_range("file1", keystart=10)
self.assertIsInstance(rs, recordset.RecordList)
self.assertEqual(
rs[0].tobytes(),
b"\x00\x3f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
)
def test_20_recordset_record_number_range(self):
self.create_ebm()
rs = self.database.recordlist_record_number_range("file1", keyend=35)
self.assertIsInstance(rs, recordset.RecordList)
self.assertEqual(
rs[0].tobytes(),
b"\x7f\xff\xff\xff\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
)
def test_21_recordset_record_number_range(self):
self.create_ebm()
rs = self.database.recordlist_record_number_range(
"file1", keystart=10, keyend=35
)
self.assertIsInstance(rs, recordset.RecordList)
self.assertEqual(
rs[0].tobytes(),
b"\x00\x3f\xff\xff\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
)
def test_22_recordset_record_number_range(self):
self.create_ebm()
self.create_ebm_extra(2)
self.create_ebm_extra(3)
self.create_ebm_extra(4)
rs = self.database.recordlist_record_number_range(
"file1", keystart=170, keyend=350
)
self.assertIsInstance(rs, recordset.RecordList)
self.assertEqual(len(rs), 2)
self.assertEqual(
rs[1].tobytes(),
b"\x00\x00\x00\x00\x00\x3f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
)
self.assertEqual(
rs[2].tobytes(),
b"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfc\x00\x00\x00\x00",
)
def test_23_recordset_record_number_range(self):
self.create_ebm()
self.create_ebm_extra(2)
self.create_ebm_extra(3)
self.create_ebm_extra(4)
rs = self.database.recordlist_record_number_range(
"file1", keystart=350, keyend=170
)
self.assertIsInstance(rs, recordset.RecordList)
self.assertEqual(len(rs), 0)
def test_24_recordset_ebm(self):
self.assertIsInstance(
self.database.recordlist_ebm("file1"), recordset.RecordList
)
def test_25_recordset_ebm(self):
self.create_ebm()
self.assertIsInstance(
self.database.recordlist_ebm("file1"), recordset.RecordList
)
def test_26_get_table_connection(self):
self.assertIsInstance(
self.database.get_table_connection("file1"), dbe_module.Connection
)
def create_ebm(self):
cursor = self.database.dbenv.cursor()
statement = " ".join(
(
"insert into",
"file1__ebm",
"(",
"file1__ebm",
",",
"Value",
")",
"values ( ? , ? )",
)
)
values = 1, b"\x7f" + b"\xff" * (SegmentSize.db_segment_size_bytes - 1)
cursor.execute(statement, values)
def create_ebm_extra(self, segment):
cursor = self.database.dbenv.cursor()
statement = " ".join(
(
"insert into",
"file1__ebm",
"(",
"file1__ebm",
",",
"Value",
")",
"values ( ? , ? )",
)
)
values = (
segment,
b"\xff" + b"\xff" * (SegmentSize.db_segment_size_bytes - 1),
)
cursor.execute(statement, values)
class Database_find_values(_SQLiteOpen):
def setUp(self):
super().setUp()
self.valuespec = ValuesClause()
self.valuespec.field = "field1"
def test_01_find_values(self):
self.assertRaisesRegex(
TypeError,
"".join(
(
"find_values\(\) missing 2 required positional arguments: ",
"'valuespec' and 'file'",
)
),
self.database.find_values,
)
def test_02_find_values(self):
self.valuespec.above_value = "b"
self.valuespec.below_value = "d"
self.assertEqual(
[i for i in self.database.find_values(self.valuespec, "file1")], []
)
def test_03_find_values(self):
self.valuespec.above_value = "b"
self.valuespec.to_value = "d"
self.assertEqual(
[i for i in self.database.find_values(self.valuespec, "file1")], []
)
def test_04_find_values(self):
self.valuespec.from_value = "b"
self.valuespec.to_value = "d"
self.assertEqual(
[i for i in self.database.find_values(self.valuespec, "file1")], []
)
def test_05_find_values(self):
self.valuespec.from_value = "b"
self.valuespec.below_value = "d"
self.assertEqual(
[i for i in self.database.find_values(self.valuespec, "file1")], []
)
def test_06_find_values(self):
self.valuespec.above_value = "b"
self.assertEqual(
[i for i in self.database.find_values(self.valuespec, "file1")], []
)
def test_07_find_values(self):
self.valuespec.from_value = "b"
self.assertEqual(
[i for i in self.database.find_values(self.valuespec, "file1")], []
)
def test_08_find_values(self):
self.valuespec.to_value = "d"
self.assertEqual(
[i for i in self.database.find_values(self.valuespec, "file1")], []
)
def test_09_find_values(self):
self.valuespec.below_value = "d"
self.assertEqual(
[i for i in self.database.find_values(self.valuespec, "file1")], []
)
def test_10_find_values(self):
self.assertEqual(
[i for i in self.database.find_values(self.valuespec, "file1")], []
)
def test_11_find_values(self):
cursor = self.database.dbenv.cursor()
statement = " ".join(
(
"insert into",
"file1_field1",
"(",
"field1",
")",
"values ( ? )",
)
)
values = ("d",)
cursor.execute(statement, values)
self.assertEqual(
[i for i in self.database.find_values(self.valuespec, "file1")],
["d"],
)
class Database_make_recordset(_SQLiteOpen):
def setUp(self):
super().setUp()
segments = (
b"\x7f\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
b"\x00\x00\x00\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
b"\x00\x00\x00\x00\x00\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00",
b"\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\x00\x00\x00\x00\x00\x00",
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\x00\x00\x00\x00",
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\x00\x00",
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff",
b"\x00\x40\x00\x41",
b"\x00\x42\x00\x43\x00\x44",
)
self.segments = {}
keys = (
"a_o",
"aa_o",
"ba_o",
"bb_o",
"c_o",
"cep",
"deq",
)
self.keyvalues = {}
key_statement = " ".join(
(
"insert into file1_field1 (",
"field1",
",",
"Segment",
",",
"RecordCount",
",",
"file1",
")",
"values ( ? , ? , ? , ? )",
)
)
cursor = self.database.dbenv.cursor()
try:
for s in segments:
cursor.execute(
"insert into file1__segment ( RecordNumbers ) values ( ? )",
(s,),
)
self.segments[
cursor.execute(
"select last_insert_rowid() from file1__segment"
).fetchone()[0]
] = s
for e, k in enumerate(keys):
self.keyvalues[k] = e + 1
cursor.execute(
key_statement, (k, 0, 32 if e else 31, self.keyvalues[k])
)
self.keyvalues["tww"] = 8
cursor.execute(key_statement, ("tww", 0, 2, self.keyvalues["tww"]))
self.keyvalues["twy"] = 9
cursor.execute(key_statement, ("twy", 0, 2, self.keyvalues["twy"]))
cursor.execute(key_statement, ("one", 0, 1, 50))
cursor.execute(key_statement, ("nin", 0, 1, 100))
cursor.execute(key_statement, ("www", 0, 2, self.keyvalues["twy"]))
cursor.execute(key_statement, ("www", 1, 2, self.keyvalues["twy"]))
finally:
cursor.close()
def test_01(self):
self.assertRaisesRegex(
TypeError,
"".join(
(
"add_record_to_field_value\(\) missing 5 required ",
"positional arguments: 'file', 'field', 'key', 'segment', ",
"and 'record_number'",
)
),
self.database.add_record_to_field_value,
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"remove_record_from_field_value\(\) missing 5 required ",
"positional arguments: 'file', 'field', 'key', 'segment', ",
"and 'record_number'",
)
),
self.database.remove_record_from_field_value,
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"populate_segment\(\) missing 2 required ",
"positional arguments: 'segment_reference' and 'file'",
)
),
self.database.populate_segment,
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"recordlist_key_like\(\) takes from 3 to 5 ",
"positional arguments but 6 were given",
)
),
self.database.recordlist_key_like,
*(None, None, None, None, None),
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"recordlist_key\(\) takes from 3 to 5 ",
"positional arguments but 6 were given",
)
),
self.database.recordlist_key,
*(None, None, None, None, None),
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"recordlist_key_startswith\(\) takes from 3 to 5 ",
"positional arguments but 6 were given",
)
),
self.database.recordlist_key_startswith,
*(None, None, None, None, None),
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"recordlist_key_range\(\) takes from 3 to 8 ",
"positional arguments but 9 were given",
)
),
self.database.recordlist_key_range,
*(None, None, None, None, None, None, None, None),
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"recordlist_all\(\) takes from 3 to 4 ",
"positional arguments but 5 were given",
)
),
self.database.recordlist_all,
*(None, None, None, None),
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"unfile_records_under\(\) missing 3 required ",
"positional arguments: 'file', 'field', and 'key'",
)
),
self.database.unfile_records_under,
)
self.assertRaisesRegex(
TypeError,
"".join(
(
"file_records_under\(\) missing 4 required positional ",
"arguments: 'file', 'field', 'recordset', and 'key'",
)
),
self.database.file_records_under,
)
def test_02_add_record_to_field_value(self):
self.database.add_record_to_field_value(
"file1", "field1", "indexvalue", 1, 0
)
def test_03_add_record_to_field_value(self):
self.database.add_record_to_field_value(
"file1", "field1", "nin", 0, 99
)
def test_04_add_record_to_field_value(self):
self.database.add_record_to_field_value(
"file1", "field1", "twy", 0, 99
)
def test_05_add_record_to_field_value(self):
self.database.add_record_to_field_value(
"file1", "field1", "aa_o", 0, 99
)
def test_06_remove_record_from_field_value(self):
self.database.remove_record_from_field_value(
"file1", "field1", "indexvalue", 1, 0
)
def test_07_remove_record_from_field_value(self):
self.database.remove_record_from_field_value(
"file1", "field1", "nin", 0, 99
)
def test_08_remove_record_from_field_value(self):
self.database.remove_record_from_field_value(
"file1", "field1", "twy", 0, 68
)
def test_09_remove_record_from_field_value(self):
self.database.remove_record_from_field_value(
"file1", "field1", "bb_o", 0, 68
)
def test_10_remove_record_from_field_value(self):
self.database.remove_record_from_field_value(
"file1", "field1", "tww", 0, 65
)
def test_11_remove_record_from_field_value(self):
self.database.remove_record_from_field_value(
"file1", "field1", "one", 0, 50
)
def test_12_populate_segment(self):
s = self.database.populate_segment(("keyvalue", 2, 1, 3), "file1")
self.assertIsInstance(s, recordset.RecordsetSegmentInt)
def test_13_populate_segment(self):
ss = " ".join(
(
"select field1 , Segment , RecordCount , file1 from",
'file1_field1 where field1 == "one" and Segment == 0',
)
)
s = self.database.populate_segment(
self.database.dbenv.cursor().execute(ss).fetchone(), "file1"
)
self.assertIsInstance(s, recordset.RecordsetSegmentInt)
def test_14_populate_segment(self):
s = self.database.populate_segment(
("tww", 0, 2, self.keyvalues["tww"]), "file1"
)
self.assertIsInstance(s, recordset.RecordsetSegmentList)
# chi/plots/_time_series.py
#
# This file is part of the chi repository
# (https://github.com/DavAug/chi/) which is released under the
# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and
# full license details.
#
import numpy as np
import pandas as pd
import plotly.colors
import plotly.graph_objects as go
from chi import plots
class PDPredictivePlot(plots.SingleFigure):
"""
A figure class that visualises the predictions of a predictive
pharmacodynamic model.
Extends :class:`SingleFigure`.
Parameters
----------
updatemenu
Boolean flag that enables or disables interactive buttons, such as a
logarithmic scale switch for the y-axis.
"""
def __init__(self, updatemenu=True):
super(PDPredictivePlot, self).__init__(updatemenu)
def _add_data_trace(self, _id, times, measurements, color):
"""
Adds a scatter plot of an individual's pharmacodynamics to the figure.
"""
self._fig.add_trace(
go.Scatter(
x=times,
y=measurements,
name="ID: %d" % _id,
showlegend=True,
mode="markers",
marker=dict(
symbol='circle',
color=color,
opacity=0.7,
line=dict(color='black', width=1))))
def _add_prediction_scatter_trace(self, times, samples):
"""
Adds scatter plot of samples from the predictive model.
"""
# Get colour (light blueish)
color = plotly.colors.qualitative.Pastel2[1]
# Add trace
self._fig.add_trace(
go.Scatter(
x=times,
y=samples,
name="Predicted samples",
showlegend=True,
mode="markers",
marker=dict(
symbol='circle',
color=color,
opacity=0.7,
line=dict(color='black', width=1))))
def _add_prediction_bulk_prob_trace(self, data):
"""
Adds the bulk probabilities as two line plots (one for upper and lower
limit) and shaded area to the figure.
"""
# Construct times that go from min to max and back to min
# (Important for shading with 'toself')
times = data['Time'].unique()
times = np.hstack([times, times[::-1]])
# Get unique bulk probabilities and sort in descending order
bulk_probs = data['Bulk probability'].unique()
bulk_probs[::-1].sort()
# Get colors (shift start a little bit, because 0th level is too light)
n_traces = len(bulk_probs)
shift = 2
colors = plotly.colors.sequential.Blues[shift:shift+n_traces]
# Add traces
for trace_id, bulk_prob in enumerate(bulk_probs):
# Get relevant upper and lower percentiles
mask = data['Bulk probability'] == bulk_prob
reduced_data = data[mask]
upper = reduced_data['Upper'].to_numpy()
lower = reduced_data['Lower'].to_numpy()
values = np.hstack([upper, lower[::-1]])
# Add trace
self._fig.add_trace(go.Scatter(
x=times,
y=values,
line=dict(width=1, color=colors[trace_id]),
fill='toself',
legendgroup='Model prediction',
name='Predictive model',
text="%s Bulk" % bulk_prob,
hoverinfo='text',
showlegend=True if trace_id == n_traces-1 else False))
def _compute_bulk_probs(self, data, bulk_probs, time_key, sample_key):
"""
Computes the upper and lower percentiles from the predictive model
samples, corresponding to the provided bulk probabilities.
"""
# Create container for percentiles
container = pd.DataFrame(columns=[
'Time', 'Upper', 'Lower', 'Bulk probability'])
# Translate bulk probabilities into percentiles
percentiles = []
for bulk_prob in bulk_probs:
lower = 0.5 - bulk_prob / 2
upper = 0.5 + bulk_prob / 2
percentiles.append([bulk_prob, lower, upper])
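# e.g. a bulk probability of 0.9 maps to lower = 0.05 and upper = 0.95,
# i.e. the 5th and 95th percentiles of the samples at each time point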
# Get unique times
unique_times = data[time_key].unique()
# Fill container with percentiles for each time
for time in unique_times:
# Mask relevant data
mask = data[time_key] == time
reduced_data = data[mask]
# Get percentiles
percentile_df = reduced_data[sample_key].rank(
pct=True)
for item in percentiles:
bulk_prob, lower, upper = item
# Get biomarker value corresponding to percentiles
mask = percentile_df <= lower
biom_lower = reduced_data[mask][sample_key].max()
mask = percentile_df >= upper
biom_upper = reduced_data[mask][sample_key].min()
# Append percentiles to container
# (pandas.DataFrame.append was removed in pandas 2.x, so use pd.concat instead)
container = pd.concat([container, pd.DataFrame({
'Time': [time],
'Lower': [biom_lower],
'Upper': [biom_upper],
'Bulk probability': [str(bulk_prob)]})], ignore_index=True)
return container
def add_data(
self, data, observable=None, id_key='ID', time_key='Time',
obs_key='Observable', value_key='Value'):
"""
Adds pharmacodynamic time series data of (multiple) individuals to
the figure.
Expects a :class:`pandas.DataFrame` with an ID, a time, an
observable and a value column, and adds a scatter plot of the
measured time series to the figure. Each individual receives a
unique colour.
Parameters
----------
data
A :class:`pandas.DataFrame` with the time series PD data in the form of
an ID, time, observable and value column.
observable
The predicted observable. This argument is used to determine the
relevant rows in the dataframe. If ``None``, the first observable
in the observable column is selected.
id_key
Key label of the :class:`DataFrame` which specifies the ID column.
The ID refers to the identity of an individual. Defaults to
``'ID'``.
time_key
Key label of the :class:`DataFrame` which specifies the time
column. Defaults to ``'Time'``.
obs_key
Key label of the :class:`DataFrame` which specifies the
observable column. Defaults to ``'Observable'``.
value_key
Key label of the :class:`DataFrame` which specifies the column of
the measured values. Defaults to ``'Value'``.
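A minimal usage sketch (``measurements_df`` is a hypothetical :class:`pandas.DataFrame`
with 'ID', 'Time', 'Observable' and 'Value' columns)::
fig = PDPredictivePlot()
fig.add_data(measurements_df)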
"""
# Check input format
if not isinstance(data, pd.DataFrame):
raise TypeError(
'Data has to be pandas.DataFrame.')
for key in [id_key, time_key, obs_key, value_key]:
if key not in data.keys():
raise ValueError(
'Data does not have the key <' + str(key) + '>.')
# Default to first biomarker, if observable is not specified
biom_types = data[obs_key].unique()
if observable is None:
observable = biom_types[0]
if observable not in biom_types:
raise ValueError(
'The observable could not be found in the observable column.')
# Mask data for observable
mask = data[obs_key] == observable
data = data[mask]
# Get a colour scheme
colors = plotly.colors.qualitative.Plotly
n_colors = len(colors)
# Fill figure with scatter plots of individual data
ids = data[id_key].unique()
for index, _id in enumerate(ids):
# Get individual data
mask = data[id_key] == _id
times = data[time_key][mask]
measurements = data[value_key][mask]
color = colors[index % n_colors]
# Create Scatter plot
self._add_data_trace(_id, times, measurements, color)
def add_prediction(
self, data, observable=None, bulk_probs=[0.9], time_key='Time',
obs_key='Observable', value_key='Value'):
r"""
Adds the prediction to the figure.
Expects a :class:`pandas.DataFrame` with a time, an observable and a
value column. The time column determines the times of the
measurements and the value column the measured value.
The observable column determines the observable.
A list of bulk probabilities ``bulk_probs`` can be specified, which are
then added as area to the figure. The corresponding upper and lower
percentiles are estimated from the ranks of the provided
samples.
.. warning::
For low sample sizes the illustrated bulk probabilities may deviate
significantly from the theoretical bulk probabilities. The upper
and lower limit are determined from the rank of the samples for
each time point.
Parameters
----------
data
            A :class:`pandas.DataFrame` with the time series PD simulation in the
            form of a time and observable column.
observable
The predicted observable. This argument is used to determine the
relevant rows in the dataframe. If ``None``, the first observable
in the observable column is selected.
bulk_probs
A list of bulk probabilities that are illustrated in the
figure. If ``None`` the samples are illustrated as a scatter plot.
time_key
Key label of the :class:`pandas.DataFrame` which specifies the time
column. Defaults to ``'Time'``.
obs_key
Key label of the :class:`pandas.DataFrame` which specifies the
observable column. Defaults to ``'Observable'``.
value_key
Key label of the :class:`pandas.DataFrame` which specifies the
value column. Defaults to ``'Value'``.
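        Example (illustrative sketch; ``fig`` and ``samples_df`` are assumptions,
        not part of this module)::
            # bulk_probs=[0.9] shades the band between the 5th and the 95th
            # percentile of the samples at each time point.
            fig.add_prediction(samples_df, bulk_probs=[0.3, 0.6, 0.9])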
"""
# Check input format
if not isinstance(data, pd.DataFrame):
raise TypeError(
'Data has to be pandas.DataFrame.')
for key in [time_key, obs_key, value_key]:
if key not in data.keys():
raise ValueError(
'Data does not have the key <' + str(key) + '>.')
        # Default to first observable, if observable is not specified
biom_types = data[obs_key].dropna().unique()
if observable is None:
observable = biom_types[0]
if observable not in biom_types:
raise ValueError(
'The observable could not be found in the observable column.')
# Mask data for observable
mask = data[obs_key] == observable
data = data[mask]
        # Add samples as scatter plot if no bulk probabilities are provided, and
# terminate method
if bulk_probs is None:
times = data[time_key]
samples = data[value_key]
self._add_prediction_scatter_trace(times, samples)
return None
# Not more than 7 bulk probabilities are allowed (Purely aesthetic
# criterion)
if len(bulk_probs) > 7:
raise ValueError(
'At most 7 different bulk probabilities can be illustrated at '
'the same time.')
# Make sure that bulk probabilities are between 0 and 1
bulk_probs = [float(probability) for probability in bulk_probs]
for probability in bulk_probs:
if (probability < 0) or (probability > 1):
raise ValueError(
                    'The provided bulk probabilities have to be between 0 and 1.')
# Add bulk probabilities to figure
percentile_df = self._compute_bulk_probs(
data, bulk_probs, time_key, value_key)
self._add_prediction_bulk_prob_trace(percentile_df)
class PKPredictivePlot(plots.SingleSubplotFigure):
"""
A figure class that visualises the predictions of a predictive
pharmacokinetic model.
Extends :class:`SingleSubplotFigure`.
Parameters
----------
updatemenu
Boolean flag that enables or disables interactive buttons, such as a
logarithmic scale switch for the y-axis.
"""
def __init__(self, updatemenu=True):
super(PKPredictivePlot, self).__init__()
self._create_template_figure(
rows=2, cols=1, shared_x=True, row_heights=[0.2, 0.8])
# Define legend name of prediction
self._prediction_name = 'Predictive model'
if updatemenu:
self._add_updatemenu()
def _add_dose_trace(
            self, _id, times,
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import numpy as np
import pandas as pd
import warnings
from sklearn.linear_model import LinearRegression
import scipy.cluster.hierarchy as sch
import datetime
import random
class backtest_model:
"""
    Given a user-defined portfolio construction strategy (a function that takes in stock-related data and returns portfolio weights) and
    the data that the user wishes the strategy to be tested on, calculate several evaluation metrics of the portfolio, including
    net_returns, Sharpe ratio, certainty equivalent returns, turnover, etc.
Various inputs can be modified to suit the needs of strategy and backtesting scenarios, such as price-impact models,
transaction costs, etc.
Initiate the model with the strategy function, and clarify involved data types needed, whose sequence MUST be consistent
with that of the list of dataframes used inside strategy function
:param strategy: user-defined function that serves as portfolio construction strategy
:type strategy: function
:param involved_data_type: a list of strings that indicate the type of data {'price','return','ex_return'} used in the strategy, the order of the strings will be the order that data are passed to the strategy
:type involved_data_type: list
    :param need_extra_data: indicates whether the strategy needs extra_data (data other than {'price','return','ex_return'}) to function. Note: 1. the datetime index of extra_data must match that of the provided data. 2. change-of-frequency functionality will be suspended if extra data is needed
:type need_extra_data: bool
    :param trace_back: indicates whether the strategy needs to trace back to past portfolios to function. Note: please handle the boundary situation where the list of past portfolios is empty in the strategy function
:type trace_back: bool
:param name: name of the strategy to be tested
:type name: str
    :param missing_val: indicates whether the user's strategy function can handle missing values in the data on its own. True means the function can deal with missing values. False means it cannot
:type missing_val: bool
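    Example (illustrative sketch; ``equal_weight`` below is a hypothetical strategy,
    not part of this library)::
        def equal_weight(list_df):
            n = list_df[0].shape[1]
            return np.ones(n) / n
        model = backtest_model(equal_weight, ['ex_return'], name='equal weight portfolio')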
"""
def __init__(self, strategy, involved_data_type, need_extra_data=False, trace_back=False, name='Unnamed', missing_val=False):
"""
Initiate the model with the strategy function, and clarify involved data types needed, whose sequence MUST be consistent
with that of the list of dataframes used inside strategy function
:param strategy: user-defined function that serves as portfolio construction strategy
:type strategy: function
:param involved_data_type: a list of strings that indicate the type of data {'price','return','ex_return'} used in the strategy, the order of the strings will be the order that data are passed to the strategy
:type involved_data_type: list
        :param need_extra_data: indicates whether the strategy needs extra_data (data other than {'price','return','ex_return'}) to function. Note: 1. the datetime index of extra_data must match that of the provided data. 2. change-of-frequency functionality will be suspended if extra data is needed
:type need_extra_data: bool
        :param trace_back: indicates whether the strategy needs to trace back to past portfolios to function. Note: please handle the boundary situation where the list of past portfolios is empty in the strategy function
:type trace_back: bool
:param name: name of the strategy to be tested
:type name: str
        :param missing_val: indicates whether the user's strategy function can handle missing values in the data on its own. True means the function can deal with missing values. False means it cannot. A wrapper function would be applied to the strategy function to deal with missing data. It will only pass in columns with complete data and assign weight 0 to the other assets while keeping the relative positions the same. Warning: 1. The wrapper will slow the running speed significantly. 2. The wrapper does not cover missing data in "extra_data".
:type missing_val: bool
"""
def wrapper(function, list_df, extra_data=pd.DataFrame(), historical_portfolios=pd.DataFrame()):
length = list_df[0].shape[1]
for frame in list_df:
if length >= len(frame.columns[frame.isna().any() == False]):
length = len(frame.columns[frame.isna().any() == False])
position_nan = frame.isna().any().values
w = np.zeros(list_df[0].shape[1])
if need_extra_data:
if trace_back:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df],extra_data, historical_portfolios)
else:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df],extra_data)
else:
if trace_back:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df],historical_portfolios)
else:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df])
return w
if not missing_val:
if name not in ['naive allocation portfolio',
'inverse variance allocation portfolio',
'min. variance allocation portfolio',
'basic mean-variance allocation portfolio',
'Fama-French 3-factor model portfolio',
'hierarchical-risk-parity portfolio',
'Bayes_Stein_shrinkage portfolio']:
warnings.warn('The library will deal with missing data. Running speed will be significantly reduced!')
if need_extra_data:
if trace_back:
self.__strategy = lambda x,y,z: wrapper(strategy, x,extra_data=y,historical_portfolios=z)
else:
self.__strategy = lambda x,y: wrapper(strategy, x,extra_data=y)
else:
if trace_back:
self.__strategy = lambda x,z: wrapper(strategy, x,historical_portfolios=z)
else:
self.__strategy = lambda x: wrapper(strategy, x)
else:
self.__strategy = strategy
if type(involved_data_type) != list:
raise Exception('"involved_data_type" must be given in a list')
else:
self.__involved_data_type = involved_data_type
if type(need_extra_data) != bool:
raise Exception('"need_extra_data" must be a bool variable')
else:
self.__need_extra_data = need_extra_data
if type(trace_back) != bool:
raise Exception('"trace_back" must be a bool variable')
else:
self.__trace_back = trace_back
if type(name) != str:
raise Exception('"name" must be a string variable')
else:
self.name = name
self.__last_test_frequency = None
self.__last_test_portfolios = None
self.__price_impact = False
self.__sharpe = None
self.__ceq = None
self.__average_turnover = None
self.__total_turnover = None
self.__net_returns = None
self.__net_excess_returns = None
# function to prepare data, including change of frequency, convert between price, return and ex_return
def __prepare_data(self, data, freq_data, data_type, rf, interval, window, freq_strategy,
volume=pd.DataFrame(), price_impact=False):
if not isinstance(data, pd.DataFrame):
raise Exception('Please provide correct format of test data!')
try:
data.index = pd.to_datetime(data.index)
except:
print(
'Invalid index provided in your test data, please make sure that index is in compatible datetime format')
volume.index = pd.to_datetime(volume.index)
data = data.copy()
if data_type == 'return':
if freq_data != freq_strategy:
warnings.warn(
                    'data_type==return with interval>1 or change of frequency; expect a large amount of computational error')
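                # Change of frequency: compound the period returns into a cumulative
                # index, resample it to the strategy frequency, then convert back to
                # period returns via pct_change (hence the warning above).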
data['###rf'] = rf # add 'rf' to the dataframe to go through transformation together
data = (1 + data).apply(lambda x: np.cumprod(x))
data = data.resample(freq_strategy).ffill().fillna(method='ffill').pct_change(fill_method=None).dropna(axis=0, how='all')
normal_return_df = data.iloc[:,:-1]
risk_free_df=data.iloc[:,-1]
excess_return_df = normal_return_df.sub(risk_free_df.values, axis=0).dropna(axis=0, how='all')
return (normal_return_df, excess_return_df, risk_free_df,
pd.DataFrame(index=normal_return_df.index))
else:
normal_return_df = data
excess_return_df = normal_return_df.sub(rf.values, axis=0)
return (normal_return_df, excess_return_df, rf.loc[normal_return_df.index],
pd.DataFrame(index=normal_return_df.index))
elif data_type == 'ex_return':
if freq_data != freq_strategy:
warnings.warn(
                    'data_type==ex_return with interval>1 or change of frequency; expect a large amount of computational error')
data = data.add(rf, axis=0)
data['###rf'] = rf # add 'rf' to the dataframe to go through transformation together
data = (1 + data).apply(lambda x: np.cumprod(x))
data = data.resample(freq_strategy).ffill().fillna(method='ffill').pct_change(fill_method=None).dropna(axis=0, how='all')
normal_return_df = data.iloc[:, :-1]
risk_free_df = data.iloc[:, -1]
excess_return_df = normal_return_df.sub(risk_free_df.values, axis=0).dropna(axis=0, how='all')
return (normal_return_df, excess_return_df, risk_free_df,
pd.DataFrame(index=normal_return_df.index))
else:
excess_return_df = data
normal_return_df = excess_return_df.add(rf, axis=0)
return (normal_return_df, excess_return_df, rf.loc[normal_return_df.index],
pd.DataFrame(index=normal_return_df.index))
elif data_type == 'price':
#data['###rf'] = rf # add 'rf' to the dataframe to go through transformation together
rf_df=np.cumprod(1+rf)
if freq_data != freq_strategy:
data = data.resample(freq_strategy).ffill().fillna(method='ffill')
rf_df=rf_df.resample(freq_strategy).ffill().fillna(method='ffill')
if price_impact:
volume = volume.resample(freq_strategy).mean()
normal_return_df = data.pct_change(fill_method=None).dropna(axis=0, how='all')
risk_free_df=rf_df.pct_change(fill_method=None).dropna(axis=0,how='all').loc[normal_return_df.index]
excess_return_df = normal_return_df.sub(risk_free_df.values, axis=0)
if price_impact:
return (normal_return_df, excess_return_df, volume.loc[normal_return_df.index],
risk_free_df,
data.loc[normal_return_df.index])
else:
return (normal_return_df, excess_return_df, risk_free_df,
data.loc[normal_return_df.index])
# rebalance function to be applied to each rolling window of length (window)
def __rebalance(self, ex_return_df, normal_return_df, price_df, window, extra_data=None):
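        # Apply the strategy to every rolling window of `window` periods; each call
        # produces the portfolio weights for the next rebalancing date. `map` routes
        # each entry of involved_data_type to the corresponding dataframe.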
historical_portfolios = []
map = {'price': price_df, 'ex_return': ex_return_df, 'return': normal_return_df}
if self.__need_extra_data:
if self.__trace_back:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type],
extra_data.loc[df.index],
historical_portfolios))
else:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type],
extra_data.loc[df.index]))
else:
if self.__trace_back:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type],
historical_portfolios))
else:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type]))
return historical_portfolios
def __test_price_impact(self, data, freq_data, data_type, rf, interval, window, freq_strategy, ptc_buy,
ptc_sell, ftc, volume, c, initial_wealth, extra_data, price_impact_model='default',power=0.6):
# prepare data
normal_return_df, excess_return_df, volume, risk_free_rate, price_df = self.__prepare_data(data, freq_data,
data_type, rf,
interval, window,
freq_strategy,
volume,
price_impact=True)
T = excess_return_df.shape[0] # length of dataset
N = excess_return_df.shape[1] # number of assets
if window < N:
warnings.warn('window length smaller than the number of assets, may not get feasible portfolios')
        if window >= T
whitespace before newline
"""
# Arrange
source_markdown = """a![Foo](/uri\a\a
"testing")a
---""".replace(
"\a", " "
)
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testing:Foo::::Foo:False:":: \n:]',
"[text(2,11):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Foo" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_64b():
"""
Test case extra 64b: variation of 64 with whitespace after newline
"""
# Arrange
source_markdown = """a![Foo](/uri
"testing")a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testing:Foo::::Foo:False:"::\n :]',
"[text(2,12):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Foo" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_64c():
"""
Test case extra 64c: variation of 64 with whitespace before and after newline
"""
# Arrange
source_markdown = """a![Foo](/uri\a\a
"testing")a
---""".replace(
"\a", " "
)
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testing:Foo::::Foo:False:":: \n :]',
"[text(2,12):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Foo" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_65():
"""
Test case extra 65: SetExt heading with inline image with newline after the URI and no text
"""
# Arrange
source_markdown = """a![Foo](/uri
)a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
"[image(1,2):inline:/uri::Foo::::Foo:False:::\n:]",
"[text(2,2):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Foo" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_66():
"""
Test case extra 66: SetExt heading with inline image with newline in the title
"""
# Arrange
source_markdown = """a![Foo](/uri "test
ing")a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:test\ning:Foo::::Foo:False:":: :]',
"[text(2,6):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Foo" title="test\ning" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_67():
"""
Test case extra 67: SetExt heading with inline image with newline after the title
"""
# Arrange
source_markdown = """a![Foo](/uri "testing"
)a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testing:Foo::::Foo:False:":: :\n]',
"[text(2,2):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Foo" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_67a():
"""
Test case extra 67a: variation of 67 with whitespace before newline
"""
# Arrange
source_markdown = """a![Foo](/uri "testing"\a\a
)a
---""".replace(
"\a", " "
)
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testing:Foo::::Foo:False:":: : \n]',
"[text(2,2):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Foo" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_67b():
"""
Test case extra 67b: variation of 67 with whitespace after newline
"""
# Arrange
source_markdown = """a![Foo](/uri "testing"
)a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testing:Foo::::Foo:False:":: :\n ]',
"[text(2,5):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Foo" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_67c():
"""
Test case extra 67c: variation of 67 with whitespace before and after newline
"""
# Arrange
source_markdown = """a![Foo](/uri "testing"\a\a
)a
---""".replace(
"\a", " "
)
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testing:Foo::::Foo:False:":: : \n ]',
"[text(2,5):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Foo" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_68x():
"""
Test case extra 68: SetExt heading with link containing label with replacement
"""
# Arrange
source_markdown = """a[Foβo](/uri "testing")a
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[link(1,2):inline:/uri:testing::::Foβo:False:":: :]',
"[text(1,3):Fo\aβ\aβ\ao:]",
"[end-link::]",
"[text(1,29):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<a href="/uri" title="testing">Foβo</a>a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_68a():
"""
Test case extra 68a: variation of 68 without special characters
"""
# Arrange
source_markdown = """a[Foo](/uri "testing")a
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[link(1,2):inline:/uri:testing::::Foo:False:":: :]',
"[text(1,3):Foo:]",
"[end-link::]",
"[text(1,23):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<a href="/uri" title="testing">Foo</a>a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_68b():
"""
Test case extra 68b: variation of 68 with newline before special characters
"""
# Arrange
source_markdown = """a[Fo
βo](/uri "testing")a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[link(1,2):inline:/uri:testing::::Fo\nβo:False:":: :]',
"[text(1,3):Fo\n\aβ\aβ\ao::\n]",
"[end-link::]",
"[text(2,25):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<a href="/uri" title="testing">Fo\nβo</a>a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_69():
"""
Test case extra 69: SetExt heading with link containing label with backslash
"""
# Arrange
source_markdown = """a[Fo\\]o](/uri "testing")a
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[link(1,2):inline:/uri:testing::::Fo\\]o:False:":: :]',
"[text(1,3):Fo\\\b]o:]",
"[end-link::]",
"[text(1,25):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<a href="/uri" title="testing">Fo]o</a>a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_69a():
"""
Test case extra 69a: variation of 69 with newline before special characters
"""
# Arrange
source_markdown = """a[Fo
\\]o](/uri "testing")a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[link(1,2):inline:/uri:testing::::Fo\n\\]o:False:":: :]',
"[text(1,3):Fo\n\\\b]o::\n]",
"[end-link::]",
"[text(2,21):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<a href="/uri" title="testing">Fo\n]o</a>a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_70():
"""
Test case extra 70: SetExt heading with link containing uri with space
"""
# Arrange
source_markdown = """a[Foo](</my uri> "testing")a
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[link(1,2):inline:/my%20uri:testing:/my uri:::Foo:True:":: :]',
"[text(1,3):Foo:]",
"[end-link::]",
"[text(1,28):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<a href="/my%20uri" title="testing">Foo</a>a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_70a():
"""
Test case extra 70a: variation of 70 with newline before special characters, rendering it invalid
"""
# Arrange
source_markdown = """a[Foo](</my
uri> "testing")a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
"[text(1,2):[:]",
"[text(1,3):Foo:]",
"[text(1,6):]:]",
'[text(1,7):(\a<\a<\a/my\nuri\a>\a>\a \a"\a"\atesting\a"\a"\a)a::\n \x02]',
"[end-setext::]",
]
expected_gfm = """<h2>a[Foo](</my\nuri> "testing")a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_71():
"""
Test case extra 71: SetExt heading with link containing title with replacement
"""
# Arrange
source_markdown = """a[Foo](/uri "testβing")a
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[link(1,2):inline:/uri:testβing::testβing::Foo:False:":: :]',
"[text(1,3):Foo:]",
"[end-link::]",
"[text(1,29):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<a href="/uri" title="testβing">Foo</a>a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_71a():
"""
Test case extra 71a: variation of 71 with newline before special characters
"""
# Arrange
source_markdown = """a[Foo](/uri "test
βing")a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[link(1,2):inline:/uri:test\nβing::test\nβing::Foo:False:":: :]',
"[text(1,3):Foo:]",
"[end-link::]",
"[text(2,12):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<a href="/uri" title="test\nβing">Foo</a>a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_72():
"""
Test case extra 72: SetExt heading with link containing title with backslash
"""
# Arrange
source_markdown = """a[Foo](/uri "test\\#ing")a
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[link(1,2):inline:/uri:test#ing::test\\#ing::Foo:False:":: :]',
"[text(1,3):Foo:]",
"[end-link::]",
"[text(1,25):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<a href="/uri" title="test#ing">Foo</a>a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_72a():
"""
Test case extra 72a: variation of 72 with newline before special characters
"""
# Arrange
source_markdown = """a[Foo](/uri "test
\\#ing")a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[link(1,2):inline:/uri:test\n#ing::test\n\\#ing::Foo:False:":: :]',
"[text(1,3):Foo:]",
"[end-link::]",
"[text(2,8):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<a href="/uri" title="test\n#ing">Foo</a>a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_73():
"""
Test case extra 73: SetExt heading with image containing label with replacement
"""
# Arrange
source_markdown = """a![Foβo](/uri "testing")a
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testing:Foβo::::Foβo:False:":: :]',
"[text(1,30):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Foβo" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_73a():
"""
Test case extra 73a: variation of 73 without special characters
"""
# Arrange
source_markdown = """a![Foo](/uri "testing")a
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testing:Foo::::Foo:False:":: :]',
"[text(1,24):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Foo" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_73b():
"""
    Test case extra 73b: variation of 73 with newline before special characters
"""
# Arrange
source_markdown = """a![Fo
βo](/uri "testing")a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testing:Fo\nβo::::Fo\nβo:False:":: :]',
"[text(2,25):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Fo\nβo" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_74():
"""
Test case extra 74: SetExt heading with image containing label with backslash
"""
# Arrange
source_markdown = """a![Fo\\]o](/uri "testing")a
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testing:Fo]o::::Fo\\]o:False:":: :]',
"[text(1,26):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Fo]o" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_74a():
"""
Test case extra 74a: variation of 74 with newline before special characters
"""
# Arrange
source_markdown = """a![Fo
\\]o](/uri "testing")a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testing:Fo\n]o::::Fo\n\\]o:False:":: :]',
"[text(2,21):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Fo\n]o" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_75():
"""
Test case extra 75: SetExt heading with image containing uri with space
"""
# Arrange
source_markdown = """a![Foo](</my uri> "testing")a
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/my%20uri:testing:Foo:/my uri:::Foo:True:":: :]',
"[text(1,29):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/my%20uri" alt="Foo" title="testing" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_75a():
"""
Test case extra 75a: variation of 75 with newline before special characters, invalidating it
"""
# Arrange
source_markdown = """a![Foo](</my
uri> "testing")a
---"""
expected_tokens = [
"[setext(3,1):-:3::(1,1)]",
"[text(1,1):a:]",
"[text(1,2):![:]",
"[text(1,4):Foo:]",
"[text(1,7):]:]",
'[text(1,8):(\a<\a<\a/my\nuri\a>\a>\a \a"\a"\atesting\a"\a"\a)a::\n \x02]',
"[end-setext::]",
]
expected_gfm = """<h2>a![Foo](</my\nuri> "testing")a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_76():
"""
Test case extra 76: SetExt heading with image containing title with replacement
"""
# Arrange
source_markdown = """a![Foo](/uri "testβing")a
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):a:]",
'[image(1,2):inline:/uri:testβing:Foo::testβing::Foo:False:":: :]',
"[text(1,30):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/uri" alt="Foo" title="testβing" />a</h2>"""
# Act & Assert
# =========================================================================================================================
# File Name : preprocessing.py
# -------------------------------------------------------------------------------------------------------------------------
# Purpose           : Purpose of this script is to remove both irrelevant and repetitive keywords
# Author : <NAME>
# Co-Author : <NAME> and <NAME>
# Creation Date : 11-June-2020
# History
# -------------------------------------------------------------------------------------------------------------------------
# Date | Author | Co-Author | Remark
# 11-June-2020 | <NAME> | <NAME> and <NAME> | Initial Release
# =========================================================================================================================
# =========================================================================================================================
# Import required Module / Packages
# -------------------------------------------------------------------------------------------------------------------------
import nltk
import re
from bs4 import BeautifulSoup
import unicodedata
from contractions import CONTRACTION_MAP
from nltk.corpus import wordnet
from nltk.tokenize.toktok import ToktokTokenizer
import en_core_web_sm
from nltk.corpus import words
engwords = words.words()
import traceback
###########################################################################################################################
# Author : <NAME>
# Co-Author : <NAME> and <NAME>
# Modified :
# Reviewer :
# Functionality : Tokenizing the keywords
###########################################################################################################################
tokenizer = ToktokTokenizer()
stopword_list = nltk.corpus.stopwords.words('english')
# nlp = spacy.load('en', parse=True, tag=True, entity=True)
nlp = en_core_web_sm.load()
# nlp_vec = spacy.load('en_vectors_web_lg', parse=True, tag=True, entity=True)
###########################################################################################################################
# Author : <NAME>
# Co-Author : <NAME> and <NAME>
# Modified :
# Reviewer :
# Functionality     : Remove HTML tags
###########################################################################################################################
def strip_html_tags(text):
soup = BeautifulSoup(text, "html.parser")
if bool(soup.find()):
[s.extract() for s in soup(['iframe', 'script'])]
stripped_text = soup.get_text()
stripped_text = re.sub(r'[\r|\n|\r\n]+', '\n', stripped_text)
stripped_text = re.sub(r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''', " ", stripped_text)
else:
stripped_text = text
# print('Strip html tags completed')
return stripped_text
###########################################################################################################################
# Author : <NAME>
# Co-Author : <NAME> and <NAME>
# Modified :
# Reviewer :
# Functionality     : Stemming of keywords, i.e. reducing words to their root form
###########################################################################################################################
def simple_porter_stemming(text):
ps = nltk.porter.PorterStemmer()
text = ' '.join([ps.stem(word) for word in text.split()])
# print('Stemming completed')
return text
###########################################################################################################################
# Author : <NAME>
# Co-Author : <NAME> and <NAME>
# Modified :
# Reviewer :
# Functionality     : Lemmatization of keywords, i.e. reducing words to their root form
###########################################################################################################################
def lemmatize_text(text):
text = nlp(text)
text = ' '.join([word.lemma_ if word.lemma_ != '-PRON-' else word.text for word in text])
# print('Lemmatiation completed')
return text
###########################################################################################################################
# Author : <NAME>
# Co-Author : <NAME> and <NAME>
# Modified :
# Reviewer :
# Functionality     : Remove repeated words
###########################################################################################################################
def remove_repeated_words(text):
tokens = tokenizer.tokenize(text)
tokens = [token.strip() for token in tokens]
seen = set()
seen_add = seen.add
def add(x):
seen_add(x)
return x
text = ' '.join(add(i) for i in tokens if i not in seen)
# print('remove repeated words completed')
return text
###########################################################################################################################
# Author : <NAME>
# Co-Author : <NAME> and <NAME>
# Modified :
# Reviewer :
# Functionality : Removal of repeated characters
###########################################################################################################################
def remove_repeated_characters(text):
repeat_pattern = re.compile(r'(\w*)(\w)\2(\w*)')
match_substitution = r'\1\2\3'
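    # Illustrative behaviour: a token such as 'finalllly' is reduced one duplicated
    # character at a time until it is a valid WordNet word ('finally') or it stops
    # changing.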
tokens = tokenizer.tokenize(text)
tokens = [token.strip() for token in tokens]
def replace(old_word):
if wordnet.synsets(old_word):
return old_word
new_word = repeat_pattern.sub(match_substitution, old_word)
return replace(new_word) if new_word != old_word else new_word
correct_tokens = [replace(word) for word in tokens]
# print('remove repeated characters')
return correct_tokens
###########################################################################################################################
# Author : <NAME>
# Co-Author : <NAME> and <NAME>
# Modified :
# Reviewer :
# Functionality : Expand the contractions which can be later tokenized or removed
###########################################################################################################################
def expand_contractions(text, contraction_mapping=CONTRACTION_MAP):
contractions_pattern = re.compile('({})'.format('|'.join(contraction_mapping.keys())),
flags=re.IGNORECASE|re.DOTALL)
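    # Illustrative behaviour: "don't" expands to "do not" (per CONTRACTION_MAP); the
    # first character of the match is kept so capitalisation is preserved, and any
    # leftover apostrophes are stripped afterwards.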
def expand_match(contraction):
match = contraction.group(0)
first_char = match[0]
expanded_contraction = contraction_mapping.get(match)\
if contraction_mapping.get(match)\
else contraction_mapping.get(match.lower())
expanded_contraction = first_char+expanded_contraction[1:]
return expanded_contraction
expanded_text = contractions_pattern.sub(expand_match, text)
expanded_text = re.sub("'", "", expanded_text)
# print('expand contractions completed')
return expanded_text
###########################################################################################################################
# Author : Tapas Mohanty
# Co-Author : <NAME> and <NAME>
# Modified :
# Reviewer :
# Functionality : Remove accented characters
###########################################################################################################################
def remove_accented_chars(text):
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore')
# print('removal accented chars')
return text
###########################################################################################################################
# Author : Tapas Mohanty
# Co-Author : <NAME> and <NAME>
# Modified :
# Functionality : Remove special characters
###########################################################################################################################
def remove_special_characters(text, remove_digits=False):
pattern = r'[^a-zA-Z0-9\s]|\[|\]' if not remove_digits else r'[^a-zA-Z\s]|\[|\]'
text = re.sub(pattern, '', text)
# print('removal special characters completed')
return text
###########################################################################################################################
# Author : Tapas Mohanty
# Co-Author : <NAME> and <NAME>
# Modified :
# Reviewer :
# Functionality : Remove stopwords which are listed in nltk library
###########################################################################################################################
def remove_stopwords(text, is_lower_case=False, stopwords = stopword_list):
tokens = tokenizer.tokenize(text)
tokens = [token.strip() for token in tokens]
if is_lower_case:
filtered_tokens = [token for token in tokens if token not in stopwords]
else:
filtered_tokens = [token for token in tokens if token.lower() not in stopwords]
filtered_text = ' '.join(filtered_tokens)
# print('removal stopwords completed')
return filtered_text
###########################################################################################################################
# Author : Tapas Mohanty
# Co-Author : <NAME> and <NAME>
# Modified :
# Functionality : Remove custom keywords which are mentioned in the text file i.e.
# stopwords.txt
###########################################################################################################################
def custom_stopwords(text, custok):
tokens = tokenizer.tokenize(text)
tokens = [token.strip() for token in tokens]
filtered_custokens = [token for token in tokens if token not in custok]
filtered_text = ' '.join(filtered_custokens)
# print('removal custom stopwords completed')
return filtered_text
###########################################################################################################################
# Author : Tapas Mohanty
# Co-Author : <NAME> and <NAME>
# Modified :
# Reviewer :
# Functionality     : Remove non-English keywords
###########################################################################################################################
def get_keywords(text, eng_words = engwords):
tokens = tokenizer.tokenize(text)
eng_tokens = [token for token in tokens if token in eng_words]
eng_text = ' '.join(eng_tokens)
# print('removal of non-english keywords completed')
return eng_text
###########################################################################################################################
# Author : Tapas Mohanty
# Co-Author : <NAME> and <NAME>
# Modified :
# Reviewer :
# Functionality     : Remove those keywords which are present in other columns
###########################################################################################################################
def col_keyword(pData, pTktDesc, column):
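    # For each row, keep only the words of the pTktDesc column that do not appear in
    # any of the columns listed in `column`; the result is stored in a new 'Sample'
    # column.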
try:
pData['combined'] = pData[column].apply(lambda row: ' '.join(row))
pData['Sample'] = ([' '.join(set(a.split(' ')).difference(set(b.split(' ')))) for a, b in zip(pData[pTktDesc], pData['combined'])])
del pData['combined']
except Exception as e:
        print('*** Error[001]: error occurred while combining columns', e)
return pData
###########################################################################################################################
# Author : <NAME>
# Co-Author : <NAME> and <NAME>
# Modified :
# Reviewer :
# Functionality     : Apply all the above preprocessing functions to each document in the corpus
###########################################################################################################################
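# Illustrative call (the dataframe column used here is an assumption, not part of
# this script): clean_docs = normalize_corpus(df['Description'].astype(str).tolist())
# Note that the custom stopword list is read from 'stopwords.txt' in the working
# directory.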
def normalize_corpus(corpus, html_stripping= True, contraction_expansion= True,
accented_char_removal= True, text_lower_case= True,
text_stemming= False, text_lemmatization= True,
special_char_removal= True, remove_digits= True,
stopword_removal= True, ewords = True,
custm_stpwrds= True, stopwords=stopword_list,
remove_rptd_wrds= True, eng_words = engwords):
normalized_corpus = []
# normalize each document in the corpus
custok = []
with open('stopwords.txt', 'r') as f:
for word in f:
word = word.split('\n')
custok.append(word[0])
print('--> preprocessing started')
for index, doc in enumerate(corpus):
# print(index)
try:
# strip HTML
if html_stripping:
doc = strip_html_tags(doc)
except Exception as e:
            print(traceback.format_exc())
            print('*** Error[002]: preprocessing error occurred in html_stripping on row no: ', index)
            raise
try:
# remove extra newlines
doc = doc.translate(doc.maketrans("\n\t\r", " "))
except Exception as e:
print(traceback.format_exc())
            print('*** Error[003]: preprocessing error occurred on row no: ', index)
try:
# remove accented characters
if accented_char_removal:
doc = remove_accented_chars(doc)
except Exception as e:
            print(traceback.format_exc())
            print('*** Error[004]: preprocessing error occurred in accented_char_removal on row no: ', index)
            raise
try:
# expand contractions
if contraction_expansion:
doc = expand_contractions(doc)
except Exception as e:
            print('*** Error[005]: preprocessing error occurred in contraction_expansion on row no: ', index)
try:
# lemmatize text
if text_lemmatization:
doc = lemmatize_text(doc)
except Exception as e:
            print('*** Error[006]: preprocessing error occurred in text_lemmatization on row no: ', index)
            raise
try:
# stem text
if text_stemming and not text_lemmatization:
doc = simple_porter_stemming(doc)
except Exception as e:
            print('*** Error[007]: error occurred in text_stemming on row no: ', index)
try:
# remove special characters and\or digits
if special_char_removal:
# insert spaces between special characters to isolate them
special_char_pattern = re.compile(r'([{.(-)!}])')
doc = special_char_pattern.sub(" \\1 ", doc)
doc = remove_special_characters(doc, remove_digits=remove_digits)
except Exception as e:
            print('*** Error[008]: preprocessing error occurred in special_char_removal on row no: ', index)
            raise
try:
# remove extra whitespace
doc = re.sub(' +', ' ', doc)
except Exception as e:
            print('*** Error[009]: preprocessing error occurred on row no: ', index)
try:
# lowercase the text
if text_lower_case:
doc = doc.lower()
except Exception as e:
            print(traceback.format_exc())
            print('*** Error[010]: preprocessing error occurred in text_lower_case on row no: ', index)
            raise
try:
# remove stopwords
if stopword_removal:
doc = remove_stopwords(doc, is_lower_case=text_lower_case, stopwords = stopwords)
except Exception as e:
            print(traceback.format_exc())
            print('*** Error[011]: preprocessing error occurred in stopword_removal on row no: ', index)
            raise
try:
#Remove non-english keywords
if ewords:
doc = get_keywords(doc, eng_words = eng_words)
except Exception as e:
            print(traceback.format_exc())
            print('*** Error[012]: preprocessing error occurred in ewords on row no: ', index)
            raise
try:
#Remove custom keywords
if custm_stpwrds:
                doc = custom_stopwords(doc, custok)
import math
from functools import wraps, partial, reduce
from operator import mul
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from src.modules import FeedForwardNetwork, ESM1bLayerNorm, NormalizedResidualBlock, MLP
TOKEN_SELF_ATTN_VALUE = -5e4  # carefully set for half precision to work
# We followed implement in https://github.com/lucidrains/reformer-pytorch
def chunked_sum(tensor, chunks=1):
*orig_size, last_dim = tensor.shape
tensor = tensor.reshape(-1, last_dim)
summed_tensors = [c.sum(dim=-1) for c in tensor.chunk(chunks, dim=0)]
return torch.cat(summed_tensors, dim=0).reshape(orig_size)
def process_inputs_chunk(fn, chunks=1, dim=0):
def inner_fn(*args, **kwargs):
keys, values, len_args = kwargs.keys(), kwargs.values(), len(args)
chunked_args = list(zip(*map(lambda x: x.chunk(chunks, dim=dim), list(args) + list(values))))
all_args = map(lambda x: (x[:len_args], dict(zip(keys, x[len_args:]))), chunked_args)
outputs = [fn(*c_args, **c_kwargs) for c_args, c_kwargs in all_args]
return tuple(map(lambda x: torch.cat(x, dim=dim), zip(*outputs)))
return inner_fn
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def exists(val):
return val is not None
def batched_index_select(values, indices):
    last_dim = values.shape[-1]  # values has shape [batch, seq, dim]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
def default(val, default_val):
return default_val if val is None else val
def sort_key_val(t1, t2, dim=-1):
values, indices = t1.sort(dim=dim)
t2 = t2.expand_as(t1)
return values, t2.gather(dim, indices)
def expand_dim(dim, k, t):
t = t.unsqueeze(dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def merge_dims(ind_from, ind_to, tensor):
shape = list(tensor.shape)
arr_slice = slice(ind_from, ind_to + 1)
shape[arr_slice] = [reduce(mul, shape[arr_slice])]
return tensor.reshape(*shape)
def split_at_index(dim, index, t):
pre_slices = (slice(None),) * dim
l = (*pre_slices, slice(None, index))
r = (*pre_slices, slice(index, None))
return t[l], t[r]
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j=2)
x1, x2 = x.unbind(dim=-1)
x = torch.stack((-x2, x1), dim=-1)
return rearrange(x, '... d j -> ... (d j)')
def apply_rotary_pos_emb(qk, sinu_pos):
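    # Rotary position embedding: rotate every consecutive pair of feature dimensions
    # of the queries/keys by an angle that depends on the token position; positions
    # beyond the precomputed sinusoid table (qk_pass) are left unchanged.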
sinu_pos = sinu_pos.type(qk.dtype)
sinu_pos = rearrange(sinu_pos, '() n (j d) -> n j d', j=2)
sin, cos = sinu_pos.unbind(dim=-2)
sin, cos = map(lambda t: repeat(t, 'n d -> n (d j)', j=2), (sin, cos))
seq_len = sin.shape[0]
qk, qk_pass = qk[:, :seq_len], qk[:, seq_len:]
qk = (qk * cos) + (rotate_every_two(qk) * sin)
return torch.cat((qk, qk_pass), dim=1)
def cache_method_decorator(cache_attr, cache_namespace, reexecute=False):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, key_namespace=None, fetch=False, set_cache=True, **kwargs):
namespace_str = str(default(key_namespace, ''))
_cache = getattr(self, cache_attr)
_keyname = f'{cache_namespace}:{namespace_str}'
if fetch:
val = _cache[_keyname]
if reexecute:
fn(self, *args, **kwargs)
else:
val = fn(self, *args, **kwargs)
if set_cache:
setattr(self, cache_attr, {**_cache, **{_keyname: val}})
return val
return wrapper
return inner_fn
class LSHAttention(nn.Module):
def __init__(self,
dropout=0.,
bucket_size=64,
n_hashes=4,
causal=False,
allow_duplicate_attention=True,
attend_across_buckets=True,
rehash_each_round=True,
drop_for_hash_rate=0.0,
random_rotations_per_head=False, dim_per_head=16,
return_attn=False):
super().__init__()
if dropout >= 1.0:
raise ValueError('Dropout rates must be lower than 1.')
self.dropout = nn.Dropout(dropout)
self.dropout_for_hash = nn.Dropout(drop_for_hash_rate)
assert rehash_each_round or allow_duplicate_attention, (
'The setting {allow_duplicate_attention=False, rehash_each_round=False}'
' is not implemented.')
self.causal = causal
self.bucket_size = bucket_size
self.n_hashes = n_hashes
self.dim_per_head = dim_per_head
self._allow_duplicate_attention = allow_duplicate_attention
self._attend_across_buckets = attend_across_buckets
self._rehash_each_round = rehash_each_round
self._random_rotations_per_head = random_rotations_per_head
# will expend extra computation to return attention matrix
self._return_attn = return_attn
# cache buckets for reversible network, reported by authors to make Reformer work at depth
self._cache = {}
@cache_method_decorator('_cache', 'buckets', reexecute=True)
def hash_vectors(self, n_buckets, vecs):
batch_size = vecs.shape[0]
device = vecs.device
assert n_buckets % 2 == 0
rot_size = n_buckets
rotations_shape = (
# batch_size if self._random_rotations_per_head else 1,
1,
vecs.shape[-1],
self.n_hashes,
rot_size // 2)
random_rotations = torch.randn(rotations_shape, dtype=vecs.dtype, device=device).expand(batch_size, -1, -1,
-1) # batch,hidden,nhash,bin
dropped_vecs = self.dropout_for_hash(vecs) # batch,ngene,hidden
rotated_vecs = torch.einsum('btf,bfhi->bhti', dropped_vecs, random_rotations) # batch,nhash,ngene,bin
rotated_vecs = torch.cat([rotated_vecs, -rotated_vecs], dim=-1)
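        # Angular LSH: concatenating the projections with their negations yields
        # n_buckets directions, and argmax assigns each position to the bucket of
        # its closest random direction (so nearby query/key vectors hash together).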
buckets = torch.argmax(rotated_vecs, dim=-1) # batch,nhash,ngene,whichbin
offsets = torch.arange(self.n_hashes, device=device)
offsets = torch.reshape(offsets * n_buckets, (1, -1, 1))
buckets = torch.reshape(buckets + offsets, (batch_size, -1,))
return buckets
def forward(self, qk, v, query_len=None, return_attn=False, input_mask=None, **kwargs):
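        # LSH attention pipeline: hash positions into buckets, sort by bucket,
        # attend within fixed-size chunks (plus one look-back chunk), then unsort
        # and combine the n_hashes rounds with logsumexp-normalised weights.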
batch_size, seqlen, dim, device = *qk.shape, qk.device
query_len = default(query_len, seqlen)
is_reverse = kwargs.pop('_reverse', False)
depth = kwargs.pop('_depth', None)
assert seqlen % (
self.bucket_size * 2) == 0, f'Sequence length ({seqlen}) needs to be divisible by target bucket size x 2 - ' \
f'{self.bucket_size * 2}'
n_buckets = seqlen // self.bucket_size
buckets = self.hash_vectors(n_buckets, qk, key_namespace=depth, fetch=is_reverse, set_cache=self.training) # batch,
# (nhash,ngene,whichbin)
# We use the same vector as both a query and a key.
assert int(buckets.shape[1]) == self.n_hashes * seqlen
total_hashes = self.n_hashes
ticker = torch.arange(total_hashes * seqlen, device=device).unsqueeze(0).expand_as(buckets)
buckets_and_t = seqlen * buckets + (ticker % seqlen)
buckets_and_t = buckets_and_t.detach()
sbuckets_and_t, sticker = sort_key_val(buckets_and_t, ticker, dim=-1)
_, undo_sort = sticker.sort(dim=-1)
del ticker
sbuckets_and_t = sbuckets_and_t.detach()
sticker = sticker.detach()
undo_sort = undo_sort.detach()
st = (sticker % seqlen)
sqk = batched_index_select(qk, st) # batch gene dim
sv = batched_index_select(v, st)
# Split off a "bin" axis so that attention only occurs within chunks.
chunk_size = total_hashes * n_buckets
bq_t = bkv_t = torch.reshape(st, (batch_size, chunk_size, -1))
bqk = torch.reshape(sqk, (batch_size, chunk_size, -1, dim))
bv = torch.reshape(sv, (batch_size, chunk_size, -1, dim))
bq = bqk
bk = F.normalize(bqk, p=2, dim=-1).type_as(bq)
def look_one_back(x):
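            # Let every chunk also attend to the keys/values of the preceding chunk
            # (wrapping around), so buckets split across a chunk boundary still see
            # each other.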
x_extra = torch.cat([x[:, -1:, ...], x[:, :-1, ...]], dim=1)
return torch.cat([x, x_extra], dim=2)
bk = look_one_back(bk)
bv = look_one_back(bv)
bkv_t = look_one_back(bkv_t)
lens = bk.shape[-1] // self.dim_per_head
# Dot-product attention.
dots = 0
tk = 64
if return_attn:
for i in range(lens // tk):
dots += (torch.einsum('bhie,bhje->bhij', bq[:, :, :, i * tk * self.dim_per_head:(i + 1) * tk * self.dim_per_head],
bk[:, :, :, i * tk * self.dim_per_head:(i + 1) * tk * self.dim_per_head]) * (dim ** -0.5))
if lens % tk != 0:
dots += (torch.einsum('bhie,bhje->bhij', bq[:, :, :, -(len(bk) % tk) * self.dim_per_head:],
bk[:, :, :, -(len(bk) % tk) * self.dim_per_head:]) * (dim ** -0.5))
else:
dots = (torch.einsum('bhie,bhje->bhij', bq, bk) * (dim ** -0.5))
masked_value = max_neg_value(dots)
if input_mask is not None:
mq = input_mask.gather(1, st).reshape((batch_size, chunk_size, -1))
mkv = look_one_back(mq)
mask = mq[:, :, :, None] * mkv[:, :, None, :]
# print((mask==1).sum(),batch_size,chunk_size)
dots.masked_fill_(mask == 0, masked_value)
del mask
# Mask out attention to self except when no other targets are available.
self_mask = bq_t[:, :, :, None] == bkv_t[:, :, None, :]
dots.masked_fill_(self_mask, TOKEN_SELF_ATTN_VALUE)
del self_mask
# Mask out attention to other hash buckets.
bq_buckets = bkv_buckets = torch.reshape(sbuckets_and_t // seqlen, (batch_size, chunk_size, -1))
bkv_buckets = look_one_back(bkv_buckets)
bucket_mask = bq_buckets[:, :, :, None] != bkv_buckets[:, :, None, :]
dots.masked_fill_(bucket_mask, masked_value)
del bucket_mask
# Softmax.
dots_logsumexp = torch.logsumexp(dots, dim=-1, keepdim=True)
dots = torch.exp(dots - dots_logsumexp).type_as(dots)
bo = torch.einsum('buij,buje->buie', dots, bv)
so = torch.reshape(bo, (batch_size, -1, dim))
slogits = torch.reshape(dots_logsumexp, (batch_size, -1,))
# unsort logits
o = batched_index_select(so, undo_sort)
logits = slogits.gather(1, undo_sort)
o = torch.reshape(o, (batch_size, total_hashes, seqlen, dim))
logits = torch.reshape(logits, (batch_size, total_hashes, seqlen, 1))
if query_len != seqlen:
query_slice = (slice(None), slice(None), slice(0, query_len))
o, logits = o[query_slice], logits[query_slice]
probs = torch.exp(logits - torch.logsumexp(logits, dim=1, keepdim=True))
out = torch.sum(o * probs, dim=1)
attn = torch.empty(0, device=device)
# return unsorted attention weights
if return_attn:
attn_unsort = ((bq_t * seqlen)[:, :, :, None] + bkv_t[:, :, None, :])
attn_unsort = attn_unsort.view(batch_size * total_hashes, -1).long()
unsorted_dots = torch.zeros(batch_size * total_hashes, seqlen * seqlen, device=device)
unsorted_dots.scatter_add_(1, attn_unsort, dots.view_as(attn_unsort))
del attn_unsort
unsorted_dots = unsorted_dots.reshape(batch_size, total_hashes, seqlen, seqlen)
attn = torch.sum(unsorted_dots[:, :, 0:query_len, :] * probs, dim=1)
# return output, attention matrix, and bucket distribution
return out, attn, buckets
class LSHSelfAttention(nn.Module):
def __init__(self, dim, heads=8, bucket_size=64, n_hashes=4, causal=False, dim_head=None, attn_chunks=1,
random_rotations_per_head=False, attend_across_buckets=True, allow_duplicate_attention=True, num_mem_kv=0,
use_full_attn=False, full_attn_thres=None, post_attn_dropout=0.,
dropout=0., n_local_attn_heads=0, **kwargs):
super().__init__()
assert dim_head or (dim % heads) == 0, 'dimensions must be divisible by number of heads'
assert n_local_attn_heads < heads, 'local attention heads must be less than number of heads'
dim_head = default(dim_head, dim // heads)
dim_heads = dim_head * heads
self.dim = dim
self.heads = heads
self.dim_head = dim_head
self.attn_chunks = default(attn_chunks, 1)
self.dim_per_head = dim // heads
v_dim = dim_heads
self.toqk = nn.Linear(dim, dim_heads, bias=False)
self.tov = nn.Linear(dim, v_dim, bias=False)
self.bucket_size = bucket_size
self.lsh_attn = LSHAttention(bucket_size=bucket_size, n_hashes=n_hashes, causal=causal, dim_per_head=self.dim_per_head,
random_rotations_per_head=random_rotations_per_head,
attend_across_buckets=attend_across_buckets,
allow_duplicate_attention=allow_duplicate_attention,
dropout=dropout, **kwargs)
self.post_attn_dropout = nn.Dropout(post_attn_dropout)
self.use_full_attn = use_full_attn
self.full_attn_thres = default(full_attn_thres, bucket_size)
self.num_mem_kv = num_mem_kv
self.mem_kv = nn.Parameter(torch.randn(1, num_mem_kv, dim, requires_grad=True)) if num_mem_kv > 0 else None
self.n_local_attn_heads = n_local_attn_heads
self.callback = None
def forward(self, x, return_attn, input_mask=None, **kwargs):
b, t, e, h, dh, m = *x.shape, self.heads, self.dim_head, self.num_mem_kv
kv_len = t
qk = self.toqk(x)
records:%s' %(k,len(chrDataSets)))
if(lengthChrDataSets>=clusterConfigs['record']): ##total row count >= record
##apply the record and threshold settings to chrDataSets
##sort chrDataSets by their Region value
#for item in chrDataSets:
# print(item[regionColumn])
chrDataSets.sort(key=lambda x:x[regionColumn])
##chrDataSets is now sorted in ascending order by the regionColumn value
##todo compute threshold and record statistics over the rows of chrDataSets
chrRowMaxNumber = len(chrDataSets)
if k not in resultClusterDicts:
resultClusterDicts[k]={}
chrDataSetsClusteredIndexList=[]
for i in range(0,chrRowMaxNumber):
if i not in chrDataSetsClusteredIndexList:
IClusteredGroup=[]
for j in range(i+1,chrRowMaxNumber):
if (int(chrDataSets[j][regionColumn])-int(chrDataSets[i][regionColumn])<=clusterConfigs['threshold']):
IClusteredGroup.append(j)
else:
break
if len(IClusteredGroup)>=(clusterConfigs['record']-1):
IClusteredGroup.append(i)
IClusteredGroup.sort()
print(IClusteredGroup)
cls_name_prefix=""
if clusterConfigs['clusterPrefix']!="":
cls_name_prefix=clusterConfigs['clusterPrefix']+'-'
clusterName = cls_name_prefix+'C'+str(k)+'-'+str(i)+'-'+str(j)
if clusterName not in resultClusterDicts[k]:
resultClusterDicts[k][clusterName]=[]
clusteredCount+=1
for ij in IClusteredGroup:
resultClusterDicts[k][clusterName].append(chrDataSets[ij])
## sum all valid coverage
sumALLCoverageOfAllValidClsName+=int(chrDataSets[ij][coverageColumn])
chrDataSetsClusteredIndexList.append(ij)
#print(chrDataSets[ij])
else:
continue
##collect all result into list
print('SUM Coverage Of All Cluster:%s' % sumALLCoverageOfAllValidClsName)
clusteredResult = []
pa=0
for chr in resultClusterDicts.keys():
for clsName in resultClusterDicts[chr].keys():
## collect all rows for the current cluster name
sumCoverageOfClsName=0
sumCountOfClsName=0
sumFrequencyOfClsName=0
sumRowOfClsName=0
lastRowOfClsName=len(resultClusterDicts[chr][clsName])
r=0
for row in resultClusterDicts[chr][clsName]:
r+=1
## == row[0]
#print('current cls:%s'%row)
sumCoverageOfClsName+=int(row[coverageColumn])
sumCountOfClsName+=int(row[countColumn])
sumFrequencyOfClsName+=float(row[frequencyColumn])
sumRowOfClsName+=1
if r==lastRowOfClsName:
row[insertColumns['pbColumn']['index']]=float(sumCoverageOfClsName)/float(sumALLCoverageOfAllValidClsName)
row[insertColumns['pabColumn']['index']]=float(sumCountOfClsName)/float(sumCoverageOfClsName)
row[insertColumns['cfColumn']['index']]=sumFrequencyOfClsName
row[insertColumns['crColumn']['index']]=sumRowOfClsName
pa += (float(row[insertColumns['pbColumn']['index']])*float(row[insertColumns['pabColumn']['index']]))
row[insertColumns['clusterColumn']['index']]=clsName
clusteredResult.append(row)
##merge the clustered result list back into the original data for the csv file
for row in range(1,rowMaxCount):
for rowR in clusteredResult:
if (rowR[chromosomeColumn]==clusterFileDataSet[row][chromosomeColumn]) and (rowR[regionColumn]==clusterFileDataSet[row][regionColumn]):
clusterFileDataSet[row][insertColumns['clusterColumn']['index']]=rowR[insertColumns['clusterColumn']['index']]
pb=clusterFileDataSet[row][insertColumns['pbColumn']['index']]=rowR[insertColumns['pbColumn']['index']]
pab=clusterFileDataSet[row][insertColumns['pabColumn']['index']]=rowR[insertColumns['pabColumn']['index']]
clusterFileDataSet[row][insertColumns['cfColumn']['index']]=rowR[insertColumns['cfColumn']['index']]
clusterFileDataSet[row][insertColumns['crColumn']['index']]=rowR[insertColumns['crColumn']['index']]
if clusterFileDataSet[row][insertColumns['pbColumn']['index']] !='':
clusterFileDataSet[row][insertColumns['paColumn']['index']]=pa
clusterFileDataSet[row][insertColumns['pbaColumn']['index']]=(pb*pab)/pa
break
##
prefix = str(clusteredCount)+'_'+ "_".join(["%s%s" % (k, v) for k, v in SortedDict(clusterConfigs).items()])+'_'
resultFilePath = generateResultFilePath(clusterfileabspath,prefix)
saveDataToCSV([],clusterFileDataSet,resultFilePath,',')
print("calculated end")
def exonFile(exonDataFilePath,bedFilePath):
_exonFile(exonDataFilePath,bedFilePath)
def _exonFile(exonDataFilePath,bedFilePath):
print("acting input exon data file")
if os.path.isdir(exonDataFilePath):
print("exon data file is a directory:%s" % exonDataFilePath)
for root,dirs,files in os.walk(os.path.abspath(exonDataFilePath)):
for file in files:
filename,fileext=os.path.splitext(file)
if fileext=='.csv':
exondatafileabspath = root+os.sep+file
_exonSingleDataFile(exondatafileabspath,bedFilePath)
elif os.path.isfile(exonDataFilePath):
print("exon data file is a single file:%s" % exonDataFilePath)
exondatafileabspath = os.path.abspath(exonDataFilePath)
_exonSingleDataFile(exondatafileabspath,bedFilePath)
print("action is end")
def _exonSingleDataFile(exondatafileabspath,bedFilePath):
print("exon data file :%s" % exondatafileabspath)
if not os.path.isfile(exondatafileabspath):
print("exon data file :%s is not exist!" % exondatafileabspath)
sys.exit()
resultFilePath = generateResultFilePath(exondatafileabspath)
if os.path.isfile(resultFilePath):
print("delete old result file :%s" % resultFilePath)
os.remove(resultFilePath)
print("loading file")
exonFileDataSet=[]
exonColumn=0
chromosomeColumn = -1
regionColumn=-1
i=0
print("generating data set from exon file")
filename,fileext=os.path.splitext(exondatafileabspath)
if fileext=='.csv':
exonFileDataSetOrig = getDataFromCSV(False,',',exondatafileabspath)
exonFileDataSetOrigTitleRow = exonFileDataSetOrig[0]
exonFileDataSetOrigTitleRow.insert(exonColumn,'')
for col in exonFileDataSetOrigTitleRow:
if(chromosomeColumn==-1 and col.lower()=='chromosome'):
chromosomeColumn=i
print('chromosomeColumn:%s'% chromosomeColumn)
if(regionColumn==-1 and col.lower()=='region'):
regionColumn=i
print('regionColumn:%s'% regionColumn)
i+=1
exonFileDataSet.append(exonFileDataSetOrigTitleRow)
for row in range(1,len(exonFileDataSetOrig)):##data starts at row 2; row 1 is the header
exonFileRow = []
exonFileDataSetOrig[row].insert(exonColumn,'')
for col in range(0,len(exonFileDataSetOrig[row])):
if col==regionColumn:
colValue=int(exonFileDataSetOrig[row][col])
else:
colValue=exonFileDataSetOrig[row][col]
exonFileRow.append(colValue)
exonFileDataSet.append(exonFileRow)
print("generated end")
rowMaxCount = len(exonFileDataSet)
exonFileDataSet[0][exonColumn]='EXON'
print("calculating")
bedFileDataDicts = readyBedFileData(bedFilePath)
#print(bedFileDataDicts.keys())
for row in range(1,rowMaxCount):
dstChromosome = exonFileDataSet[row][chromosomeColumn]
dstRegion = exonFileDataSet[row][regionColumn] ##above int()
chrKey = 'chr'+str(dstChromosome)
if chrKey in bedFileDataDicts:
for bedRow in bedFileDataDicts[chrKey]:
posStart = int(bedRow[1])
posEnd = int(bedRow[2])
if dstRegion>=posStart and dstRegion<=posEnd:
mark = bedRow[5].strip().upper()
if mark=='CDS':
exonFileDataSet[row][exonColumn] = 'CDS'
print('pos:%s:%s,exon:%s'%(dstChromosome,dstRegion,bedRow[3].strip()+'CDS'))
elif mark =='INTRON':
exonFileDataSet[row][exonColumn] = 'INTRON'
print('pos:%s:%s,exon:%s'%(dstChromosome,dstRegion,bedRow[3].strip()+'INTRON'))
elif mark == 'UTR':
utr = bedRow[6].strip().upper()
if utr in ['UTR5','UTR3']:
exonFileDataSet[row][exonColumn] = utr
print('pos:%s:%s,exon:%s'%(dstChromosome,dstRegion,bedRow[3].strip()+utr))
if exonFileDataSet[row][exonColumn] !='':
break ###
saveDataToCSV([],exonFileDataSet,resultFilePath)
print("action is end")
def readyBedFileData(bedFilePath):
if os.path.exists(bedFilePath):
bedFileData = getDataFromCSV(False,'\t',bedFilePath)
bedFileDataDictionaryByCHR = {}
for row in bedFileData:
chrKey=str(row[0])
if chrKey not in bedFileDataDictionaryByCHR:
bedFileDataDictionaryByCHR[chrKey] = []
bedFileDataDictionaryByCHR[chrKey].append(row)
print("converted successfully")
return bedFileDataDictionaryByCHR
else:
print('bed file does not exist!')
sys.exit()
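# A minimal alternative sketch (an assumption, not in the original): the same chromosome
# grouping written with collections.defaultdict, which drops the explicit key check.
def _example_group_by_chromosome(rows):
    from collections import defaultdict
    grouped = defaultdict(list)
    for row in rows:
        grouped[str(row[0])].append(row)
    return dict(grouped)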
def actDataFile(genome,settings,dataFilePath,profileSourceData):
dbProfile = settings['profile']
if (genome=='GRCm38' and dbProfile=='RepeatMasker'):
_actDataFile_GRCm38_RepeatMasker(dataFilePath,profileSourceData)
elif (genome=='GRCm38' and dbProfile=='strand'):
_actDataFile_GRCm38_strand(dataFilePath,profileSourceData)
elif (genome=='GRCm38' and dbProfile=='snp142Common'):
_actDataFile_GRCm38_snp142Common(dataFilePath,profileSourceData)
def _actDataFile_GRCm38_RepeatMasker(dataFilePath,profileSourceData):
print("acting input data file")
if os.path.isdir(dataFilePath):
print("data file is a directory:%s" % dataFilePath)
for root,dirs,files in os.walk(os.path.abspath(dataFilePath)):
for file in files:
filename,fileext=os.path.splitext(file)
if fileext=='.xlsx':
datafileabspath = root+os.sep+file
_actSingleDataFile_GRCm38_RepeatMasker(datafileabspath,profileSourceData)
elif os.path.isfile(dataFilePath):
print("data file is a single file:%s" % dataFilePath)
datafileabspath = os.path.abspath(dataFilePath)
_actSingleDataFile_GRCm38_RepeatMasker(datafileabspath,profileSourceData)
print("action is end")
def _actDataFile_GRCm38_strand(dataFilePath,profileSourceData):
print("acting input data file")
if os.path.isdir(dataFilePath):
print("data file is a directory:%s" % dataFilePath)
for root,dirs,files in os.walk(os.path.abspath(dataFilePath)):
for file in files:
filename,fileext=os.path.splitext(file)
if fileext=='.xlsx':
datafileabspath = root+os.sep+file
_actSingleDataFile_GRCm38_strand(datafileabspath,profileSourceData)
elif os.path.isfile(dataFilePath):
print("data file is a single file:%s" % dataFilePath)
datafileabspath = os.path.abspath(dataFilePath)
_actSingleDataFile_GRCm38_strand(datafileabspath,profileSourceData)
print("action is end")
def _actDataFile_GRCm38_snp142Common(dataFilePath,profileSourceData):
print("acting input data file")
if os.path.isdir(dataFilePath):
print("data file is a directory:%s" % dataFilePath)
for root,dirs,files in os.walk(os.path.abspath(dataFilePath)):
for file in files:
filename,fileext=os.path.splitext(file)
if fileext=='.xlsx':
datafileabspath = root+os.sep+file
_actSingleDataFile_GRCm38_snp142Common(datafileabspath,profileSourceData)
elif os.path.isfile(dataFilePath):
print("data file is a single file:%s" % dataFilePath)
datafileabspath = os.path.abspath(dataFilePath)
_actSingleDataFile_GRCm38_snp142Common(datafileabspath,profileSourceData)
print("action is end")
def _actSingleDataFile_GRCm38_strand(datafileabspath,profileSourceData):
print("dealing data file :%s" % datafileabspath)
if not os.path.isfile(datafileabspath):
print("data file :%s is not exist!" % datafileabspath)
sys.exit()
#DEAL
resultFilePath = generateResultFilePath(datafileabspath)
if os.path.isfile(resultFilePath):
print("delete old result file :%s" % resultFilePath)
os.remove(resultFilePath)
print("loading data file")
wb=load_workbook(filename=datafileabspath,data_only=True,read_only=True)##fast mode
ws=wb.active
dataFileDataSet=[]
chromosomeColumn = -1
regionColumn=-1
##insert column index definition
i=0
print("generating data set from data file")
for row in ws.rows:##the first row is the header row
dataFileRow = []
##insert 1 columns
for insertItem in range(0,1): ##repClass-mode ##when this changes, chromosomeColumn and regionColumn below must change accordingly
dataFileRow.insert(0,'')
for cell in row:
dataFileRow.append(cell.value)
if(chromosomeColumn==-1 and cell.value.lower()=='chromosome'):
chromosomeColumn=i+1 ##repClass-mode ## insertItem : 1 0=>1
print("found Chromosome Column index is:%s" % chromosomeColumn)
if(regionColumn==-1 and cell.value.lower()=='region'):
regionColumn=i+1 ##repClass-mode ## insertItem : 1 0=>1
print("found Region Column index is:%s" % regionColumn)
i=i+1
dataFileDataSet.append(dataFileRow)
###save all cell.value to [[],[],...,[]]
print("generated end")
##insert repClass column and repFamily column after region's column
##keep insert order
##repClass-mode
print("insert title column")
dataFileDataSet[0][0]='strand'
strandCol = 0
rowMaxCount = len(dataFileDataSet)
print("calculating")
for row in range(1,rowMaxCount):###row 0 is the header row
dstChromosome = dataFileDataSet[row][chromosomeColumn]
dstRegion = int(dataFileDataSet[row][regionColumn])
## geneName !== ''
dstGeneName = dataFileDataSet[row][chromosomeColumn+8]
print(dstGeneName)
print("current Chromosome:Region is : %s:%s" %(dstChromosome,dstRegion))
chrKey = 'chr'+str(dstChromosome)
if chrKey in list(profileSourceData.keys()):
## ['chrom','txStart','txEnd','strand']
## only search one time
for sourceItem in profileSourceData[chrKey]:###[[],[],...,[]]
if (dstRegion>=int(sourceItem[1]) and dstRegion<=int(sourceItem[2])):
strandVal = sourceItem[3]
if strandVal in ['-','+']:
print("found target :%s" %sourceItem)
dataFileDataSet[row][strandCol]=strandVal
break
print("calculated end")
saveDataToCSV([],dataFileDataSet,resultFilePath)
def _actSingleDataFile_GRCm38_snp142Common(datafileabspath,profileSourceData):
print("dealing data file :%s" % datafileabspath)
if not os.path.isfile(datafileabspath):
print("data file :%s is not exist!" % datafileabspath)
sys.exit()
#DEAL
resultFilePath = generateResultFilePath(datafileabspath)
if os.path.isfile(resultFilePath):
print("delete old result file :%s" % resultFilePath)
os.remove(resultFilePath)
print("loading data file")
wb=load_workbook(filename=datafileabspath,data_only=True,read_only=True)##fast mode
ws=wb.active
dataFileDataSet=[]
chromosomeColumn = -1
regionColumn=-1
##insert column index definition
i=0
print("generating data set from data file")
for row in ws.rows:##the first row is the header row
dataFileRow = []
##insert 1 columns
for insertItem in range(0,1): ##repClass-mode ##when this changes, chromosomeColumn and regionColumn below must change accordingly
dataFileRow.insert(0,'')
for cell in row:
dataFileRow.append(cell.value)
if(chromosomeColumn==-1 and cell.value.lower()=='chromosome'):
chromosomeColumn=i+1 ##repClass-mode ## insertItem : 1 0=>1
print("found Chromosome Column index is:%s" % chromosomeColumn)
if(regionColumn==-1 and cell.value.lower()=='region'):
regionColumn=i+1 ##repClass-mode ## insertItem : 1 0=>1
print("found Region Column index is:%s" % regionColumn)
i=i+1
dataFileDataSet.append(dataFileRow)
###save all cell.value to [[],[],...,[]]
print("generated end")
##insert repClass column and repFamily column after region's column
##keep insert order
##repClass-mode
print("insert title column")
dataFileDataSet[0][0]='snp142Common'
snp142CommonCol = 0
rowMaxCount = len(dataFileDataSet)
print("calculating")
for row in range(1,rowMaxCount):###row 0 is the header row
dstChromosome = dataFileDataSet[row][chromosomeColumn]
dstRegion = int(dataFileDataSet[row][regionColumn])
## geneName !== ''
#dstGeneName = dataFileDataSet[row][chromosomeColumn+8]
#print(dstGeneName)
print("current Chromosome:Region is : %s:%s" %(dstChromosome,dstRegion))
chrKey = 'chr'+str(dstChromosome)
if chrKey in list(profileSourceData.keys()):
## ['chrom','chromStart','chromEnd','name']
## only search one time
for sourceItem in profileSourceData[chrKey]:###[[],[],...,[]]
if (dstRegion>=int(sourceItem[1]) and dstRegion<=int(sourceItem[2])):
snp142CommonVal = sourceItem[3]
if snp142CommonVal != '':
print("found target :%s" %sourceItem)
dataFileDataSet[row][snp142CommonCol]=snp142CommonVal
break
print("calculated end")
saveDataToCSV([],dataFileDataSet,resultFilePath)
def _actSingleDataFile_GRCm38_RepeatMasker(datafileabspath,profileSourceData):
print("dealing data file :%s" % datafileabspath)
if not os.path.isfile(datafileabspath):
print("data file :%s is not exist!" % datafileabspath)
sys.exit()
#DEAL
resultFilePath = generateResultFilePath(datafileabspath)
if os.path.isfile(resultFilePath):
print("delete old result file :%s" % resultFilePath)
os.remove(resultFilePath)
print("loading data file")
wb=load_workbook(filename=datafileabspath,data_only=True,read_only=True)##fast mode
ws=wb.active
dataFileDataSet=[]
chromosomeColumn = -1
regionColumn=-1
##insert column index definition
i=0
print("generating data set from data file")
for row in ws.rows:##the first row is the header row
dataFileRow = []
##insert 8 columns for 4(sine,line,ltr,dna)x2(repclass,repfamily)
for insertItem in range(0,8): ##repClass-mode ##when this changes, chromosomeColumn and regionColumn below must change accordingly
dataFileRow.insert(0,'')
for cell in row:
dataFileRow.append(cell.value)
if(chromosomeColumn==-1 and cell.value.lower()=='chromosome'):
chromosomeColumn=i+8 ##repClass-mode ## insertItem : 8 0=>8
print("found Chromosome Column index is:%s" % chromosomeColumn)
if(regionColumn==-1 and cell.value.lower()=='region'):
regionColumn=i+8 ##repClass-mode ## insertItem : 8 0=>8
print("found Region Column index is:%s" % regionColumn)
i=i+1
dataFileDataSet.append(dataFileRow)
###save all cell.value to [[],[],...,[]]
print("generated end")
##insert repClass column and repFamily column after region's column
##keep insert order
##repClass-mode
print("insert title column")
dataFileDataSet[0][0]='repClassSINE'
dataFileDataSet[0][1]='repFamilySINE'
dataFileDataSet[0][2]='repClassLINE'
dataFileDataSet[0][3]='repFamilyLINE'
dataFileDataSet[0][4]='repClassLTR'
dataFileDataSet[0][5]='repFamilyLTR'
dataFileDataSet[0][6]='repClassDNA'
dataFileDataSet[0][7]='repFamilyDNA'
rowMaxCount = len(dataFileDataSet)
print("calculating")
for row in range(1,rowMaxCount):###row 0 is the header row
dstChromosome = dataFileDataSet[row][chromosomeColumn]
dstRegion = int(dataFileDataSet[row][regionColumn])
print("current Chromosome:Region is : %s:%s" %(dstChromosome,dstRegion))
chrKey = 'chr'+str(dstChromosome)
if chrKey in list(profileSourceData.keys()):
##sourceItem:['genoName','genoStart','genoEnd','repClass','repFamily']
for sourceItem in profileSourceData[chrKey]:###[[],[],...,[]]
if (dstRegion>=int(sourceItem[1]) and dstRegion<=int(sourceItem[2])):
##repClass:sourceItem[3] repFamily:sourceItem[4]
#set repClass column and repFamily column value
repClassVal = sourceItem[3]
repFamilyVal = sourceItem[4]
##repClass-mode
##todo,trim space,upper
if repClassVal=='SINE':
repClassCol=0
repFamilyCol=1
dataFileDataSet[row][repClassCol]=repClassVal
dataFileDataSet[row][repFamilyCol]=repFamilyVal
if repClassVal=='LINE':
repClassCol=2
repFamilyCol=3
dataFileDataSet[row][repClassCol]=repClassVal
dataFileDataSet[row][repFamilyCol]=repFamilyVal
if repClassVal=='LTR':
repClassCol=4
repFamilyCol=5
dataFileDataSet[row][repClassCol]=repClassVal
dataFileDataSet[row][repFamilyCol]=repFamilyVal
if repClassVal=='DNA':
repClassCol=6
repFamilyCol=7
dataFileDataSet[row][repClassCol]=repClassVal
dataFileDataSet[row][repFamilyCol]=repFamilyVal
print("found target :%s" %sourceItem)
##todo , sine,line and so on same line
## consider commenting out the break, because SINE, LINE, LTR and DNA may not be mutually exclusive
##break
print("calculated end")
saveDataToCSV([],dataFileDataSet,resultFilePath)
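# Illustrative sketch (not part of the original script): the per-position RepeatMasker
# lookup above, reduced to a helper that returns {repClass: repFamily} for every
# overlapping repeat of the four tracked classes. Rows follow the queried columns
# ['genoName','genoStart','genoEnd','repClass','repFamily'].
def _example_repeat_classes(position, repeat_rows):
    hits = {}
    for row in repeat_rows:
        if int(row[1]) <= position <= int(row[2]) and row[3] in ('SINE', 'LINE', 'LTR', 'DNA'):
            hits[row[3]] = row[4]
    return hits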
###query source data####
def readyProfileSourceData(genome,settings):
dbProfile = settings['profile']
if (genome=='GRCm38' and dbProfile=='RepeatMasker'):
return _readyProfileSourceData_GRCm38_RepeatMasker(genome,settings)
elif (genome=='GRCm38' and dbProfile=='strand'):
return _readyProfileSourceData_GRCm38_strand(genome,settings)
elif (genome=='GRCm38' and dbProfile=='snp142Common'):
return _readyProfileSourceData_GRCm38_snp142Common(genome,settings)
def _loadProfileSourceData_GRCm38_RepeatMasker(genome,settings):
print("loading profile source data")
dbProfile = settings['profile']
profileSourceDataPath = setProfileSourceDataPath(genome,dbProfile)
if not os.path.exists(profileSourceDataPath):
print("connected to remote mysql database online")
dbType = settings['type']
dbConfig = {
'user':settings['user'],
'host':settings['host'],
'database':settings['database'],
'raise_on_warnings':True
}
dbConn = dbConnector(dbType,dbConfig)
querySourceData(genome,dbProfile,dbConn)##save to profileSourceDataPath
dbConn.close()
profileSourceData = getDataFromCSV(True,',',profileSourceDataPath)
print("loaded successfully")
return profileSourceData
def _loadProfileSourceData_GRCm38_strand(genome,settings):
print("loading profile source data")
dbProfile = settings['profile']
profileSourceDataPath = setProfileSourceDataPath(genome,dbProfile)
if not os.path.exists(profileSourceDataPath):
print("connected to remote mysql database online")
dbType = settings['type']
dbConfig = {
'user':settings['user'],
'host':settings['host'],
'database':settings['database'],
'raise_on_warnings':True
}
dbConn = dbConnector(dbType,dbConfig)
querySourceData(genome,dbProfile,dbConn)##save to profileSourceDataPath
dbConn.close()
profileSourceData = getDataFromCSV(True,',',profileSourceDataPath)
print("loaded successfully")
return profileSourceData
def _loadProfileSourceData_GRCm38_snp142Common(genome,settings):
print("loading profile source data")
dbProfile = settings['profile']
profileSourceDataPath = setProfileSourceDataPath(genome,dbProfile)
if not os.path.exists(profileSourceDataPath):
print("connected to remote mysql database online")
dbType = settings['type']
dbConfig = {
'user':settings['user'],
'host':settings['host'],
'database':settings['database'],
'raise_on_warnings':True
}
dbConn = dbConnector(dbType,dbConfig)
querySourceData(genome,dbProfile,dbConn)##save to profileSourceDataPath
dbConn.close()
profileSourceData = getDataFromCSV(True,',',profileSourceDataPath)
print("loaded successfully")
return profileSourceData
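# A possible consolidation sketch (an assumption, not in the original): the three
# _loadProfileSourceData_* functions above differ only in name, so a single
# parameterized helper could replace them.
def _example_load_profile_source_data(genome, settings):
    dbProfile = settings['profile']
    profileSourceDataPath = setProfileSourceDataPath(genome, dbProfile)
    if not os.path.exists(profileSourceDataPath):
        dbConfig = {
            'user': settings['user'],
            'host': settings['host'],
            'database': settings['database'],
            'raise_on_warnings': True
        }
        dbConn = dbConnector(settings['type'], dbConfig)
        querySourceData(genome, dbProfile, dbConn)  # saves to profileSourceDataPath
        dbConn.close()
    return getDataFromCSV(True, ',', profileSourceDataPath)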
def _readyProfileSourceData_GRCm38_RepeatMasker(genome,settings):
print("converting profile source data to dict")
profileSourceData = _loadProfileSourceData_GRCm38_RepeatMasker(genome,settings)
profileSourceDataDictionaryByCHR = {}
#profile csv file : column : ['genoName','genoStart','genoEnd','repClass','repFamily']
for row in profileSourceData:
chrKey=str(row[0])
if chrKey not in list(profileSourceDataDictionaryByCHR.keys()):
profileSourceDataDictionaryByCHR[chrKey] = []
profileSourceDataDictionaryByCHR[chrKey].append(row)
print("converted successfully")
return profileSourceDataDictionaryByCHR
def _readyProfileSourceData_GRCm38_snp142Common(genome,settings):
print("converting profile source data to dict")
profileSourceData = _loadProfileSourceData_GRCm38_snp142Common(genome,settings)
profileSourceDataDictionaryByCHR = {}
## ['chrom','chromStart','chromEnd','name']
for row in profileSourceData:
chrKey=str(row[0])
if chrKey not in list(profileSourceDataDictionaryByCHR.keys()):
profileSourceDataDictionaryByCHR[chrKey] = []
profileSourceDataDictionaryByCHR[chrKey].append(row)
print("converted successfully")
return profileSourceDataDictionaryByCHR
def _readyProfileSourceData_GRCm38_strand(genome,settings):
print("converting profile source data to dict")
profileSourceData = _loadProfileSourceData_GRCm38_strand(genome,settings)
profileSourceDataDictionaryByCHR = {}
#profile csv file : column : ['genoName','genoStart','genoEnd','repClass','repFamily']
## ['chrom','strand','txStart','txEnd']
for row in profileSourceData:
chrKey=str(row[0])
if chrKey not in list(profileSourceDataDictionaryByCHR.keys()):
profileSourceDataDictionaryByCHR[chrKey] = []
profileSourceDataDictionaryByCHR[chrKey].append(row)
print("converted successfully")
return profileSourceDataDictionaryByCHR
def querySourceData(genome,dbProfile,dbConn):
print("current source data about genome is :%s,profile is :%s" % (genome,dbProfile))
if (genome=='GRCm38' and dbProfile=='RepeatMasker'):
profileSourceDataPath = setProfileSourceDataPath(genome,dbProfile)
_query_source_data_GRCm38_RepeatMasker(dbConn,profileSourceDataPath)
elif (genome=='GRCm38' and dbProfile=='strand'):
profileSourceDataPath = setProfileSourceDataPath(genome,dbProfile)
_query_source_data_GRCm38_strand(dbConn,profileSourceDataPath)
elif (genome=='GRCm38' and dbProfile=='snp142Common'):
profileSourceDataPath = setProfileSourceDataPath(genome,dbProfile)
_query_source_data_GRCm38_snp142Common(dbConn,profileSourceDataPath)
def _query_source_data_GRCm38_RepeatMasker(dbConn,profileSourceDataPath):
print("querying source data online")
cursor = dbConn.cursor()
column=['genoName','genoStart','genoEnd','repClass','repFamily']
##repClass-mode
query = "select genoName,genoStart,genoEnd,repClass,repFamily from rmsk where repClass in ('SINE','LINE','LTR','DNA')"
cursor.execute(query)
data = []
for (genoName,genoStart,genoEnd,repClass,repFamily) in cursor:
data.append([genoName,genoStart,genoEnd,repClass,repFamily])
print("query successfully")
## todo ,
saveDataToCSV(column,data,profileSourceDataPath)
cursor.close()
def _query_source_data_GRCm38_strand(dbConn,profileSourceDataPath):
print("querying source data online")
cursor = dbConn.cursor()
## todo: check whether txStart,txEnd equal genoStart,genoEnd
##### chr table
# adding these tables causes errors
## in fact : _est,_intronEst,_mrna
## mgcFullMrna,orfeomeMrna
## nestedRepeats
#g=list(range(1,20))+['X','Y']
#column=['tName','tStart','tEnd','strand']
#tables=[('chr'+str(x)+'_est') for x in g]+[('chr'+str(x)+'_mrna') for x in g]+['mgcFullMrna','orfeomeMrna']
########################################
#[('chr'+str(x)+'_intronEst') for x in g]
data = []
#for tb in tables:
###
#################
# query = "select tName,tStart,tEnd,strand from %s where 1 " % tb
# cursor.execute(query)
# for (chrom,txStart,txEnd,strand) in cursor:
# data.append([chrom,txStart,txEnd,strand])
### geneName
column=['chrom','txStart','txEnd','strand']
## genscan,knownGeneOld8,wgEncodeGencode2wayConsPseudoVM11,wgEncodeGencode2wayConsPseudoVM9,wgEncodeGencodeBasicVM9
## wgEncodeGencodeCompVM11,wgEncodeGencodeCompVM9,wgEncodeGencodePolyaVM11,wgEncodeGencodePolyaVM9
## wgEncodeGencodePseudoGeneVM11,wgEncodeGencodePseudoGeneVM9
tables_gene=['augustusGene','ccdsGene','geneid','knownGene','mgcGenes','orfeomeGenes','refGene','sgpGene','wgEncodeGencodePseudoGeneVM11','xenoRefGene','wgEncodeGencodeBasicVM11','genscan','knownGeneOld8','wgEncodeGencode2wayConsPseudoVM11','wgEncodeGencodeCompVM11','wgEncodeGencodePolyaVM11']
for tb_g in tables_gene:
query = "select chrom,txStart,txEnd,strand from %s where 1 " % tb_g
cursor.execute(query)
for (chrom,txStart,txEnd,strand) in cursor:
data.append([chrom,txStart,txEnd,strand])
print("query successfully")
saveDataToCSV(column,data,profileSourceDataPath,',')
cursor.close()
###query end####
def _query_source_data_GRCm38_snp142Common(dbConn,profileSourceDataPath):
print("querying source data online")
cursor = dbConn.cursor()
column=['chrom','chromStart','chromEnd','name']
##repClass-mode
query = "select chrom,chromStart,chromEnd,name from snp142Common where 1"
cursor.execute(query)
data = []
for (chrom,chromStart,chromEnd,name) in cursor:
data.append([chrom,chromStart,chromEnd,name])
print("query successfully")
## todo ,
saveDataToCSV(column,data,profileSourceDataPath)
cursor.close()
###query end####
def main():
try:
opts,args = getopt.getopt(sys.argv[1:],"hs:g:d:r:c:e:t:o:f:b:x:",["help","setting=","genome=","data=","result=","cluster=","record=","threshold=",'coverage=','frequency=','bed=','exon=',"intersectClusterName=","repeatClusterName=","gene=","clusterPrefix="])
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(2)
settingsVar={
'filePath':'',
'section':''
}
genome = ''
##mark repclass,repfamily
dataFilePath = ''
##statistic repfamily
resultFilePath = ''
##count how many distinct cluster names appear in each repFamily column
##intersectClusterName
intersectClusterName = ''
repeatClusterName = 0 ##0,1
##cluster file
clusterFilePath = ''
clusterConfigs={
'record':'',#3
'threshold':'',#2500
'coverage':'',#5
'frequency':'',#5
'bayes':1,#disabled 1:enabled
'clusterPrefix':''
}
##mark cds,5utr,3utr,intron
bedFilePath =''
exonDataFilePath = ''
geneFilePath = ''
for opt,arg in opts:
if opt in ('-h',"--help"):
usage()
sys.exit()
elif opt in ('-s','--setting'):
settingsVar['filePath']=arg
elif opt in ('-g','--genome'):
settingsVar['section']=arg
genome = arg
#!/usr/bin/python
#===-- x86_64_target_definition.py -----------------------------*- C++ -*-===//
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===----------------------------------------------------------------------===//
#----------------------------------------------------------------------
# DESCRIPTION
#
# This file can be used with the following setting:
# plugin.process.gdb-remote.target-definition-file
# This setting should be used when you are trying to connect to a
# remote GDB server that doesn't support any of the register discovery
# packets that LLDB normally uses.
#
# Why is this necessary? LLDB doesn't require a new build of LLDB that
# targets each new architecture you will debug with. Instead, all
# architectures are supported and LLDB relies on extra GDB server
# packets to discover the target we are connecting to so that it can
# show the right registers for each target. This allows the GDB server
# to change and add new registers without requiring a new LLDB build
# just so we can see new registers.
#
# This file implements the x86_64 registers for the darwin version of
# GDB and allows you to connect to servers that use this register set.
#
# USAGE
#
# (lldb) settings set plugin.process.gdb-remote.target-definition-file /path/to/x86_64_target_definition.py
# (lldb) gdb-remote other.baz.com:1234
#
# The target definition file will get used if and only if the
# qRegisterInfo packets are not supported when connecting to a remote
# GDB server.
#----------------------------------------------------------------------
from lldb import *
# Compiler and DWARF register numbers
name_to_gcc_dwarf_regnum = {
'rax' : 0 ,
'rdx' : 1 ,
'rcx' : 2 ,
'rbx' : 3 ,
'rsi' : 4 ,
'rdi' : 5 ,
'rbp' : 6 ,
'rsp' : 7 ,
'r8' : 8 ,
'r9' : 9 ,
'r10' : 10,
'r11' : 11,
'r12' : 12,
'r13' : 13,
'r14' : 14,
'r15' : 15,
'rip' : 16,
'xmm0' : 17,
'xmm1' : 18,
'xmm2' : 19,
'xmm3' : 20,
'xmm4' : 21,
'xmm5' : 22,
'xmm6' : 23,
'xmm7' : 24,
'xmm8' : 25,
'xmm9' : 26,
'xmm10' : 27,
'xmm11' : 28,
'xmm12' : 29,
'xmm13' : 30,
'xmm14' : 31,
'xmm15' : 32,
'stmm0' : 33,
'stmm1' : 34,
'stmm2' : 35,
'stmm3' : 36,
'stmm4' : 37,
'stmm5' : 38,
'stmm6' : 39,
'stmm7' : 40,
'ymm0' : 41,
'ymm1' : 42,
'ymm2' : 43,
'ymm3' : 44,
'ymm4' : 45,
'ymm5' : 46,
'ymm6' : 47,
'ymm7' : 48,
'ymm8' : 49,
'ymm9' : 50,
'ymm10' : 51,
'ymm11' : 52,
'ymm12' : 53,
'ymm13' : 54,
'ymm14' : 55,
'ymm15' : 56
};
name_to_gdb_regnum = {
'rax' : 0,
'rbx' : 1,
'rcx' : 2,
'rdx' : 3,
'rsi' : 4,
'rdi' : 5,
'rbp' : 6,
'rsp' : 7,
'r8' : 8,
'r9' : 9,
'r10' : 10,
'r11' : 11,
'r12' : 12,
'r13' : 13,
'r14' : 14,
'r15' : 15,
'rip' : 16,
'rflags': 17,
'cs' : 18,
'ss' : 19,
'ds' : 20,
'es' : 21,
'fs' : 22,
'gs' : 23,
'stmm0' : 24,
'stmm1' : 25,
'stmm2' : 26,
'stmm3' : 27,
'stmm4' : 28,
'stmm5' : 29,
'stmm6' : 30,
'stmm7' : 31,
'fctrl' : 32,
'fstat' : 33,
'ftag' : 34,
'fiseg' : 35,
'fioff' : 36,
'foseg' : 37,
'fooff' : 38,
'fop' : 39,
'xmm0' : 40,
'xmm1' : 41,
'xmm2' : 42,
'xmm3' : 43,
'xmm4' : 44,
'xmm5' : 45,
'xmm6' : 46,
'xmm7' : 47,
'xmm8' : 48,
'xmm9' : 49,
'xmm10' : 50,
'xmm11' : 51,
'xmm12' : 52,
'xmm13' : 53,
'xmm14' : 54,
'xmm15' : 55,
'mxcsr' : 56,
'ymm0' : 57,
'ymm1' : 58,
'ymm2' : 59,
'ymm3' : 60,
'ymm4' : 61,
'ymm5' : 62,
'ymm6' : 63,
'ymm7' : 64,
'ymm8' : 65,
'ymm9' : 66,
'ymm10' : 67,
'ymm11' : 68,
'ymm12' : 69,
'ymm13' : 70,
'ymm14' : 71,
'ymm15' : 72
};
name_to_generic_regnum = {
'rip' : LLDB_REGNUM_GENERIC_PC,
'rsp' : LLDB_REGNUM_GENERIC_SP,
'rbp' : LLDB_REGNUM_GENERIC_FP,
'rdi' : LLDB_REGNUM_GENERIC_ARG1,
'rsi' : LLDB_REGNUM_GENERIC_ARG2,
'rdx' : LLDB_REGNUM_GENERIC_ARG3,
'rcx' : LLDB_REGNUM_GENERIC_ARG4,
'r8' : LLDB_REGNUM_GENERIC_ARG5,
'r9' : LLDB_REGNUM_GENERIC_ARG6
};
def get_reg_num (reg_num_dict, reg_name):
if reg_name in reg_num_dict:
return reg_num_dict[reg_name]
return LLDB_INVALID_REGNUM
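# Illustrative usage sketch (not part of the original file): looking up the three
# numbering schemes for a named register using the tables above.
def _example_lookup(reg_name):
    return (get_reg_num(name_to_gcc_dwarf_regnum, reg_name),
            get_reg_num(name_to_gdb_regnum, reg_name),
            get_reg_num(name_to_generic_regnum, reg_name))
# e.g. _example_lookup('rip') returns (16, 16, LLDB_REGNUM_GENERIC_PC)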
x86_64_register_infos = [
{ 'name':'rax' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'rbx' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'rcx' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'arg4' },
{ 'name':'rdx' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'arg3' },
{ 'name':'rsi' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'arg2' },
{ 'name':'rdi' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'arg1' },
{ 'name':'rbp' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'fp' },
{ 'name':'rsp' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'sp' },
{ 'name':'r8' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'arg5' },
{ 'name':'r9' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'arg6' },
{ 'name':'r10' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'r11' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'r12' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'r13' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'r14' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'r15' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'rip' , 'set':0, 'bitsize':64 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'pc' },
{ 'name':'rflags', 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'cs' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'ss' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'ds' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'es' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'fs' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'gs' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'stmm0' , 'set':1, 'bitsize':80 , 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'stmm1' , 'set':1, 'bitsize':80 , 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'stmm2' , 'set':1, 'bitsize':80 , 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'stmm3' , 'set':1, 'bitsize':80 , 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'stmm4' , 'set':1, 'bitsize':80 , 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'stmm5' , 'set':1, 'bitsize':80 , 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'stmm6' , 'set':1, 'bitsize':80 , 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'stmm7' , 'set':1, 'bitsize':80 , 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'fctrl' , 'set':1, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'fstat' , 'set':1, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'ftag' , 'set':1, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'fiseg' , 'set':1, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'fioff' , 'set':1, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'foseg' , 'set':1, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'fooff' , 'set':1, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'fop' , 'set':1, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
{ 'name':'xmm0' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm1' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm2' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm3' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm4' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm5' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm6' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm7' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm8' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm9' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm10' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm11' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm12' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm13' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm14' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'xmm15' , 'set':1, 'bitsize':128, 'encoding':eEncodingVector, 'format':eFormatVectorOfUInt8 },
{ 'name':'mxcsr' , 'set':1, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex },
# Registers that are contained in or composed of one of more other registers
{ 'name':'eax' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex , 'slice': 'rax[31:0]' },
{ 'name':'ebx' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex , 'slice': 'rbx[31:0]' },
{ 'name':'ecx' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex , 'slice': 'rcx[31:0]' },
{ 'name':'edx' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex , 'slice': 'rdx[31:0]' },
{ 'name':'edi' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex , 'slice': 'rdi[31:0]' },
{ 'name':'esi' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex , 'slice': 'rsi[31:0]' },
{ 'name':'ebp' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex , 'slice': 'rbp[31:0]' },
{ 'name':'esp' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex , 'slice': 'rsp[31:0]' },
{ 'name':'r8d' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex , 'slice': 'r8[31:0]' },
{ 'name':'r9d' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatHex , 'slice': 'r9[31:0]' },
import base64
import logging
import os
import typing as t
from dataclasses import dataclass
from functools import partial
from inspect import isfunction
import click
from flask import Blueprint as FlaskBlueprint
from flask import Flask
from flask import Response
from flask import abort
from flask import current_app
from flask.blueprints import BlueprintSetupState
from flask.ctx import RequestContext
from flask.testing import FlaskClient
from werkzeug.datastructures import WWWAuthenticate
from werkzeug.exceptions import HTTPException, Unauthorized
from werkzeug.exceptions import InternalServerError
from werkzeug.routing import Rule
from .config import Config
from .config import DEFAULT_DEV_WORKSPACE
from .config import DEFAULT_LOCAL_WORKSPACE
from .config import DevConfig
from .config import LocalConfig
from .config import ProdConfig
from .globals import request
from .utils import HTTP_METHODS
from .utils import add_coworks_routes
from .utils import trim_underscores
from .wrappers import ApiResponse
from .wrappers import Request
from .wrappers import TokenResponse
#
# Decorators
#
def entry(fun: t.Callable = None, binary: bool = False, content_type: str = None, no_auth: bool = False) -> t.Callable:
"""Decorator to create a microservice entry point from function name.
:param fun: the entry function.
:param binary: allow payload without transformation.
:param content_type: force default content-type.
:param no_auth: set authorizer.
"""
if fun is None:
if binary and not content_type:
content_type = 'application/octet-stream'
return partial(entry, binary=binary, content_type=content_type, no_auth=no_auth)
def get_path(start):
name_ = fun.__name__[start:]
name_ = trim_underscores(name_) # to allow several functions with different args
return name_.replace('_', '/')
name = fun.__name__.upper()
for method in HTTP_METHODS:
if name == method:
path = ''
break
if name.startswith(f'{method}_'):
path = get_path(len(f'{method}_'))
break
else:
method = 'POST'
path = get_path(0)
fun.__CWS_METHOD = method
fun.__CWS_PATH = path
fun.__CWS_BINARY = binary
fun.__CWS_CONTENT_TYPE = content_type
fun.__CWS_NO_AUTH = no_auth
return fun
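# Illustrative sketch (an assumption, not from the original module): how the name-based
# mapping above behaves. A method named get_product_list is marked with HTTP method GET
# and path 'product/list', while a name with no method prefix (e.g. ping) defaults to
# POST with path 'ping'.
#
#   class ShopMicroService(TechMicroService):
#       @entry
#       def get_product_list(self):
#           return ['book', 'pen']
#
#       @entry(no_auth=True)
#       def ping(self):
#           return 'pong'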
def hide(fun: t.Callable) -> t.Callable:
"""Hide a route of the microservice.
May be used as a decorator.
Useful when creating an inherited microservice.
"""
setattr(fun, '__cws_hidden', True)
return fun
#
# Classes
#
@dataclass
class ScheduleEntry:
"""An schedule entry is an EventBridge entry defined on a microservice, with the schedule expression,
its description and its response function."""
name: str
exp: str
desc: str
fun: t.Callable
class CoworksClient(FlaskClient):
"""Redefined to force mimetype to be 'text/plain' in case of string return.
"""
def __init__(self, *args: t.Any, aws_event=None, aws_context=None, **kwargs: t.Any) -> None:
super().__init__(*args, **kwargs)
self.environ_base.update({
"aws_event": aws_event,
"aws_context": aws_context,
})
class Blueprint(FlaskBlueprint):
""" Represents a blueprint, list of routes that will be added to microservice when registered.
See :ref:`Blueprint <blueprint>` for more information.
"""
def __init__(self, name: str = None, **kwargs):
"""Initialize a blueprint.
:param kwargs: Other Flask blueprint parameters.
"""
import_name = self.__class__.__name__.lower()
super().__init__(name or import_name, import_name, **kwargs)
@property
def logger(self) -> logging.Logger:
return current_app.logger
def make_setup_state(self, app: "TechMicroService", options: t.Dict, *args) -> BlueprintSetupState:
"""Stores creation state for deferred initialization."""
state = super().make_setup_state(app, options, *args)
# Defer blueprint route initialization.
if not options.get('hide_routes', False):
app.deferred_init_routes_functions.append(partial(add_coworks_routes, state.app, state))
return state
class TechMicroService(Flask):
"""Simple tech microservice.
See :ref:`tech` for more information.
"""
def __init__(self, name: str = None, *, configs: t.Union[Config, t.List[Config]] = None, **kwargs) -> None:
""" Initialize a technical microservice.
:param name: Name used to identify the microservice.
:param configs: Deployment configurations.
:param kwargs: Other Flask parameters.
"""
name = name or self.__class__.__name__.lower()
self.configs = configs or [LocalConfig(), DevConfig(), ProdConfig()]
if type(self.configs) is not list:
self.configs = [configs]
super().__init__(import_name=name, static_folder=None, **kwargs)
self.test_client_class = CoworksClient
self.request_class = Request
self.response_class = ApiResponse
self.deferred_init_routes_functions: t.List[t.Callable] = []
self._cws_app_initialized = False
self._cws_conf_updated = False
@self.before_request
def before():
self._check_token()
rp = request.path
if rp != '/' and rp.endswith('/'):
abort(Response("Trailing slash avalaible only on deployed version"))
def app_context(self):
"""Override to initialize coworks microservice."""
if not self._cws_app_initialized:
add_coworks_routes(self)
for fun in self.deferred_init_routes_functions:
fun()
self._cws_app_initialized = True
return super().app_context()
def request_context(self, environ: dict) -> RequestContext:
"""Redefined to :
- initialize the environment
- add Lambda event and context in globals.
"""
ctx = super().request_context(environ)
ctx.aws_event = environ.get('aws_event')
ctx.aws_context = environ.get('aws_context')
return ctx
def cws_client(self, event, context):
"""CoWorks client with new globals."""
return super().test_client(aws_event=event, aws_context=context)
def test_client(self, *args, **kwargs):
"""This client must be used only for testing."""
self.testing = True
self._update_config(load_env=True, workspace=DEFAULT_LOCAL_WORKSPACE)
return super().test_client(*args, **kwargs)
@property
def ms_type(self) -> str:
return 'tech'
@property
def routes(self) -> t.List[Rule]:
"""Returns the list of routes defined in the microservice."""
return [r.rule for r in self.url_map.iter_rules()]
def get_config(self, workspace) -> Config:
"""Returns the configuration corresponding to the workspace."""
for conf in self.configs:
if conf.is_valid_for(workspace):
return conf
return Config()
def token_authorizer(self, token: str) -> t.Union[bool, str]:
"""Defined the authorization process.
If the returned value is False, all routes for all stages are denied.
If the returned value is True, all routes for all stages are accepted.
If the returned value is a string, then it must be a stage name and all routes are accepted for this stage.
By default no entries are accepted, for security reasons.
"""
workspace = self.config['WORKSPACE']
if workspace == DEFAULT_LOCAL_WORKSPACE:
return True
return token == os.getenv('TOKEN')
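# An override sketch (an assumption, not part of the original class): a microservice
# that accepts a fixed token only for the 'dev' stage, following the contract above.
#
#   class MyMicroService(TechMicroService):
#       def token_authorizer(self, token):
#           if token == os.getenv('DEV_TOKEN'):
#               return 'dev'   # accept all routes, but only for the 'dev' stage
#           return False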
def base64decode(self, data):
"""Base64 decode function used for lambda interaction."""
if not isinstance(data, bytes):
data = data.encode('ascii')
output = base64.b64decode(data)
return output
def base64encode(self, data):
"""Base64 encode function used for lambda interaction."""
if not isinstance(data, bytes):
msg = f'Expected bytes type for body with binary Content-Type. Got {type(data)} type body instead.'
raise ValueError(msg)
data = base64.b64encode(data).decode('ascii')
return data
def __call__(self, arg1, arg2) -> dict:
"""Main microservice entry point."""
# Lambda event call or Flask call
if isfunction(arg2):
res = self._flask_handler(arg1, arg2)
else:
res = self._lambda_handler(arg1, arg2)
# res['headers']['x-cws-workspace'] = os.getenv('WORKSPACE')
return res
def _lambda_handler(self, event: t.Dict[str, t.Any], context: t.Dict[str, t.Any]):
"""Lambda handler.
"""
self.logger.debug(f"Event: {event}")
self.logger.debug(f"Context: {context}")
self._update_config(load_env=False)
if event.get('type') == 'TOKEN':
return self._token_handler(event, context)
return self._api_handler(event, context)
def _token_handler(self, event: t.Dict[str, t.Any], context: t.Dict[str, t.Any]) -> dict:
"""Authorization token handler.
"""
self.logger.debug(f"Calling {self.name} for authorization : {event}")
try:
res = self.token_authorizer(event['authorizationToken'])
return TokenResponse(res, event['methodArn']).json
except Exception as e:
self.logger.debug(f"Error in token handler for {self.name} : {e}")
return TokenResponse(False, event['methodArn']).json
def _api_handler(self, event: t.Dict[str, t.Any], context: t.Dict[str, t.Any]) -> dict:
"""API handler.
"""
self.logger.debug(f"Calling {self.name} by api : {event}")
def full_path():
url = event['path']
# Replaces route parameters
url = url.format(url, **event['params']['path'])
# Adds query parameters
params = event['multiValueQueryStringParameters']
if params:
url += '?'
for i, (k, v) in enumerate(params.items()):
if i:
url += '&'
for j, vl in enumerate(v):
if j:
url += '&'
url += f"{k}={vl}"
return url
# Forward the event as a simple client call and handle exceptions if needed
try:
with self.cws_client(event, context) as c:
method = event['httpMethod']
kwargs = self._get_kwargs(event)
resp = getattr(c, method.lower())(full_path(), **kwargs)
return self._convert_to_lambda_response(resp)
except Exception as e:
error = e if isinstance(e, HTTPException) else InternalServerError(original_exception=e)
return self._structured_error(error)
def _flask_handler(self, environ: t.Dict[str, t.Any], start_response: t.Callable[[t.Any], None]):
"""Flask handler.
"""
self._update_config(load_env=True)
return self.wsgi_app(environ, start_response)
def _update_config(self, *, load_env: bool, workspace: str = None):
if not self._cws_conf_updated:
workspace = workspace or os.environ.get('WORKSPACE', DEFAULT_DEV_WORKSPACE)
if workspace == DEFAULT_LOCAL_WORKSPACE:
click.echo(f" * Workspace: {workspace}")
config = self.get_config(workspace)
self.config['WORKSPACE'] = config.workspace
if load_env:
config.load_environment_variables(self.root_path)
self._cws_conf_updated = True
def _check_token(self):
if not request.in_lambda_context:
# Get no_auth option for this entry
no_auth = False
if request.url_rule:
view_function = self.view_functions.get(request.url_rule.endpoint, None)
if view_function:
no_auth = getattr(view_function, '__CWS_NO_AUTH', False)
# Checks token if authorization needed
if not no_auth:
token = request.headers.get('Authorization', self.config.get('DEFAULT_TOKEN'))
if token is None:
raise Unauthorized(www_authenticate=WWWAuthenticate(auth_type="basic"))
valid = self.token_authorizer(token)
if not valid:
abort(403)
def _get_kwargs(self, event):
def is_json(mt):
return (
mt == "application/json"
or type(mt) is str
and mt.startswith("application/")
and mt.endswith("+json")
)
kwargs = {}
content_type = event['headers'].get('content-type')
if content_type:
kwargs['content_type'] = content_type
method = event['httpMethod']
if method not in ['PUT', 'POST']:
return kwargs
is_encoded = event.get('isBase64Encoded', False)
body = event['body']
if body and is_encoded:
body = self.base64decode(body)
self.logger.debug(f"Body: {body}")
if is_json(content_type):
kwargs['json'] = body
return kwargs
kwargs['data'] = body
return kwargs
def _convert_to_lambda_response(self, resp):
"""Convert Lambda response."""
# returns JSON structure
if resp.is_json:
try:
return self._structured_payload(resp.json, resp.status_code, resp.headers)
except (Exception,):
resp.mimetype = "text/plain"
# returns simple string JSON structure
if resp.mimetype.startswith('text'):
try:
return self._structured_payload(resp.get_data(True), resp.status_code, resp.headers)
except ValueError:
pass
# returns direct payload
return self.base64encode(resp.get_data())
def _structured_payload(self, body, status_code, headers):
return {
"statusCode": status_code,
"headers": {k: v for k, v in headers.items()},
"body": body,
"isBase64Encoded": False,
}
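# Illustrative sketch (an assumption): a payload produced by _structured_payload above,
# matching the proxy-response shape returned to the Lambda caller.
#
#   {
#       "statusCode": 200,
#       "headers": {"Content-Type": "application/json"},
#       "body": {"message": "ok"},
#       "isBase64Encoded": False,
#   }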
def _structured_error(self, e: HTTPException):
headers = {'content_type': "application/json"}
return self._structured_payload(e.description, e.code, headers)
def schedule(self, *args, **kwargs):
raise Exception("Schedule decorator is defined on BizMicroService, not on TechMicroService")
class BizMicroService(TechMicroService):
"""Biz composed microservice activated | |
__createSvgTable__(tempData)
root.appendChild(headerGroup)
root.appendChild(alternativeTableHeaderBackgroundGround)
root.appendChild(alternativeTableDataBackgrounGroup)
root.appendChild(alternativeNewWordGroup)
root.appendChild(xmlAlternativeTable)
return xmlDoc.toxml('utf-8')
# meanData, rangeData and stdData should be objects of the resultRatingWeightTableData class
def convertRatingWeightSessionResultToSvg(meanData, rangeData, stdData):
###########settings###########
fontSize = 30
headerFontSize = 45
f = ImageFont.truetype(DENDROGRAM_FONT_LOCATION, fontSize)
fHeader = ImageFont.truetype(DENDROGRAM_FONT_LOCATION, headerFontSize)
fontName = 'arial'
meanTableYOffset = 5 # offset from the bottom of the header. In pixels
rangeTableYOffset = 5 # offset from the bottom of the header. In pixels
stdTableYOffset = 5 # offset from the bottom of the header. In pixels
meanTableXOffset = 5 # offset from the biggest left concern to the table and the offset from the table to the right concern. In pixels
rangeTableXOffset = 5 # offset from the biggest left concern to the table and the offset from the table to the right concern. In pixels
stdTableXOffset = 5 # offset from the biggest left concern to the table and the offset from the table to the right concern. In pixels
tableHeaderLinearColor1 = (141, 179, 235)
tableHeaderLinearColor2 = (95, 144, 227)
xWordCellSpace = 5 # space between a word and the right and left line of the cell (in pixels)
yWordCellSpace = 5 # space between the biggest word and the top and bottom line of the cell (in pixels)
meanHeaderYOffset = 10 # offset from the top of the header to the bottom of the element above it. In pixels
rangeHeaderYOffset = 10 # offset from the top of the header to the bottom of the element above it. In pixels
stdHeaderYOffset = 10 # offset from the top of the header to the bottom of the element above it. In pixels
meanLeftConcernWordOffset = 5
rangeLeftConcernWordOffset = 5
stdLeftConcernWordOffset = 5
meanRightConcernWordOffset = 5
rangeRightConcernWordOffset = 5
stdRightConcernWordOffset = 5
tableLineThickness = 1 # in pixels
tableLineColor = (60, 179, 113) # value in rbg
tableCellWordColor = (60, 179, 113) # value in rbg
tableHeaderCellWordColor = (255, 255, 255) # value in rbg
##############end##############
tempData = __TableData___()
rangeTableData = []
meanTableData = []
stdTableData = []
tempData.nRows = len(meanData.tableData) + 1 # it is + 1 for the table header
tempData.nCols = len(meanData.tableHeader)
rangeColWidths = [0 for x in xrange(tempData.nCols)] # @UnusedVariable
meanColWidths = [0 for x in xrange(tempData.nCols)] # @UnusedVariable
stdColWidths = [0 for x in xrange(tempData.nCols)] # @UnusedVariable
rangeCellHeight = 0
meanCellHeight = 0
stdCellHeight = 0
meanTableYTotalOffset = 0 # @UnusedVariable
rangeTableYTotalOffset = 0 # @UnusedVariable
stdTableYTotalOffset = 0 # @UnusedVariable
meanTableXTotalOffset = 0 # @UnusedVariable
rangeTableXTotalOffset = 0 # @UnusedVariable
stdTableXTotalOffset = 0 # @UnusedVariable
meanHeaderSize = None # @UnusedVariable
rangeHeaderSize = None # @UnusedVariable
stdHeaderSize = None # @UnusedVariable
meanTablesize = None # tulip (width, height) #@UnusedVariable
rangeTablesize = None # tulip (width, height) #@UnusedVariable
stdTablesize = None # tulip (width, height) #@UnusedVariable
meanLeftConcernWordMaxWidth = 0
rangeLeftConcernWordMaxWidth = 0
stdLeftConcernWordMaxWidth = 0
tempData.tableLineColor = tableLineColor
tempData.lineThickness = tableLineThickness
tempData.tableCellWordColor = tableCellWordColor
tempData.tableHeaderCellWordColor = tableHeaderCellWordColor
tempData.fontName = fontName
tempData.fontObject = f
tempData.fontSize = fontSize
####################Pre-processing####################
# create the correct format for the __createSvgTable__ of the tables data
i = 0
j = 0
while i < tempData.nRows - 1:
rowMean = []
rowRange = []
rowStd = []
while j < tempData.nCols:
rowMean.append(meanData.tableData[i][j][0])
rowRange.append(rangeData.tableData[i][j][0])
rowStd.append(stdData.tableData[i][j][0])
size = None
if type(rowMean[j]) != StringType and type(rowMean[j]) != UnicodeType:
size = f.getsize(str(rowMean[j]))
else:
size = f.getsize(rowMean[j])
if size[0] > meanColWidths[j]:
meanColWidths[j] = size[0]
if size[1] > meanCellHeight:
meanCellHeight = size[1]
if type(rowRange[j]) != StringType and type(rowRange[j]) != UnicodeType:
size = f.getsize(str(rowRange[j]))
else:
size = f.getsize(rowRange[j])
if size[0] > rangeColWidths[j]:
rangeColWidths[j] = size[0]
if size[1] > rangeCellHeight:
rangeCellHeight = size[1]
if type(rowStd[j]) != StringType and type(rowStd[j]) != UnicodeType:
size = f.getsize(str(rowStd[j]))
else:
size = f.getsize(rowStd[j])
if size[0] > stdColWidths[j]:
stdColWidths[j] = size[0]
if size[1] > stdCellHeight:
stdCellHeight = size[1]
j += 1
rangeTableData.append(rowRange)
meanTableData.append(rowMean)
stdTableData.append(rowStd)
j = 0
i += 1
# add the table header to the table data and check the sizes
i = 0
while i < tempData.nCols:
size = f.getsize(meanData.tableHeader[i])
if size[0] > meanColWidths[i]:
meanColWidths[i] = size[0]
if size[1] > meanCellHeight:
meanCellHeight = size[1]
size = f.getsize(rangeData.tableHeader[i])
if size[0] > rangeColWidths[i]:
rangeColWidths[i] = size[0]
if size[1] > rangeCellHeight:
rangeCellHeight = size[1]
size = f.getsize(stdData.tableHeader[i])
if size[0] > stdColWidths[i]:
stdColWidths[i] = size[0]
if size[1] > stdCellHeight:
stdCellHeight = size[1]
i += 1
# add xWordCellSpace * 2 to each position of the array (in place)
meanColWidths[:] = [x + (xWordCellSpace * 2) for x in meanColWidths]
rangeColWidths[:] = [x + (xWordCellSpace * 2) for x in rangeColWidths]
stdColWidths[:] = [x + (xWordCellSpace * 2) for x in stdColWidths]
# add the extra space to the cell height
meanCellHeight += 2 * yWordCellSpace
rangeCellHeight += 2 * yWordCellSpace
stdCellHeight += 2 * yWordCellSpace
# calculate the size of the header words
meanHeaderSize = fHeader.getsize(meanData.header)
rangeHeaderSize = fHeader.getsize(rangeData.header)
stdHeaderSize = fHeader.getsize(stdData.header)
# calculate the height and width of the tables
height = (tempData.nRows + 1) * tableLineThickness + (tempData.nRows * meanCellHeight)
width = (tempData.nCols + 1) * tableLineThickness + sum(meanColWidths)
meanTablesize = (width, height)
height = 0
width = 0
height = (tempData.nRows + 1) * tableLineThickness + (tempData.nRows * rangeCellHeight)
width = (tempData.nCols + 1) * tableLineThickness + sum(rangeColWidths)
rangeTablesize = (width, height)
height = 0
width = 0
height = (tempData.nRows + 1) * tableLineThickness + (tempData.nRows * stdCellHeight)
width = (tempData.nCols + 1) * tableLineThickness + sum(stdColWidths)
stdTablesize = (width, height)
# calculate max width of the left concern
i = 0
while i < len(meanData.concerns):
size = f.getsize(meanData.concerns[i][0])
if size[0] > meanLeftConcernWordMaxWidth:
meanLeftConcernWordMaxWidth = size[0]
size = f.getsize(rangeData.concerns[i][0])
if size[0] > rangeLeftConcernWordMaxWidth:
rangeLeftConcernWordMaxWidth = size[0]
size = f.getsize(stdData.concerns[i][0])
if size[0] > stdLeftConcernWordMaxWidth:
stdLeftConcernWordMaxWidth = size[0]
i += 1
# calculate the offset of each table
# y offset
meanTableYTotalOffset = meanHeaderSize[1] + meanHeaderYOffset + meanTableYOffset
rangeTableYTotalOffset = rangeHeaderSize[1] + rangeHeaderYOffset + rangeTableYOffset
rangeTableYTotalOffset += meanTableYTotalOffset + meanTablesize[1]
stdTableYTotalOffset = stdHeaderSize[1] + stdHeaderYOffset + stdTableYOffset
stdTableYTotalOffset += rangeTableYTotalOffset + rangeTablesize[1]
# x offset
meanTableXTotalOffset = meanLeftConcernWordMaxWidth + meanTableXOffset + meanLeftConcernWordOffset
rangeTableXTotalOffset = rangeLeftConcernWordMaxWidth + rangeTableXOffset + rangeLeftConcernWordOffset
stdTableXTotalOffset = stdLeftConcernWordMaxWidth + stdTableXOffset + stdLeftConcernWordOffset
####################End pre-processing####################
# add the table header
meanTableData.insert(0, meanData.tableHeader)
rangeTableData.insert(0, rangeData.tableHeader)
stdTableData.insert(0, stdData.tableHeader)
# create xml doc
imp = SvgDOMImplementation()
xmlDoc = imp.createSvgDocument()
root = xmlDoc.documentElement
root.setXmlns('http://www.w3.org/2000/svg')
root.setVersion('1.1')
tempNode = None # @UnusedVariable
globalDefNode = xmlDoc.createDefsNode()
root.appendChild(globalDefNode)
meanTableDataGroup = xmlDoc.createGNode()
meanTableDataGroup.setId('meanTableDataGroup')
rangeTableDataGroup = xmlDoc.createGNode()
rangeTableDataGroup.setId('rangeTableDataGroup')
stdTableDataGroup = xmlDoc.createGNode()
stdTableDataGroup.setId('stdTableDataGroup')
########## create first row (header) linear gradient ##########
tempNode = xmlDoc.createLinearGradientNode('0%', '0%', '0%', '100%')
tempNode.appendChild(xmlDoc.createStopNode('0%', createColorRGBString(tableHeaderLinearColor1), 1))
tempNode.appendChild(xmlDoc.createStopNode('100%', createColorRGBString(tableHeaderLinearColor2), 1))
tempNode.setId('tableHeaderGradient')
globalDefNode.appendChild(tempNode)
########## end create first row (header) linear gradient ##########
########## create the svg tables##########
tempData.tableData = meanTableData
tempData.cellHeight = meanCellHeight
tempData.cellWidths = meanColWidths
tempData.yTableOffSet = meanTableYTotalOffset
tempData.xTableOffSet = meanTableXTotalOffset
xmlTableMean = __createSvgTable__(tempData)
tempData.tableData = rangeTableData
tempData.cellHeight = rangeCellHeight
tempData.cellWidths = rangeColWidths
tempData.yTableOffSet = rangeTableYTotalOffset
tempData.xTableOffSet = rangeTableXTotalOffset
xmlTableRange = __createSvgTable__(tempData)
tempData.tableData = stdTableData
tempData.cellHeight = stdCellHeight
tempData.cellWidths = stdColWidths
tempData.yTableOffSet = stdTableYTotalOffset
tempData.xTableOffSet = stdTableXTotalOffset
xmlTableStd = __createSvgTable__(tempData)
########## end create the svg tables ##########
########## create the background of the table headers ##########
meanTableHeaderBackgroundGroup = xmlDoc.createGNode()
meanTableHeaderBackgroundGroup.setId('meanTableHeaderBackgroundGroup')
rangeTableHeaderBackgroundGroup = xmlDoc.createGNode()
rangeTableHeaderBackgroundGroup.setId('rangeTableHeaderBackgroundGroup')
stdTableHeaderBackgroundGroup = xmlDoc.createGNode()
stdTableHeaderBackgroundGroup.setId('stdTableHeaderBackgroundGroup')
i = 0
while i < len(meanData.tableHeader):
# calculate the total width of columns 0 to i-1 for all 3 tables
n1 = 0
n2 = 0
n3 = 0
j = 0
while j < i:
n1 += meanColWidths[j]
n2 += rangeColWidths[j]
n3 += stdColWidths[j]
j += 1
# mean
x = meanTableXTotalOffset + (tableLineThickness / 2) + n1 + i * tableLineThickness
y = meanTableYTotalOffset + (tableLineThickness / 2)
tempNode = xmlDoc.createRectNode(x, y, meanCellHeight + tableLineThickness,
meanColWidths[i] + tableLineThickness)
tempNode.setFill('url(#tableHeaderGradient)')
meanTableHeaderBackgroundGroup.appendChild(tempNode)
# range
x = rangeTableXTotalOffset + (tableLineThickness / 2) + n2 + i * tableLineThickness
y = rangeTableYTotalOffset + (tableLineThickness / 2)
tempNode = xmlDoc.createRectNode(x, y, rangeCellHeight + tableLineThickness,
rangeColWidths[i] + tableLineThickness)
tempNode.setFill('url(#tableHeaderGradient)')
rangeTableHeaderBackgroundGroup.appendChild(tempNode)
# std
x = stdTableXTotalOffset + (tableLineThickness / 2) + n3 + i * tableLineThickness
y = stdTableYTotalOffset + (tableLineThickness / 2)
tempNode = xmlDoc.createRectNode(x, y, stdCellHeight + tableLineThickness,
stdColWidths[i] + tableLineThickness)
tempNode.setFill('url(#tableHeaderGradient)')
stdTableHeaderBackgroundGroup.appendChild(tempNode)
i += 1
# GameHelper.py
# Class implementation for 'GameHelper'
import sys
import json
import struct
import msgpack
from game.Map import Map
from game.Command import Command
from game.Coordinate import Coordinate, direction_deltas
from game.params import (
MOVE_COMMAND,
BUILD_COMMAND,
MINE_COMMAND,
HIVE_COST,
PLAYER_LOG_FN,
LOG_FE,
Direction
)
from game.ObstacleMapProblem import ObstacleMapProblem
from game.Bot import read, write
from game.astar_search import astar_search
# ------------------------------------------------------------------------------
# GameHelper
# A GameHelper instance wraps all of the game logic functionality into
# a convenient package to aid users in bot development.
class GameHelper:
def __init__(self):
# first thing the game server sends us through STDIN is our player id
self.myId = read(sys.stdin.buffer)
# second thing is number of players
self.numPlayers = read(sys.stdin.buffer)
self.params = {} # default the params to an empty dict
self.load_params() # then load in the params (3rd thing sent to a bot)
self.map = None
# list of enemy IDs
self.eIds = list(range(self.numPlayers))
self.eIds.remove(self.myId)
self.me = {"resources": 0}
self.turn_handler = None
self.player_log_fp = open(PLAYER_LOG_FN + str(self.myId) + LOG_FE, "w")
self.move_dict = {}
self.command_queue = []
def __del__(self):
self.player_log_fp.close()
# --------------------------------------------------------------------------
# BOT PARAMETERS
# Reads bot parameters over stdin and sets them in parameter dictionary
def load_params(self):
params = read(sys.stdin.buffer)
if not params:
return
# for each param, parse it into the correct type
for p in params:
if p['type'] == 'INT':
self.params[p['name']] = int(p['value'])
elif p['type'] == 'FLOAT':
self.params[p['name']] = float(p['value'])
else:
self.params[p['name']] = p['value']
def set_default_params(self, params):
for key, val in params.items():
if key not in self.params:
self.params[key] = val
# Get the value for a parameter specified externally (via the web UI)
# Return:
# value, or None if nonexistent param
def param(self, param_name):
if param_name in self.params:
return self.params[param_name]
return None
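# Usage sketch (illustrative, parameter names are made up): after
# game.set_default_params({'aggression': 2}), game.param('aggression')
# returns the externally supplied value when the web UI provided one,
# otherwise the default 2; game.param('missing') simply returns None.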
# --------------------------------------------------------------------------
# TODO: fix
def get_movement_directions(self):
return [
Direction.EAST,
Direction.WEST,
Direction.NORTHEAST,
Direction.NORTHWEST,
Direction.SOUTHEAST,
Direction.SOUTHWEST,
Direction.NONE
]
# --------------------------------------------------------------------------
# COMMAND CREATION
# Create a move command and append it to the command queue.
# Return: (None)
def move(self, position_from, num_units, direction):
command = Command(self.myId, Coordinate(position_from), MOVE_COMMAND, num_units, direction)
if command:
self.command_queue.append(command)
# Create a move command and append it to the command queue,
# this time with an additional layer of abstraction to aid development.
# Return: (None)
def move_towards(self, position_from, position_to, num_units=None):
position_to = Coordinate(position_to)
position_from = Coordinate(position_from)
if position_from == position_to:
return None
if position_from.x < position_to.x:
d = Direction.EAST
elif position_from.x > position_to.x:
d = Direction.WEST
elif position_from.y < position_to.y:
d = Direction.SOUTHWEST
elif position_from.y > position_to.y:
d = Direction.NORTHEAST
n = num_units if num_units else self.get_unit_count_by_position(position_from.x, position_from.y)
self.move(position_from, n, d)
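# Worked example (illustrative): move_towards((2, 3), (5, 3)) compares x
# first, so it queues Direction.EAST for every unit currently at (2, 3);
# move_towards((2, 3), (2, 1), num_units=4) finds the x values equal and
# position_from.y > position_to.y, so it queues 4 units Direction.NORTHEAST.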
# Create a build command and append it to the command queue.
# Return: (None)
def build(self, position):
# TODO: track concurrent mine and build commands:
# right now, hive simply requires a single unit in a cell
# do we care though that this unit will also be able to mine as well?
# the difference is small, but it will be more realistic in some sense
# if we have this logic.
command = Command(self.myId, Coordinate(position), BUILD_COMMAND, 1, Direction.NONE)
if command:
self.command_queue.append(command)
# Create a mine command and append it to the command queue.
# Return: (None)
def mine(self, position, num_units):
command = Command(self.myId, Coordinate(position), MINE_COMMAND, num_units, Direction.NONE)
if command:
self.command_queue.append(command)
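# Minimal turn sketch (illustrative only; assumes the engine flushes
# command_queue after the turn handler runs and that cells expose a
# 'position' attribute):
#   for cell in game.get_my_cells():
#       if game.me["resources"] >= HIVE_COST and not cell.hive:
#           game.build(cell.position)
#       else:
#           game.mine(cell.position, cell.units[game.myId])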
# --------------------------------------------------------------------------
# CELL GETTERS
# Get map cell at specified (x, y) coordinate.
# Return: (Cell)
# Cell at <position> if <position> is valid, else None
def get_cell(self, x, y=None):
if type(x) == Coordinate:
return self.map.get_cell(x)
# map handles validity check
return self.map.get_cell(Coordinate(x, y))
# Get count of all of my cells on the map.
# Return: (number)
# the number of cells on the map that I control.
def get_my_cell_count(self):
return len(self.get_my_cells())
# Get count of all of enemy cells on the map.
# Return: (number)
# the number of cells on the map that are enemy controlled.
def get_enemy_cell_count(self):
return len(self.get_enemy_cells())
# Get count of all cells controlled by player with playerId on the map.
# Return: (number)
# the number of cells on the map that are controlled by player with playerId
def get_player_cell_count(self, playerId):
return len(self.get_player_cells(playerId))
# Get a list of all my cells on the map.
# Return: (list of Cell)
# list of all my cells
def get_my_cells(self):
return self.get_occupied_cells(self.myId)
# Get a list of all enemy cells on the map.
# Return: (list of Cell)
# list of all enemy cells
def get_enemy_cells(self):
enemy_cells = []
for eId in self.eIds:
single_enemy_cells = self.get_occupied_cells(eId)
for single_enemy_cell in single_enemy_cells:
enemy_cells.append(single_enemy_cell)
return enemy_cells
# Get a list of all cells controlled by player with playerId on the map.
# Return: (list of Cell)
# list of all cells controlled by player with playerId
def get_player_cells(self, playerId):
return self.get_occupied_cells(playerId)
# Get a list of all cells in which I have a hive.
# Return: (list of Cell)
# list of all my hive-occupied cells
def get_my_hive_sites(self):
cells = []
for col in self.map.cells:
for cell in col:
if cell.hive and cell.hive.ownerId == self.myId:
cells.append(cell)
return cells
# Get a list of all cells in which enemy has a hive.
# Return: (list of Cell)
# list of all enemy hive-occupied cells
def get_enemy_hive_sites(self):
cells = []
for col in self.map.cells:
for cell in col:
if cell.hive and cell.hive.ownerId != self.myId:
cells.append(cell)
return cells
# Get a list of all cells in which player specified by <playerId>
# has at least one unit - this is equivalent to control of this cell.
# Return: (list of Cell)
# list of all cells occupied by player with <playerId>
def get_occupied_cells(self, playerId):
cells = []
for col in self.map.cells:
for cell in col:
if cell.units[playerId] > 0:
cells.append(cell)
return cells
# --------------------------------------------------------------------------
# HIVE DATA GETTERS
# Get a count of all hives on the map that I control.
# Return: (number)
# the number of hives on the map that I control
def get_my_hive_count(self):
return len(self.get_my_hives())
# Get a count of all hives on the map that are enemy controlled.
# Return: (number)
# the number of hives on the map that are enemy controlled
def get_enemy_hive_count(self):
return len(self.get_enemy_hives())
# Get a count of all hives on the map controlled by player with playerId
# Return: (number)
# the number of hives on the map controlled by player with playerId
def get_player_hive_count(self, playerId):
return len(self.get_player_hives(playerId))
# Get a list of all my hives on the map.
# Return: (list of Hive)
# list of hives on the map that I control
def get_my_hives(self):
return self.get_player_hives(self.myId)
# Get a list of all enemy hives on the map.
# Return: (list of Hive)
# list of hives on the map that the enemy players control
def get_enemy_hives(self):
blds = []
for col in self.map.cells:
for cell in col:
if cell.hive and cell.hive.ownerId != self.myId:
blds.append(cell.hive)
return blds
# Get a list of all hives controlled by a certain player
# Return: (list of hive)
# list of hive instances controlled by <playerId>
def get_player_hives(self, playerId):
hives = []
for col in self.map.cells:
for cell in col:
if cell.hive and (cell.hive.ownerId == playerId):
hives.append(cell.hive)
return hives
# Get a list of all hives on the map
# Return: (list of hives)
# flat list of all hives on the map, regardless of player
def get_all_hives(self):
all_hives = []
for player_id in range(self.map.num_players):
for hive in self.get_player_hives(player_id):
all_hives.append(hive)
return all_hives
# Get a list of all my hives' positions on the map.
# Return: (list of positions)
# list of positions of hives on the map that I control
def get_my_hive_positions(self):
return self.get_player_hive_positions(self.myId)
# Get a list of all enemy hives' positions on the map.
# Return: (list of positions)
# list of positions of hives on the map that the enemy players control
def get_enemy_hive_positions(self):
positions = []
for col in self.map.cells:
for cell in col:
if cell.hive and cell.hive.ownerId != self.myId:
positions.append(cell.position)
return positions
# Get a list of all hives controlled by a certain player
| |
'WIDE-HEADED RIGHTWARDS VERY HEAVY BARB ARROW': 129154,
'WIDE-HEADED SOUTH EAST BARB ARROW': 129134,
'WIDE-HEADED SOUTH EAST HEAVY BARB ARROW': 129150,
'WIDE-HEADED SOUTH EAST LIGHT BARB ARROW': 129126,
'WIDE-HEADED SOUTH EAST MEDIUM BARB ARROW': 129142,
'WIDE-HEADED SOUTH EAST VERY HEAVY BARB ARROW': 129158,
'WIDE-HEADED SOUTH WEST BARB ARROW': 129135,
'WIDE-HEADED SOUTH WEST HEAVY BARB ARROW': 129151,
'WIDE-HEADED SOUTH WEST LIGHT BARB ARROW': 129127,
'WIDE-HEADED SOUTH WEST MEDIUM BARB ARROW': 129143,
'WIDE-HEADED SOUTH WEST VERY HEAVY BARB ARROW': 129159,
'WIDE-HEADED UPWARDS BARB ARROW': 129129,
'WIDE-HEADED UPWARDS HEAVY BARB ARROW': 129145,
'WIDE-HEADED UPWARDS LIGHT BARB ARROW': 129121,
'WIDE-HEADED UPWARDS MEDIUM BARB ARROW': 129137,
'WIDE-HEADED UPWARDS VERY HEAVY BARB ARROW': 129153,
'WIGGLY VERTICAL LINE': 11838,
'WIND BLOWING FACE': 127788,
'WIND CHIME': 127888,
'WINE GLASS': 127863,
'WINKING FACE': 128521,
'WIRED KEYBOARD': 128430,
'WJ': 983225,
'WOLF FACE': 128058,
'WOMAN': 128105,
'WOMAN WITH BUNNY EARS': 128111,
'WOMANS BOOTS': 128098,
'WOMANS CLOTHES': 128090,
'WOMANS HAT': 128082,
'WOMANS SANDAL': 128097,
'WOMENS SYMBOL': 128698,
'WORLD MAP': 128506,
'WORRIED FACE': 128543,
'WRAPPED PRESENT': 127873,
'WRENCH': 128295,
'YELLOW HEART': 128155,
'YI SYLLABLE ITERATION MARK': 983235,
'ZIPPER-MOUTH FACE': 129296,
'ZWJ': 983215,
'ZWNBSP': 983255,
'ZWNJ': 983214,
'ZWSP': 983213,
}
_code_by_name_corrected = {
}
_cjk_prefix = "CJK UNIFIED IDEOGRAPH-"
_hangul_prefix = 'HANGUL SYLLABLE '
_hangul_L = ['G', 'GG', 'N', 'D', 'DD', 'R', 'M', 'B', 'BB',
'S', 'SS', '', 'J', 'JJ', 'C', 'K', 'T', 'P', 'H']
_hangul_V = ['A', 'AE', 'YA', 'YAE', 'EO', 'E', 'YEO', 'YE', 'O', 'WA', 'WAE',
'OE', 'YO', 'U', 'WEO', 'WE', 'WI', 'YU', 'EU', 'YI', 'I']
_hangul_T = ['', 'G', 'GG', 'GS', 'N', 'NJ', 'NH', 'D', 'L', 'LG', 'LM',
'LB', 'LS', 'LT', 'LP', 'LH', 'M', 'B', 'BS', 'S', 'SS',
'NG', 'J', 'C', 'K', 'T', 'P', 'H']
def _lookup_hangul(syllables):
l_code = v_code = t_code = -1
for i in range(len(_hangul_L)):
jamo = _hangul_L[i]
if (syllables[:len(jamo)] == jamo and
(l_code < 0 or len(jamo) > len(_hangul_L[l_code]))):
l_code = i
if l_code < 0:
raise KeyError
start = len(_hangul_L[l_code])
for i in range(len(_hangul_V)):
jamo = _hangul_V[i]
if (syllables[start:start + len(jamo)] == jamo and
(v_code < 0 or len(jamo) > len(_hangul_V[v_code]))):
v_code = i
if v_code < 0:
raise KeyError
start += len(_hangul_V[v_code])
for i in range(len(_hangul_T)):
jamo = _hangul_T[i]
if (syllables[start:start + len(jamo)] == jamo and
(t_code < 0 or len(jamo) > len(_hangul_T[t_code]))):
t_code = i
if t_code < 0:
raise KeyError
start += len(_hangul_T[t_code])
if len(syllables[start:]):
raise KeyError
return 0xAC00 + (l_code * 21 + v_code) * 28 + t_code
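# Worked example (illustrative): for 'GA' the longest matches are L='G'
# (index 0), V='A' (index 0) and the empty trailing jamo T='' (index 0),
# so _lookup_hangul('GA') returns 0xAC00 + (0 * 21 + 0) * 28 + 0 == 0xAC00,
# the code point of HANGUL SYLLABLE GA.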
def _lookup_cjk(cjk_code):
if len(cjk_code) != 4 and len(cjk_code) != 5:
raise KeyError
for c in cjk_code:
if not ('0' <= c <= '9' or 'A' <= c <= 'F'):
raise KeyError
code = int(cjk_code, 16)
if (0x3400 <= code <= 0x4DB5 or 0x4E00 <= code <= 0x9FD5 or 0x20000 <= code <= 0x2A6D6 or 0x2A700 <= code <= 0x2B734 or 0x2B740 <= code <= 0x2B81D or 0x2B820 <= code <= 0x2CEA1):
return code
raise KeyError
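# Example (illustrative): _lookup_cjk('4E00') parses the hex digits to
# 0x4E00, which falls inside the 0x4E00-0x9FD5 unified-ideograph range and
# is returned; 'FFFF' parses fine but matches no listed range, so KeyError
# is raised.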
def lookup(name, with_named_sequence=False):
if name[:len(_cjk_prefix)] == _cjk_prefix:
return _lookup_cjk(name[len(_cjk_prefix):])
if name[:len(_hangul_prefix)] == _hangul_prefix:
return _lookup_hangul(name[len(_hangul_prefix):])
if not base_mod:
code = trie_lookup(name)
else:
try:
code = _code_by_name[name]
except KeyError:
if name not in _code_by_name_corrected:
code = base_mod.trie_lookup(name)
else:
raise
if not with_named_sequence and 0xF0200 <= code < 0xF0400:
raise KeyError
return code
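# Round-trip sketch (illustrative): lookup('CJK UNIFIED IDEOGRAPH-4E00')
# takes the CJK fast path and returns 0x4E00, and name(0x4E00) below rebuilds
# the same string; code points in the named-sequence window 0xF0200-0xF03FF
# are rejected unless with_named_sequence=True is passed.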
def name(code):
if (0x3400 <= code <= 0x4DB5 or 0x4E00 <= code <= 0x9FD5 or 0x20000 <= code <= 0x2A6D6 or 0x2A700 <= code <= 0x2B734 or 0x2B740 <= code <= 0x2B81D or 0x2B820 <= code <= 0x2CEA1):
return "CJK UNIFIED IDEOGRAPH-" + hex(code)[2:].upper()
if 0xAC00 <= code <= 0xD7A3:
# vl_code, t_code = divmod(code - 0xAC00, len(_hangul_T))
vl_code = (code - 0xAC00) // len(_hangul_T)
t_code = (code - 0xAC00) % len(_hangul_T)
# l_code, v_code = divmod(vl_code, len(_hangul_V))
l_code = vl_code // len(_hangul_V)
v_code = vl_code % len(_hangul_V)
return ("HANGUL SYLLABLE " + _hangul_L[l_code] +
_hangul_V[v_code] + _hangul_T[t_code])
if 0xF0000 <= code < 0xF0400:
raise KeyError
if not base_mod:
return lookup_charcode(code)
else:
try:
return _names[code]
except KeyError:
if code not in _names_corrected:
return base_mod.lookup_charcode(code)
else:
raise
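# Example (illustrative): name(0xAC01) splits the offset 1 into vl_code = 0,
# t_code = 1, then l_code = 0, v_code = 0, and returns 'HANGUL SYLLABLE GAG',
# the exact inverse of _lookup_hangul above.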
_db_records = [
('Cc', 'B', 'N', 5),
('Cc', 'BN', 'N', 0),
('Cc', 'S', 'N', 1),
('Cc', 'S', 'N', 5),
('Cc', 'WS', 'N', 5),
('Cf', 'AL', 'N', 8192),
('Cf', 'AN', 'N', 8192),
('Cf', 'BN', 'A', 8192),
('Cf', 'BN', 'N', 8192),
('Cf', 'FSI', 'N', 8192),
('Cf', 'L', 'N', 8192),
('Cf', 'LRE', 'N', 8192),
('Cf', 'LRI', 'N', 8192),
('Cf', 'LRO', 'N', 8192),
('Cf', 'ON', 'N', 8192),
('Cf', 'PDF', 'N', 8192),
('Cf', 'PDI', 'N', 8192),
('Cf', 'R', 'N', 8192),
('Cf', 'RLE', 'N', 8192),
('Cf', 'RLI', 'N', 8192),
('Cf', 'RLO', 'N', 8192),
('Cn', '', 'N', 0),
('Cn', '', 'W', 0),
('Co', 'L', 'A', 0),
('Cs', 'L', 'N', 0),
('Ll', 'L', 'A', 7202),
('Ll', 'L', 'F', 7202),
('Ll', 'L', 'N', 7202),
('Ll', 'L', 'Na', 7202),
('Ll', 'R', 'N', 7202),
('Lm', 'AL', 'N', 15362),
('Lm', 'L', 'A', 15362),
('Lm', 'L', 'A', 15394),
('Lm', 'L', 'H', 14338),
('Lm', 'L', 'H', 15362),
('Lm', 'L', 'N', 12322),
('Lm', 'L', 'N', 15362),
('Lm', 'L', 'N', 15394),
('Lm', 'L', 'W', 15362),
('Lm', 'ON', 'A', 15362),
('Lm', 'ON', 'N', 12290),
('Lm', 'ON', 'N', 15362),
('Lm', 'R', 'N', 15362),
('Lo', 'AL', 'N', 4098),
('Lo', 'AL', 'N', 7170),
('Lo', 'L', 'A', 7202),
('Lo', 'L', 'H', 7170),
('Lo', 'L', 'N', 6146),
('Lo', 'L', 'N', 7170),
('Lo', 'L', 'W', 7170),
('Lo', 'L', 'W', 7234),
('Lo', 'R', 'N', 7170),
('Lt', 'L', 'N', 7186),
('Lu', 'L', 'A', 7178),
('Lu', 'L', 'F', 7178),
('Lu', 'L', 'N', 7178),
('Lu', 'L', 'Na', 7178),
('Lu', 'R', 'N', 7178),
('Mc', 'L', 'N', 6144),
('Mc', 'L', 'W', 6144),
('Me', 'NSM', 'N', 12288),
('Mn', 'L', 'N', 14336),
('Mn', 'NSM', 'A', 14336),
('Mn', 'NSM', 'A', 14368),
('Mn', 'NSM', 'N', 14336),
('Mn', 'NSM', 'W', 14336),
('Nd', 'AN', 'N', 6592),
('Nd', 'EN', 'F', 6592),
('Nd', 'EN', 'N', 6592),
('Nd', 'EN', 'Na', 6592),
('Nd', 'L', 'N', 6592),
('Nd', 'R', 'N', 6592),
('Nl', 'L', 'A', 7240),
('Nl', 'L', 'A', 7264),
('Nl', 'L', 'N', 7232),
('Nl', 'L', 'N', 7240),
('Nl', 'L', 'N', 7264),
('Nl', 'L', 'W', 7232),
('Nl', 'ON', 'N', 7232),
('No', 'AN', 'N', 4160),
('No', 'AN', 'N', 4288),
('No', 'EN', 'A', 4160),
('No', 'EN', 'A', 4288),
('No', 'EN', 'N', 4160),
('No', 'EN', 'N', 4288),
('No', 'L', 'A', 4160),
('No', 'L', 'N', 4160),
('No', 'L', 'N', 6336),
('No', 'L', 'W', 4160),
('No', 'ON', 'A', 4160),
('No', 'ON', 'A', 4288),
('No', 'ON', 'N', 4160),
('No', 'ON', 'N', 4288),
('No', 'ON', 'W', 4160),
('No', 'R', 'N', 4160),
('No', 'R', 'N', 4288),
('Pc', 'ON', 'F', 6144),
('Pc', 'ON', 'N', 6144),
('Pc', 'ON', 'Na', 6144),
('Pc', 'ON', 'W', 6144),
('Pd', 'ES', 'F', 4096),
('Pd', 'ES', 'Na', 4096),
('Pd', 'ES', 'W', 4096),
('Pd', 'ON', 'A', 4096),
('Pd', 'ON', 'N', 4096),
('Pd', 'ON', 'W', 4096),
('Pd', 'R', 'N', 4096),
('Pe', 'ON', 'F', 4608),
('Pe', 'ON', 'H', 4608),
('Pe', 'ON', 'N', 4096),
('Pe', 'ON', 'N', 4608),
('Pe', 'ON', 'Na', 4608),
('Pe', 'ON', 'W', 4096),
('Pe', 'ON', 'W', 4608),
('Pf', 'ON', 'A', 4096),
('Pf', 'ON', 'A', 12288),
('Pf', 'ON', 'N', 4608),
('Pi', 'ON', 'A', 4096),
('Pi', 'ON', 'A', 12288),
('Pi', 'ON', 'N', 4096),
('Pi', 'ON', 'N', 4608),
('Po', 'AL', 'N', 4096),
('Po', 'AN', 'N', 4096),
('Po', 'CS', 'F', 4096),
('Po', 'CS', 'F', 12288),
('Po', 'CS', 'N', 4096),
('Po', 'CS', 'Na', 4096),
('Po', 'CS', 'Na', 12288),
('Po', 'CS', 'W', 4096),
('Po', 'CS', 'W', 12288),
('Po', 'ET', 'A', 4096),
('Po', 'ET', 'F', 4096),
('Po', 'ET', 'N', 4096),
('Po', 'ET', 'Na', 4096),
('Po', 'ET', 'W', 4096),
('Po', 'L', 'N', 4096),
('Po', 'ON', 'A', 4096),
('Po', 'ON', 'A', 12288),
('Po', 'ON', 'A', 14336),
('Po', 'ON', 'F', 4096),
('Po', 'ON', 'F', 12288),
('Po', 'ON', 'H', 4096),
('Po', 'ON', 'N', 4096),
('Po', 'ON', 'N', 14336),
('Po', 'ON', 'Na', 4096),
('Po', 'ON', 'Na', 12288),
('Po', 'ON', 'W', 4096),
('Po', 'ON', 'W', 12288),
('Po', 'R', 'N', 4096),
('Po', 'R', 'N', 12288),
('Ps', 'ON', 'F', 4608),
('Ps', 'ON', 'H', 4608),
('Ps', 'ON', 'N', 4096),
('Ps', 'ON', 'N', 4608),
('Ps', 'ON', 'Na', 4608),
('Ps', 'ON', 'W', 4096),
('Ps', 'ON', 'W', 4608),
('Sc', 'AL', 'N', 4096),
('Sc', 'ET', 'A', 4096),
('Sc', 'ET', 'F', 4096),
('Sc', 'ET', 'H', 4096),
('Sc', 'ET', 'N', 4096),
('Sc', 'ET', 'Na', 4096),
('Sc', 'ET', 'W', 4096),
('Sk', 'AL', 'N', 12288),
('Sk', 'L', 'N', 12288),
('Sk', 'ON', 'A', 12288),
('Sk', 'ON', 'F', 12288),
('Sk', 'ON', 'N', 12288),
('Sk', 'ON', 'Na', 12288),
('Sk', 'ON', 'W', 12288),
('Sm', 'AL', 'N', 4096),
('Sm', 'CS', 'N', 4096),
('Sm', 'ES', 'F', 4096),
('Sm', 'ES', 'N', 4096),
('Sm', 'ES', 'Na', 4096),
('Sm', 'ES', 'W', 4096),
('Sm', 'ET', 'A', 4096),
('Sm', 'ET', 'N', 4096),
('Sm', 'L', 'N', 4096),
('Sm', 'ON', 'A', 4096),
('Sm', 'ON', 'A', 4608),
('Sm', 'ON', 'F', 4096),
('Sm', 'ON', 'F', 4608),
('Sm', 'ON', 'H', 4096),
('Sm', 'ON', 'N', 4096),
('Sm', 'ON', 'N', 4608),
('Sm', 'ON', 'N', 7168),
('Sm', 'ON', 'Na', 4096),
('Sm', 'ON', 'Na', 4608),
('Sm', 'ON', 'W', 4096),
('Sm', 'ON', 'W', 4608),
('So', 'AL', 'N', 4096),
('So', 'ET', 'A', 4096),
('So', 'ET', 'N', 4096),
('So', 'ET', 'N', 7168),
('So', 'L', 'A', 4096),
('So', 'L', 'A', 4104),
('So', 'L', 'A', 4128),
('So', 'L', 'N', 4096),
('So', 'L', 'W', 4096),
('So', 'ON', 'A', 4096),
('So', 'ON', 'F', 4096),
('So', 'ON', 'H', 4096),
('So', 'ON', 'N', 4096),
('So', 'ON', 'Na', 4096),
('So', 'ON', 'W', 4096),
('So', 'R', 'N', 4096),
('Zl', 'WS', 'N', 5),
('Zp', 'B', 'N', 5),
('Zs', 'CS', 'N', 1),
('Zs', 'WS', 'F', 1),
('Zs', 'WS', 'N', 1),
('Zs', 'WS', 'Na', 4097),
]
_db_pgtbl = (
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123455565575555'
'555555555555589:5;5<55=5>55555?@55AB555C5555555D555E55F555555555'
'G555H5555555IJ55555555K55555555LM555N\x15OPQRST55555555555555555555'
'55555555555555555555555UVVVVVVVVWWWWWWWWWWWWWWWWWWWWWWWWWXYZ[\\]^'
'_`abcd\x15efghijklkmnopqrstukvkkkkk\x15\x15\x15wxykkkkkkkkkk\x15\x15\x15\x15zkkkkkkkkkkk'
'kkkk\x15\x15{kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk\x15\x15|}kkk~kkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk\x7fkkkkkkkkkkk\x80kkk'
'kkkkkkkkkkkkkkkk\x81\x82\x83\x84\x85\x86\x87\x88((\x89kkkkkkkkkkkkk\x8akkkkk\x8bk\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95kkkkkk'
'\x96\x975555555\x98\x99\x9a55555555555555555555555\x9b55555\x9c55555555555555555\x9d5555'
'5555555555555555555555555555555555\x9e55555555555555555555555555555'
'55555555555555555555555555555555555555\x9f5555555555555555\xa0\xa15555555'
'55555555555555\xa2\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\x9b5\xa4\xa3\xa3\xa3\xa3\xa5'
'\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3'
'\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3'
'\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3'
'\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa3\xa5'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'\xa6\xa7kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk'
'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW'
'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW'
'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW'
'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW\xa8'
'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW'
'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW'
'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW'
'WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW\xa8'
)
_db_pages = (
'\x01\x01\x01\x01\x01\x01\x01\x01\x01\x02\x00\x03\x04\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x02\xd5\x90\x90\x85\xa2\x85\x90\x91\x9ao\x90\xaf~e\x7f~EEEEEEEEEE\x7f\x90\xbd\xbc\xbd\x90'
'\x9088888888888888888888888888\x9a\x90o\xa9b\xa9\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x1c\x9a\xbco\xbc\x01'
'\x01\x01\x01\x01\x01\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\xd2\x88\xa2\xa2\x9e\xa2\xcd\x88\xa6\xcc-x\xbc\x07\xc9\xa9\xc1\xb1RR\xa6\x1b\x88\x8a\xa6R-tYYY\x88'
'77777757777777775777777\xb45777775\x19\x19\x19\x1b\x1b\x1b\x1b\x19\x1b\x19\x19\x19\x1b\x19\x19\x1b\x1b\x19\x1b\x19\x19\x1b\x1b\x1b\xb4\x19\x19\x19\x1b\x19\x1b\x19\x1b'
'7\x197\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x197\x197\x1b7\x1b7\x1b7\x197\x1b7\x1b7\x1b7\x1b7\x1b5\x197\x1b7\x197\x1b7\x1b7\x195\x197\x1b7\x1b\x197\x1b7\x1b7\x1b5'
'\x195\x197\x197\x1b7\x19\x195\x197\x197\x1b7\x1b5\x197\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b5\x197\x1b7\x197\x1b7\x1b7\x1b7\x1b7\x1b7\x1b77\x1b7\x1b7\x1b\x1b'
'\x1b77\x1b7\x1b77\x1b777\x1b\x1b7777\x1b77\x1b777\x1b\x1b\x1b77\x1b77\x1b7\x1b7\x1b77\x1b7\x1b\x1b7\x1b77\x1b777\x1b7\x1b77\x1b\x1b07\x1b\x1b\x1b'
'<KEY>'
'7\<KEY>'
'\<KEY>'
'\<KEY>%%%%%%%%%))$$$$$'
"%%\xa8\xa8\xa6\xa8)')''')'))\x1f$\xa8\xa8\xa8\xa8\xa8\xa8\xa6\xa6\xa6\xa6\xa8\xa6\xa8\xa6%%%%%\xa8\xa8\xa8\xa8\xa8\xa8\xa8)\xa8$\xa8\xa8\xa8\xa8\xa8\xa8\xa8\xa8\xa8\xa8\xa8\xa8\xa8\xa8\xa8\xa8\xa8"
'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
'>>>>>?>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>7\x1b7\x1b)\xa87\x1b\x15\x15#\x1b\x1b\x1b\x8e7'
'\x15\x15\x15\x15\xa8\xa87\x8f777\x157\x1577\x1b55555555555555555\x15555555577\x1b\x1b\x1b\x1b\x1b\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19'
'\x19\x19\x1b\x19\x19\x19\x19\x19\x19\x19\x1b\x1b\x1b\x1b\x1b7\x1b\x1b777\x1b\x1b\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b\x1b\x1b\x1b\x1b7\x1b\xb97\x1b77\x1b\x1b777'
'757777777777777755555555555555555555555555555555\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19'
'\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x19\x1b\x19\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b'
'7\x1b\xc7@@@@@<<7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b'
'77\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b'
'7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b\x15777777777777777'
'77777777777777777777777\x15\x15$\x87\x87\x87\x87\x87\x87\x15\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b'
'\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x15\x87h\x15\x15\xcc\xcc\xa1\x15@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@j@'
'\x94@@\x94@@\x94@\x15\x15\x15\x15\x15\x15\x15\x15333333333333333333333333333\x15\x15\x15\x15\x15333\x94\x95\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\x06\x06\x06\x06\x06\x06\xb9\xb9\xab\x84\x84\x9d}y\xcc\xcc@@@@@@@@@@@y\x05\x15yy,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,'
'\x1e,,,,,,,,,,@@@@@@@@@@@@@@@@@@@@@BBBBBBBBBB\x84zzy,,@,,,,,,,,,,,,,,,'
',,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,'
',,,,,,,,,,,,,,,,,,,,y,@@@@@@@\x06\xcc@@@@@@\x1e\x1e@@\xcc@@@@,,DDDDDDDDDD,,,\xc0\xc0,'
'yyyyyyyyyyyyyy\x15\x05,@,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,@@@@@@@@@@@@@@@@'
'@@@@@@@@@@@\x15\x15,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,'
',,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,@@@@@@@@@@@,\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'GGGGGGGGGG333333333333333333333333333333333@@@@@@@@@**\xcc\x8e\x8e\x8e*\x15\x15\x15\x15\x15'
'3333333333333333333333@@@@*@@@@@@@@@*@@@*@@@@@\x15\x15\x94\x94\x94\x94\x94\x94\x94\x94\x94\x94\x94\x94\x94\x94\x94\x15'
'3333333333333333333333333@@@\x15\x15\x94\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15,,,,,,,,,,,,,,,,,,,,,\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'
'@@@:000000000000000000000000000000000000000000000000000000@:@0::'
':@@@@@@@@::::@::0@@@@@@@0000000000@@\x87\x87FFFFFFFFFF\x87$00000000000000'
'0@::\x1500000000\x15\x1500\x15\x150000000000000000000000\x150000000\x150\x15\x15\x150000\x15\x15@0::'
':@@@@\x15\x15::\x15\x15::@0\x15\x15\x15\x15\x15\x15\x15\x15:\x15\x15\x15\x1500\x15000@@\x15\x15FFFFFFFFFF00\xa1\xa1VVVVVV\xc7\xa1\x15\x15\x15\x15'
'\x15@@:\x15000000\x15\x15\x15\x1500\x15\x150000000000000000000000\x150000000\x1500\x1500\x1500\x15\x15@\x15::'
':@@\x15\x15\x15\x15@@\x15\x15@@@\x15\x15\x15@\x15\x15\x15\x15\x15\x15\x150000\x150\x15\x15\x15\x15\x15\x15\x15FFFFFFFFFF@@000@\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\x15@@:\x15000000000\x15000\x150000000000000000000000\x150000000\x1500\x1500000\x15\x15@0::'
':@@@@@\x15@@:\x15::@\x15\x150\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x1500@@\x15\x15FFFFFFFFFF\x87\xa1\x15\x15\x15\x15\x15\x15\x150\x15\x15\x15\x15\x15\x15'
'\x15@::\x1500000000\x15\x1500\x15\x150000000000000000000000\x150000000\x1500\x1500000\x15\x15@0:@'
':@@@@\x15\x15::\x15\x15::@\x15\x15\x15\x15\x15\x15\x15\x15@:\x15\x15\x15\x1500\x15000@@\x15\x15FFFFFFFFFF\xc70VVVVVV\x15\x15\x15\x15\x15\x15\x15\x15'
'\x15\x15@0\x15000000\x15\x15\x15000\x150000\x15\x15\x1500\x150\x1500\x15\x15\x1500\x15\x15\x15000\x15\x15\x15000000000000\x15\x15\x15\x15::'
'@::\x15\x15\x15:::\x15:::@\x15\x150\x15\x15\x15\x15\x15\x15:\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15FFFFFFFFFFVVV\xcc\xcc\xcc\xcc\xcc\xcc\xa1\xcc\x15\x15\x15\x15\x15'
'@:::\x1500000000\x15000\x1500000000000000000000000\x150000000000000000\x15\x15\x150@@'
'@::::\x15@@@\x15@@@@\x15\x15\x15\x15\x15\x15\x15@@\x15000\x15\x15\x15\x15\x1500@@\x15\x15FFFFFFFFFF\x15\x15\x15\x15\x15\x15\x15\x15[[[[[[[\xc7'
'\x15@::\x1500000000\x15000\x1500000000000000000000000\x150000000000\x1500000\x15\x15@0:='
':::::\x15=::\x15::@@\x15\x15\x15\x15\x15\x15\x15::\x15\x15\x15\x15\x15\x15\x150\x1500@@\x15\x15FFFFFFFFFF\x1500\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\x15@::\x1500000000\x15000\x1500000000000000000000000000000000000000000\x15\x150::'
':@@@@\x15:::\x15:::@0\x15\x15\x15\x15\x15\x15\x15\x15:\x15\x15\x15\x15\x15\x15\x15000@@\x15\x15FFFFFFFFFFVVVVVV\x15\x15\x15\xc7000000'
'\x15\x15::\x15000000000000000000\x15\x15\x15000000000000000000000000\x15000000000\x150\x15\x15'
'0000000\x15\x15\x15@\x15\x15\x15\x15:::@@@\x15@\x15::::::::\x15\x15\x15\x15\x15\x15FFFFFFFFFF\x15\x15::\x87\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\x15000000000000000000000000000000000000000000000000@0/@@@@@@@\x15\x15\x15\x15\xa1'
'000000$@@@@@@@@\x87FFFFFFFFFF\x87\x87\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\x1500\x150\x15\x1500\x150\x15\x150\x15\x15\x15\x15\x15\x150000\x150000000\x15000\x150\x150\x15\x1500\x150000@0/@@@@@@\x15@@0\x15\x15'
'00000\x15$\x15@@@@@@\x15\x15FFFFFFFFFF\x15\x150000\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'0\xc7\xc7\xc7\x87\x87\x87\x87\x87\x87\x87\x87\x87\x87\x87\x87\x87\x87\x87\xc7\x87\xc7\xc7\xc7@@\xc7\xc7\xc7\xc7\xc7\xc7FFFFFFFFFFVVVVVVVVVV\xc7@\xc7@\xc7@\x99n\x99n::'
'00000000\x15000000000000000000000000000000000000\x15\x15\x15\x15@@@@@@@@@@@@@@:'
'@@@@@\x87@@00000@@@@@@@@@@@\x15@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\x15\xc7\xc7'
'\xc7\xc7\xc7\xc7\xc7\xc7@\xc7\xc7\xc7\xc7\xc7\xc7\x15\xc7\xc7\x87\x87\x87\x87\x87\xc7\xc7\xc7\xc7\x87\x87\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'0000000000000000000000000000000000000000000::@@@@:@@@@@@:@@::@@0'
'FFFFFFFFFF\x87\x87\x87\x87\x87\x87000000::@@0000@@@0:::00:::::::000@@@@00000000000'
'00@::@@::::::@0:FFFFFFFFFF:::@\xc7\xc777777777777777777777777777777777'
'777777\x157\x15\x15\x15\x15\x157\x15\x150000000000000000000000000000000000000000000\x87$000'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111100000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
'000000000\x150000\x15\x150000000\x150\x150000\x15\x1500000000000000000000000000000000'
'000000000\x150000\x15\x15000000000000000000000000000000000\x150000\x15\x150000000\x15'
'0\x150000\x15\x15000000000000000\x150000000000000000000000000000000000000000'
'00000000000000000\x150000\x15\x150000000000000000000000000000000000000000'
'000000000000000000000000000\x15\x15@@@\x87\x87\x87\x87\x87\x87\x87\x87\x87WWWWWWWWWVVVVVVVVVVV\x15\x15\x15'
'0000000000000000\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\x15\x15\x15\x15\x15\x1577777777777777777777777777777777'
'777777777777777777777777777777777777777777777777777777\x15\x15\x1b\x1b\x1b\x1b\x1b\x1b\x15\x15'
'h000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000\x87\x8700000000000000000'
'\xd400000000000000000000000000\x99n\x15\x15\x1500000000000000000000000000000000'
'0000000000000000000000000000000000000000000\x87\x87\x87JJJ00000000\x15\x15\x15\x15\x15\x15\x15'
'0000000000000\x150000@@@\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15000000000000000000@@@\x87\x87\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'000000000000000000@@\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x150000000000000\x15000\x15@@\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'0000000000000000000000000000000000000000000000000000@@:@@@@@@@::'
'::::::@::@@@@@@@@@@@\x87\x87\x87$\x87\x87\x87\xa10@\x15\x15FFFFFFFFFF\x15\x15\x15\x15\x15\x15[[[[[[[[[[\x15\x15\x15\x15\x15\x15'
'\x8e\x8e\x8e\x8e\x8e\x8eh\x8e\x8e\x8e\x8e@@@\x08\x15FFFFFFFFFF\x15\x15\x15\x15\x15\x1500000000000000000000000000000000'
'000$0000000000000000000000000000000000000000000000000000\x15\x15\x15\x15\x15\x15\x15\x15'
'00000000000000000000000000000000000000000@0\x15\x15\x15\x15\x150000000000000000'
'000000000000000000000000000000000000000000000000000000\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'0000000000000000000000000000000\x15@@@::::@@:::\x15\x15\x15\x15::@::::::@@@\x15\x15\x15\x15'
'\xcc\x15\x15\x15\x8e\x8eFFFFFFFFFF000000000000000000000000000000\x15\x1500000\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'00000000000000000000000000000000000000000000\x15\x15\x15\x150000000000000000'
'0000000000\x15\x15\x15\x15\x15\x15FFFFFFFFFFW\x15\x15\x15\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc'
'00000000000000000000000@@::@\x15\x15\x87\x8700000000000000000000000000000000'
'000000000000000000000:@:@@@@@@@\x15@:@::@@@@@@@@::::::@@@@@@@@@@\x15\x15@'
'FFFFFFFFFF\x15\x15\x15\x15\x15\x15FFFFFFFFFF\x15\x15\x15\x15\x15\x15\x87\x87\x87\x87\x87\x87\x87$\x87\x87\x87\x87\x87\x87\x15\x15@@@@@@@@@@@@@@<\x15'
'\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'@@@@:00000000000000000000000000000000000000000000000@:@@@@@:@:::'
'::@::0000000\x15\x15\x15\x15FFFFFFFFFF\x87\x87\x87\x87\x87\x87\x87\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7@@@@@@@@@\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\x15\x15\x15'
'@@:000000000000000000000000000000:@@@@::@@:@@@00FFFFFFFFFF000000'
'00000000000000000000000000000000000000@:@@:::@:@@@::\x15\x15\x15\x15\x15\x15\x15\x15\x87\x87\x87\x87'
'000000000000000000000000000000000000::::::::@@@@@@@@::@@\x15\x15\x15\x87\x87\x87\x87\x87'
'FFFFFFFFFF\x15\x15\x15000FFFFFFFFFF000000000000000000000000000000$$$$$$\x87\x87'
'\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\x87\x87\x87\x87\x87\x87\x87\x87\x15\x15\x15\x15\x15\x15\x15\x15@@@\x87@@@@@@@@@@@@@:@@@@@@@0000@0000::@00\x15@@\x15\x15\x15\x15\x15\x15'
'\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b%%%%%%%%%%%%%%%%%%%%'
'%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b%\x1b\x1b\x1b\x1b\x1b\x1b\x1b'
'\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
'@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\x15\x15\x15\x15\x15\x15@@@@'
'7\<KEY>'
'7\<KEY>'
'7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b7\x1b'
'7\<KEY>'
'\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b77777777\x1b\x1b\x1b\x1b\x1b\x1b\x15\x15777777\x15\x15\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b77777777\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b77777777'
'\x1b\x1b\x1b\x1b\x1b\x1b\x15\x15777777\x15\x15\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x157\x157\x157\x157\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b77777777\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x15\x15'
'\<KEY>'
'\xa8\xa8\x1b\x1b\x1b\x15\x1b\x1b77774\xa8\xa8\xa8\x1b\x1b\x1b\x1b\x15\x15\x1b\x1b7777\x15\xa8\xa8\xa8\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b77777\xa8\xa8\xa8\x15\x15\x1b\x1b\x1b\x15\x1b\x1b77774\xa8\xa8\x15'
'\xd4\xd4\xd4\xd4\xd4\xd4\xd4\xd4\xd4\xd4\xd4\x08\x08\x08\n\x11ghhggg\x88\x8evs\x98wur\x98w\x88\x88\x88\x8e\x89\x88\x88\x89\xd0\xd1\x0b\x12\x0f\r\x14\xd2\x82\x84\x82\x82\x84\x88\x8e\x8e\x8ext\x88\x8e\x8e\x88a'
'a\x8e\x8e\x8e\xac\x99n\x8e\x8e\x8e\x8e\x8e\x8e\x8e\x8e\x8e\x8e\x8e\xb9\x8ea\x8e\x8e\x8e\x8e\x8e\x8e\x8e\x8e\x8e\x8e\xd4\x08\x08\x08\x08\x08\x15\x0c\x13\t\x10\x08\x08\x08\x08\x08\x08T%\x15\x15RTTTTT\xae\xae\xb9\x99n '
'TRRRRTTTTT\xae\xae\xb9\x99n\x15%%%%%%%%%%%%%\x15\x15\x15\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa0\xa1\xa1\x9e\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\x15'
'\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15@@@@@@@@@@@@@<<<<@<<<@@@@@@@@@@@@\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\xcc\xcc7\xc9\xcc\xc9\xcc7\xcc\xc9\x1b777\x1b\x1b777\x19\xcc7\xc9\xcc\xbb77777\xcc\xcc\xcc\xc9\xc9\xcc7\xcc5\xcc7\xcc7577\xc3\x1b7777\x1b0000\x1b\xcc\xcc\x1b\x1b77'
'\xba\xb9\xb9\xb9\xb97\x1b\x1b\x1b\x1b\xcc\xb9\xcc\xcc\x1b\xc7[[[YY[[[[[[YYYY[HHHHHHHHHHHHKKKKIIIIIIIIIILLLLLL'
'JJJ7\x1bJJJJY\xcc\xcc\x15\x15\x15\x15\xb4\xb4\xb4\xb4\xb4\xc9\xc9\xc9\xc9\xc9\xb9\xb9\xcc\xcc\xcc\xcc\xb9\xcc\xcc\xb9\xcc\xcc\xb9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xb9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xc9\xcc\xcc\xcc\xcc\xcc\xcc'
'\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xb9\xb9\xcc\xcc\xb4\xcc\xb4\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9'
'\xb4\xba\xb5\xb5\xba\xb9\xb9\xb4\xb5\xba\xba\xb5\xba\xba\xb9\xb4\xb9\xb5\xae\xb2\xb9\xb5\xba\xb9\xb9\xb9\xb5\xba\xba\xb5\xb4\xb5\xb5\xba\xba\xb4\xba\xb4\xba\xb4\xb4\xb4\xb4\xb5\xb5\xba\xb5\xba\xba\xba\xba\xba\xb4\xb4\xb4\xb4\xb9\xba\xb9\xba\xb5\xb5\xba\xba'
'\xba\xba\xba\xba\xba\xba\xba\xba\xb5\xba\xba\xba\xb5\xb9\xb9\xb9\xb9\xb9\xb5\xba\xba\xba\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xba\xb5\xb4\xba\xb9\xb5\xb5\xb5\xb5\xba\xba\xb5\xb5\xb9\xb9\xb5\xb5\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba'
'\xba\xba\xb5\xb5\xba\xba\xb5\xb5\xba\xba\xba\xba\xba\xb9\xb9\xba\xba\xba\xba\xb9\xb9\xb4\xb9\xb9\xba\xb4\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xba\xba\xb9\xb4\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xb9\xb9\xb9\xb9\xb9\xba\xb5'
'\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xba\xba\xba\xba\xba\xb9\xb9\xba\xba\xb9\xb9\xb9\xb9\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xb9\xb9\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba\xba'
'\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\x99n\x99n\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xba\xba\xcc\xcc\xcc\xcc\xcc\xcc\xcc\x9cq\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7'
'\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xcc\xb9\xcc\xcc\xcc'
'\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc7\xcc\xcc\xcc\xcc\xcc\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc'
'\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xb9\xb9\xb9\xb9\xb9\xb9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\x15\x15\x15\x15\x15'
'\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15ZZZZZZZZZYYYYYYYYYYYZZZZZZZZZYYY'
'YYYYYYYYRRRRRRRRRQQQQQQQQQQQ\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc4\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc5'
'\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc5\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\\YYYYYYYYYYZZZZZZZZZYZ'
'\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9'
'\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xcc\xcc\xcc\xcc\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc'
'\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xcc\xcc\xc9\xc9\xc9\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xc9\xcc\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xc9\xcc\xcc\xc9\xb4\xcc\xcc\xcc\xcc\xc9\xc9\xcc\xcc'
'\xc9\xb4\xcc\xcc\xcc\xcc\xc9\xc9\xc9\xcc\xcc\xc9\xcc\xcc\xc9\xc9\xc9\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xc9\xc9\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9'
'\xcc\xcc\xcc\xcc\xcc\xc9\xc9\xcc\xcc\xc9\xcc\xcc\xcc\xcc\xc9\xc9\xcc\xcc\xcc\xcc\xc9\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xcc\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc'
'\xc9\xcc\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xc9\xcc\xc9\xc9\xc9\xcc\xc9\xc9\xc9\xc9\xcc\xc9\xc9\xcc\xb4\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc'
'\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc7\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xc9'
'\xcc\xcc\xcc\xcc\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xcc\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xcc\xc9\xcc\xcc\xcc\xcc\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9\xc9'
'\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xcc\xcc'
'\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xc9\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\x99n\x99n\x99n\x99n\x99n\x99n\x99nZZZZZZZZZY'
'\\\\\\\\\\\\\\\\\\[\\\\\\\\\\\\\\\\\\[\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc'
'\xba\xb9\xb9\xba\xba\x99n\xb9\xba\xba\xb9\xba\xba\xba\xb9\xb9\xb9\xb9\xb9\xba\xba\xba\xba\xb9\xb9\xb9\xb9\xb9\xba\xba\xba\xb9\xb9\xb9\xba\xba\xba\xba\x9ao\x9ao\x9ao\x9ao\x99n\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9'
'\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7'
'\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7'
'\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7'
'\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7\xc7'
'\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9\xb9'
'\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\xcc\xcc\xcc\xcc\xcc\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\xcc\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'1211111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111112111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111211111111111111111111111111111'
'1111111111111111111111111111111112111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111112111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1112111111112111111111111111211111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111112111111111111111111211'
'1111111111111111111111111211111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111211111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111112111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111112111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111112111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'11111111111111111111111\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'11111111111111111111111111111111111111111111111111111\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111\x16\x1611111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111111111111111111111111111111111'
'1111111111111111111111111111111111\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'111111111111111111111111111111\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16'
'\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x16\x15\x15'
'\x15\x08\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08'
'\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08'
'\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15\x15'
'\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17'
'\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17'
'\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17'
'\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x17\x15\x15'
)
def _get_record(code):
return _db_records[ord(_db_pages[(ord(_db_pgtbl[code >> 8]) << 8) + (code & 255)])]
def category(code): return _get_record(code)[0]
def bidirectional(code): return _get_record(code)[1]
def east_asian_width(code): return _get_record(code)[2]
def isspace(code): return _get_record(code)[3] & 1 != 0
def isalpha(code): return _get_record(code)[3] & 2 != 0
def islinebreak(code): return _get_record(code)[3] & 4 != 0  # NOTE: the flag-bit value (4) is assumed; the original line is cut off in this excerpt
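# Illustrative use of the accessors above (requires the full packed tables,
# which are omitted in this excerpt):
#
#   category(ord('A'))       # 'Lu' (uppercase letter)
#   bidirectional(ord('A'))  # 'L'  (left-to-right)
#   isspace(ord(' '))        # True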
print("Launch arguments: " + str(launch_args))  # NOTE: reconstructed debug print; the start of this statement is cut off in the source
d.launch_args = launch_args
d.load_preferences()
cache = d.cache_load()
prompt = d.prefs['prompt']
out = d.menu(cache,prompt).strip()
aliased = False
if len(out) > 0:
if d.debug:
print("First menu closed with user input: " + out)
# Check if the action relates to a plugin
plugins = load_plugins(d.debug)
plugin_hook = False
for plugin in plugins:
if hasattr(plugin['plugin'], 'is_submenu') and plugin['plugin'].is_submenu == True:
pluginTitle = d.prefs['indicator_submenu'] + ' ' + plugin['plugin'].title.strip()
else:
pluginTitle = plugin['plugin'].title.strip()
if out[:len(pluginTitle)] == pluginTitle:
plugin_hook = (plugin["plugin"], pluginTitle)
# Check for plugin call
if plugin_hook != False:
plugin_hook[0].load_preferences()
plugin_hook[0].run(out[len(plugin_hook[1]):].strip())
if d.debug:
print("This command refers to a plugin")
else:
if d.debug:
print("This command is not related to a plugin")
# Check to see if the command is an alias for something
if d.retrieve_aliased_command(out) is not None:
# If the user wants frequently used items, store this execution (before de-aliasing)
if d.prefs['frequently_used'] > 0:
frequent_commands_store(out)
out = d.retrieve_aliased_command(out)
aliased = True
else:
# Check for store modifications
# Don't allow command aliases that add new commands
if out[0] in "+-":
if d.debug:
print("Detected command as an attempt to modify the cache")
action = out[0]
out = out[1:]
# tmp is used to split the input into a command and alias (if any)
tmp = out.split('#')
command = tmp[0]
if len(tmp) > 1:
alias = ' '.join(tmp[1:]).lstrip().rstrip()
else:
alias = None
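# Example (illustrative values only): entering "+mutt#Mail client" gives
# action='+', command='mutt', alias='Mail client'; entering "+htop" gives
# action='+', command='htop', alias=None.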
if d.debug:
print("out = '" + str(out) + "'")
print("tmp = '" + str(tmp) + "'")
print("action = '" + str(action) + "'")
print("command = '" + str(command) + "'")
print("alias '= " + str(alias) + "'")
# Check to see if the item is in the include_items list
found_in_store = False
if d.debug:
print("Command = '" + str(command) + "', alias = '" + str(alias) + "'")
print("Starting to match given command with store elements")
for item in d.prefs['include_items']:
if action == '+' and type(item) == list:
if d.debug:
print("Is (+) " + str(item[0]) + " == " + str(alias) + "?")
if alias == item[0]:
if d.debug:
print("Yes")
found_in_store = True
break
elif d.debug:
print("No")
# If removing a command - an alias would be detected as a command
if action == '-' and type(item) == list:
if d.debug:
print("Is (-) " + str(d.format_alias(item[0], item[1])) + " == " + str(command) + "?")
if command == d.format_alias(item[0], item[1]):
found_in_store = True
alias = command
if d.prefs['indicator_alias'] != '':
alias = alias[len(d.prefs['indicator_alias'])+1:]
command = item[1]
if d.debug:
print("Yes")
print("Command is now: " + str(command))
print("Alias is now: " + str(alias))
break
elif d.debug:
print("No")
if d.debug:
print("Is (-) " + str(d.format_alias(item[0], item[1])) + " == " + str(out) + "?")
if out == d.format_alias(item[0], item[1]):
found_in_store = True
alias = item[0]
if d.prefs['indicator_alias'] != '':
alias = alias[len(d.prefs['indicator_alias'])+1:]
command = item[1]
if d.debug:
print("Yes")
print("Command is now: " + str(command))
print("Alias is now: " + str(alias))
break
elif d.debug:
print("No")
if d.debug:
print("Is (-) " + str(item[0]) + " == " + str(command) + "?")
if command == item[0]:
found_in_store = True
alias = item[0]
if d.prefs['indicator_alias'] != '':
alias = alias[len(d.prefs['indicator_alias'])+1:]
command = item[1]
if d.debug:
print("Yes")
print("Command is now: " + str(command))
print("Alias is now: " + str(alias))
break
elif d.debug:
print("No")
if type(item) != list:
if d.debug:
print("Is " + str(item) + " == " + str(command) + "?")
if command == item:
if d.debug:
print("Yes")
found_in_store = True
break
elif d.debug:
print("No")
if action == '+' and found_in_store == True:
option = d.prefs['indicator_submenu'] + " Remove from store"
if alias is None:
answer = d.menu("Command '" + str(command) + "' already in store\n"+option)
else:
answer = d.menu("Alias '" + str(alias) + "' already in store\n"+option)
if answer != option:
sys.exit()
action = '-'
elif action == '-' and found_in_store == False:
option = d.prefs['indicator_submenu'] + " Add to store"
if alias is None:
answer = d.menu("Command '" + str(command) + "' was not found in store\n"+option)
else:
answer = d.menu("Alias '" + str(alias) + "' was not found in store\n"+option)
if answer != option:
sys.exit()
action = '+'
cache_scanned = d.cache_open(file_cache)[:-1]
if cache_scanned == False:
d.cache_regenerate()
d.message_close()
sys.exit()
else:
cache_scanned = cache_scanned.split("\n")
if action == '+':
if alias is None:
if d.debug:
print("Adding '" + str(command) + "' to store")
d.prefs['include_items'].append(command)
d.message_open("Adding item to store: " + str(command))
cache_scanned = [command] + cache_scanned
else:
if d.debug:
print("Adding aliased command '" + str([alias, command]) + "' to store")
d.prefs['include_items'].append([alias, command])
# And add the item to the alias lookup file
aliases = d.load_json(file_cache_aliasesLookup)
if [alias, command] not in aliases:
aliases.append([alias, command])
d.save_json(file_cache_aliasesLookup, aliases)
d.message_open("Adding aliased item item to store: " + str(d.format_alias(alias, command)))
cache_scanned = [d.format_alias(alias, command)] + cache_scanned
cache_scanned.sort(key=len)
elif action == '-':
if alias is None:
if d.debug:
print("Will try to remove command: '" + str(command) + "' from the included items")
d.prefs['include_items'].remove(command)
d.message_open("Removing item from store: " + str(command))
try:
cache_scanned.remove(command)
except ValueError:
if d.debug:
print("Couldnt remove item from the cache")
else:
pass
else:
to_remove = None
for item in d.prefs['include_items']:
if item[0] == alias:
to_remove = item
if to_remove is not None:
if d.debug:
print("Item found and is")
print(to_remove)
d.prefs['include_items'].remove(to_remove)
else:
if d.debug:
print("Couldn't remove the item (item could not be located)")
d.message_open("Removing aliased item from store: " + str(d.format_alias(alias, command)))
try:
cache_scanned.remove(d.format_alias(alias, command))
except ValueError:
if d.debug:
print("Couldnt remove item from the cache")
else:
pass
else:
d.message_close()
d.menu("An error occured while servicing your request.\nYou may need to delete your configuration file.")
sys.exit()
d.save_preferences()
d.cache_save(cache_scanned, file_cache)
d.message_close()
# Give the user some feedback
if action == '+':
if alias is None:
message = "New item (" + command + ") added to cache."
else:
message = "New item (" + command + " aliased as '" + alias + "') added to cache."
else:
if alias is None:
message = "Existing item (" + command + ") removed from cache."
else:
message = "Existing alias (" + alias + ") removed from cache."
d.menu(message)
sys.exit()
else:
# If the user wants frequently used items, store this execution
if d.prefs['frequently_used'] > 0:
frequent_commands_store(out)
# Detect if the command is a web address and pass to handle_command
if out[:7] == 'http://' or out[:8] == 'https://' or aliased == True:
handle_command(d, out)
elif out.find(':') != -1:
if d.debug:
print("Colon detected in command, could be a path or attempt to open something with something")
print(out)
tmp = out.split(':')
cmds = list(map(lambda x: x.strip(), tmp))
run_withshell = False
shell_hold = False
if len(cmds[0]) > 0 and cmds[0][-1] == ';':
if cmds[0][-2] == ';':
shell_hold = True
if d.debug:
print('Will hold')
else:
if d.debug:
print("Won't hold")
cmds[0] = cmds[0].replace(';','')
run_withshell = True
if cmds[0] == '':
if d.debug:
print("No program specified, issuing program options to user")
items = list(filter(lambda x: x.find(cmds[1]) != -1, cache.split('\n')))
item = d.menu(items)
handle_command(d, item)
elif cmds[0] in d.scan_binaries():
if d.debug:
print('Item[0] (' + cmds[0] + ') found in binaries so will use this')
# Get paths from cache
items = list(filter(lambda x: x.find('/') != -1, cache.split('\n')))
# If extension passed, filter by this
if cmds[1] != '':
items = list(filter(lambda x: x.find(cmds[1]) != -1, items))
filename = d.menu(items)
filename = os.path.expanduser(filename)
if filename.find(' ') != -1:
filename = '"' + filename + '"'
command = cmds[0] + " " + filename
if run_withshell:
d.open_terminal(command, shell_hold)
else:
d.execute(command)
elif os.path.exists(out):
if d.debug:
print("The whole thing is a path, just open it with file_handler")
handle_command(d, out)
elif cmds[0].find('/') != -1:
if d.debug:
print("First item is a path")
if out[-1] == ':':
if d.debug:
print("User wants to be prompted with options for opening passed item")
binary = d.menu(d.scan_binaries())
command = binary + " '" + os.path.expanduser(out[:-1]) + "'"
elif cmds[1] != '':
# Check that the whole thing isn't just a path with a colon in it
command = cmds[1] + " '" + os.path.expanduser(cmds[0]) + | |
str(today.month) + "/" + str(today.day) + "/" + str(today.year)
self.visdatev.set(tstr)
self.visitdate.grid(row=8, column=3)
#prepopulate volunteer
self.volv.set(self.volunteerName)
###############self.visv.set(self....?)
#prepopulate visitor (add test to see if this exists, in case of newclient)
self.notescv.config(state='normal')
self.notescv.delete('1.0', END)
self.saveVisit.grid(row=8, column=8, sticky=W)
self.cancelVisit.grid(row=9, column=8, sticky=W)
def editvisitf(self):
"""This function sets up a display identical to the "new visit"
display, but the date, visitor, notes, and volunteer are all
prepopulated with information from the database.
"""
#gridding
self.visit_listbox.grid_forget()
self.visit_scroll.grid_forget()
self.newVisit.grid_forget()
self.editVisit.grid_forget()
self.deleteVisit.grid_forget()
#set volunteer from database
self.volv.set(self.visitDict['volunteers'][self.selectedVisit])
#set visitor from database
self.visv.set(self.visitDict['visitors'][self.selectedVisit])
#set visdatev to Visit Date from database
vdate = self.visitDict['dates'][self.selectedVisit]
self.visdatev.set(vdate)
self.visitdate.grid(row=8, column=3)
self.notescv.config(state='normal')
self.saveVisitE.grid(row=8, column=8, sticky=W)
self.cancelVisit.grid(row=9, column=8, sticky=W)
def cancelvisitf(self):
"""This function will cancel a visit/changes to a visit,
and return to the normal visit display.
"""
self.clearVisits()
d = self.visitDict["dates"]
for i in range(0, len(d)):
self.visit_listbox.insert(i, d[i])
self.visit_listbox.selection_set(0)
self.displayVisit()
def savevisitf(self):
"""This will connect to Update Visit.
"""
try:
notes = str(self.notescv.get('1.0', END))
d = str(self.visdatev.get())
da = d.split('/')
dat = date(month=int(da[0]), day=int(da[1]), year=int(da[2]))
except:
self.error_popup("Check the visit date!")
idlist = self.visitDict['ids']
vid = idlist[self.selectedVisit]
update_vis(vid, dat, notes)
#refresh screen
self.clearVisits()
pid = self.cursel
info = select_client(pid)
self.displayVisitInfo(info)
def deletevisitf(self):
"""This function will delete the selected visit, first asking
the user to confirm the action, and will update the visit display
to reflect the change. This function connects to the "delete visit"
button.
"""
conf = messagebox.askquestion(
title='Confirm Delete',
message='Are you sure you want to delete this visit?')
if conf == 'yes':
idlist = self.visitDict['ids']
vid = idlist[self.selectedVisit]
remove_visit(vid)
#refresh screen
self.clearVisits()
pid = self.cursel
info = select_client(pid)
self.displayVisitInfo(info)
return
else:
return
def cancel_changes(self):
"""This function will clear the display and refill it with
the selected client's information from the database.
"""
self.updateClientDisplay()
self.displayInfo()
return
def quitprogram(self):
"""This function safely closes the database and
interface window.
"""
quit_session()
self.ciGui.destroy()
return
def logoff(self):
"""This function closes the database and interface window,
and returns to the volunteer login page.
"""
quit_session()
self.ciGui.destroy()
vo = cdbvolunteer.VolunteerDisplay()
return
def monthlyReport(self):
generate_monthly_report()
conf = messagebox.showinfo(title='Info',
message='Your report has been generated!')
return
def yearlyReport(self):
generate_yearly_report()
conf = messagebox.showinfo(title='Info',
message='Your report has been generated!')
return
def weeklyReport(self):
generate_weekly_report()
conf = messagebox.showinfo(title='Info',
message='Your report has been generated!')
return
def customReport(self):
"""This function allows the user to enter a start and end date
for generating the report.
"""
cw = cdbcustom.customwindow()
return
def error_popup(self, errmessage):
"""This function implements a simple pop-up window to warn user
about bad data entry.
"""
conf = messagebox.showerror(title='Error', message=errmessage)
def recordVisit(self):
"""This function will insert a new visit, clear old visit
display info, and reset the visit display.
"""
#inserts new visit
try:
vol_id = self.volID #int(self.volv.get())
except ValueError:
self.error_popup("Check volunteer id")
return
#get visit date
try:
dv = (str(self.visdatev.get())).split('/')
dvm = int(dv[0])
dvd = int(dv[1])
dvy = int(dv[2])
vdate = date(year=dvy, month=dvm, day=dvd)
except ValueError:
self.error_popup("Check visit date field!\n Enter: MM/DD/YYYY")
return
#get visit notes
try:
note = self.notescv.get("1.0", END)
except ValueError:
self.error_popup("Uh, oh! Better check the visit info!")
return
#create visitData object, and call function to record new visit
visitInfo = visitData(vol_id, visitDate=vdate, notes=note)
new_visit(self.cursel, visitInfo)
#clears old visit notes
self.clearVisits()
#refreshes visit note display
info = select_client(self.cursel)
self.displayVisitInfo(info)
#"Get All Input and Test It" functions
def getVisitorInput(self, ctype, cID=None):
"""This function tests all of the data for the visitor
entry boxes and returns an object.
"""
#Error checking for visitor's name and phone
try:
fname = str(self.fnv.get())
except ValueError:
self.error_popup("Check visitor's first name!")
return
try:
lname = str(self.lnv.get())
except ValueError:
self.error_popup("Check visitor's last name!")
return
try:
phnum = str(self.phv.get())
except ValueError:
self.error_popup("Check visitor's phone number!")
return
#Error checking for visitor's DOB
try:
month = str(self.mv.get())
dm = self.int_month[month]
except (ValueError, KeyError):
self.error_popup("Check visitor's month of birth!")
return
try:
dd = int(self.dv.get())
except ValueError:
self.error_popup("Check visitor's day of birth!")
return
try:
dy = int(self.yv.get())
except ValueError:
self.error_popup("Check visitor's year of birth!")
return
try:
DOB = date(year=dy, month=dm, day=dd)
except ValueError:
self.error_popup("Was an invalid day of birth chosen?")
return
#Error checking for datejoined
try:
dj = (str(self.datejoinv.get())).split('/')
djm = int(dj[0])
djd = int(dj[1])
djy = int(dj[2])
datejoined = date(year=djy, month=djm, day=djd)
except ValueError:
self.error_popup("Check Date Joined field!\n Enter: MM/DD/YYYY")
return
if ctype == "old":
cd = oldClientData(cID, firstname=fname, lastname=lname,
dob=DOB, phone=phnum, dateJoined=datejoined)
elif ctype == "new":
cd = newClientData(firstname=fname, lastname=lname,
dob=DOB, phone=phnum, dateJoined=datejoined)
return cd
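# The MM/DD/YYYY parsing above recurs in several methods; a hypothetical helper
# (not part of the original class) could centralize it:
#
#   def _parse_mdy(self, text):
#       """Parse an 'MM/DD/YYYY' string into a datetime.date; raises ValueError."""
#       m, d, y = (int(part) for part in str(text).split('/'))
#       return date(year=y, month=m, day=d)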
def getMemberInput(self, clist):
"""This function tests all of the input data for members
entry boxes and returns a data object.
"""
#Error checking for datejoined
try:
dj = (str(self.datejoinv.get())).split('/')
djm = int(dj[0])
djd = int(dj[1])
djy = int(dj[2])
datejoined = date(year=djy, month=djm, day=djd)
except ValueError:
self.error_popup("Check Date Joined field!\n Enter: MM/DD/YYYY")
return
#Check to see if any
if self.memDict != {}:
mfname = self.memDict["first"]
mlname = self.memDict["last"]
mm = self.memDict["mm"]
dd = self.memDict["dd"]
yy = self.memDict["yy"]
phnum = self.memDict["phone"]
for i in range(0, len(mfname)):
try:
fname = str(mfname[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)+"'s first name!")
return
try:
lname = str(mlname[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)+"'s last name!")
return
try:
phn = str(phnum[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)+"'s phone!")
return
try:
month = str(mm[i].get())
dm = self.int_month[month]
except (ValueError, KeyError):
self.error_popup("Check family member "+str(i)\
+"'s month of birth!")
return
try:
dday = int(dd[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)\
+"'s day of birth!")
return
try:
dy = int(yy[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)\
+"'s year of birth!")
return
try:
DOB = date(year=dy, month=dm, day=dday)
except ValueError:
self.error_popup("Was an invalid day of birth chosen for"\
" family member "+str(i)+"?")
return
ncd = newClientData(firstname=fname, lastname=lname,
dob=DOB, phone=phn, dateJoined=datejoined)
clist.append(ncd)
return clist
def getHouseholdInput(self):
"""This function tests all input for households in the household
entry boxes, and returns a data object.
"""
#get street address
try:
streeta = str(self.adv.get())
except ValueError:
self.error_popup("Check street address!")
return
#get city
try:
citya = str(self.ctyv.get())
except ValueError:
self.error_popup("Check city!")
return
#get state
try:
statea = str(self.stav.get())
except ValueError:
self.error_popup("Check state!")
return
#get zip code
try:
zipa = int(self.zpv.get())
except ValueError:
self.error_popup("Check zip code!")
return
#get apartment number
try:
apta = str(self.apv.get())
except ValueError:
self.error_popup("Check apartment number!")
return
#get date verified
if self.mvv.get() == self.dvv.get() == self.yvv.get() == "":
datev = None
else:
#get month
try:
month = str(self.mvv.get())
vm = self.int_month[month]
except (ValueError, KeyError):
self.error_popup("Check month of date verified!")
return
#get day
try:
vd = int(self.dvv.get())
except ValueError:
self.error_popup("Check day of date verified!")
return
#get year
try:
vy = int(self.yvv.get())
except ValueError:
self.error_popup("Check day of date verified!")
return
#final date testing
try:
datev = date(year=vy, month=vm, day=vd)
except ValueError:
self.error_popup("Was an invalid day for date"\
+" verified chosen?")
return
houseInfo = houseData(street=streeta, city=citya, state=statea,
zip=zipa, apt=apta, dateVerified=datev)
return houseInfo
def getVisitInput(self):
"""This function tests all visit input and returns an object.
"""
#IMPLEMENT get volunteer id
try:
v = str(self.visdatev.get())
vd = v.split('/')
vdate = date(year=int(vd[2]), month=int(vd[0]), day=int(vd[1]))
except ValueError:
self.error_popup("Check the visit date!")
#get visit notes
try:
note = self.notescv.get("1.0", END)
except ValueError:
note = None
visitInfo = visitData(Vol_ID=self.volID, visitDate=vdate, notes=note)
return visitInfo
def addNew(self):
"""This function adds a new household to the database.
#NOTE: we need to check checkboxes for dummy addresses
#(domestic violence address, and homeless address)
"""
#What if one of our "gets" fail?
#Test all input and create newClientData object for visitor
cd = self.getVisitorInput("new")
clist = [cd]
newClientInfo_list = self.getMemberInput(clist)
houseInfo = self.getHouseholdInput()
visitInfo = self.getVisitInput()
#send all objects to new_household function
client_id = new_household(houseInfo, visitInfo, newClientInfo_list)
self.cursel = client_id
#refresh list of clients
self.clientlist = list_people()
#refresh screen
self.displayNewInfo(client_id)
def updateInfo(self, *args):
"""This function will update the visitor's information, the household
information, and the visit information. It will also add family members,
but it will NOT
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "reds_4.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def reds_4_r(self):
cname = "reds_4_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "reds_4.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def reds_5(self):
cname = "reds_5"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "reds_5.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def reds_5_r(self):
cname = "reds_5_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "reds_5.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def reds_6(self):
cname = "reds_6"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "reds_6.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def reds_6_r(self):
cname = "reds_6_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "reds_6.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def reds_7(self):
cname = "reds_7"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "reds_7.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def reds_7_r(self):
cname = "reds_7_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "reds_7.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def reds_8(self):
cname = "reds_8"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "reds_8.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def reds_8_r(self):
cname = "reds_8_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "reds_8.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def reds_9(self):
cname = "reds_9"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "reds_9.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def reds_9_r(self):
cname = "reds_9_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "reds_9.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1(self):
cname = "set1"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_r(self):
cname = "set1_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_3(self):
cname = "set1_3"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_3.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_3_r(self):
cname = "set1_3_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_3.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_4(self):
cname = "set1_4"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_4.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_4_r(self):
cname = "set1_4_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_4.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_5(self):
cname = "set1_5"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_5.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_5_r(self):
cname = "set1_5_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_5.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_6(self):
cname = "set1_6"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_6.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_6_r(self):
cname = "set1_6_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_6.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_7(self):
cname = "set1_7"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_7.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_7_r(self):
cname = "set1_7_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_7.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_8(self):
cname = "set1_8"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_8.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_8_r(self):
cname = "set1_8_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_8.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_9(self):
cname = "set1_9"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_9.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set1_9_r(self):
cname = "set1_9_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set1_9.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2(self):
cname = "set2"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_r(self):
cname = "set2_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_3(self):
cname = "set2_3"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2_3.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_3_r(self):
cname = "set2_3_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2_3.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_4(self):
cname = "set2_4"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2_4.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_4_r(self):
cname = "set2_4_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2_4.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_5(self):
cname = "set2_5"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2_5.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_5_r(self):
cname = "set2_5_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2_5.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_6(self):
cname = "set2_6"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2_6.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_6_r(self):
cname = "set2_6_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2_6.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_7(self):
cname = "set2_7"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2_7.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_7_r(self):
cname = "set2_7_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2_7.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_8(self):
cname = "set2_8"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2_8.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set2_8_r(self):
cname = "set2_8_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set2_8.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set3(self):
cname = "set3"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set3.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set3_r(self):
cname = "set3_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set3.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set3_10(self):
cname = "set3_10"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set3_10.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set3_10_r(self):
cname = "set3_10_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set3_10.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set3_11(self):
cname = "set3_11"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set3_11.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set3_11_r(self):
cname = "set3_11_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set3_11.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set3_12(self):
cname = "set3_12"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set3_12.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set3_12_r(self):
cname = "set3_12_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set3_12.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set3_3(self):
cname = "set3_3"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set3_3.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def set3_3_r(self):
cname = "set3_3_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "set3_3.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
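# All of the properties above follow the same five-line pattern. A hypothetical
# helper (not part of the original module) could remove the duplication:
#
#   def _brewer_cmap(self, cname, filename, reverse=False):
#       if cname in matplotlib.cm._cmap_registry:
#           return matplotlib.cm.get_cmap(cname)
#       table = self._coltbl(os.path.join(CMAPSFILE_DIR, "colorbrewer", filename))
#       cmap = Colormap(table[::-1] if reverse else table, name=cname)
#       matplotlib.cm.register_cmap(name=cname, cmap=cmap)
#       return cmap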
var_name, ax, u, colorbar_index):
ax.clear()
ax.set_xlabel('$\\phi$')
ax.set_ylabel('$\\theta$')
ax.set_title(f'{var_name} averaged across $r$')
if self.contour_plot_available:
# matplotlib has problems with repeated `contourf` calls before version 3.3
# see https://github.com/matplotlib/matplotlib/issues/15986
theta = self.theta_label.reshape(*self.shape)[0, :, 0]
phi = self.phi_label.reshape(*self.shape)[0, 0, :]
cax = ax.contourf(phi, theta, u, cmap='magma')
ax.xaxis.set_major_locator(plt.MultipleLocator(math.pi / 6))
ax.xaxis.set_minor_locator(plt.MultipleLocator(math.pi / 12))
ax.xaxis.set_major_formatter(plt.FuncFormatter(self._longitude_formatter))
ax.yaxis.set_major_locator(plt.MultipleLocator(math.pi / 6))
ax.yaxis.set_minor_locator(plt.MultipleLocator(math.pi / 12))
ax.yaxis.set_major_formatter(plt.FuncFormatter(self._latitude_formatter))
ax.grid(which='major', linestyle='--', linewidth=0.5)
ax.grid(which='minor', linestyle=':', linewidth=0.5)
else:
# use matshow() to plot a heatmap instead
cax = ax.matshow(u, cmap='magma', interpolation='nearest')
if self.cbs[colorbar_index]:
self.cbs[colorbar_index].remove()
self.cbs[colorbar_index] = self.fig.colorbar(cax, ax=ax)
@staticmethod
def _refresh_history(ax, history, x_label='Epochs', y_label=None, title=None):
ax.clear()
ax.set_title(title)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
for metric in history:
ax.plot(history[metric], label=metric)
# By default, metrics are plotted using log-scale
# If there are negative values in metrics, override `self.customization()` to change to linear-scale
ax.set_yscale('log')
ax.legend()
def new(self):
self.fig = None
self.axs = []
self.cbs = []
self.ax_metrics = None
self.ax_loss = None
return self
def set_variable_count(self, n):
r"""Manually set the number of scalar fields to be visualized;
If not set, defaults to length of ``nets`` passed to ``self.check()`` every time ``self.check()`` is called.
:param n: number of scalar fields to overwrite default
:type n: int
:return: self
"""
self.n_vars = n
return self
def unset_variable_count(self):
r"""Manually unset the number of scalar fields to be visualized;
Once unset, the number defaults to length of ``nets``
passed to ``self.check()`` every time ``self.check()`` is called.
:return: self
"""
self.n_vars = None
return self
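# Illustrative chained use of the two setters above (values are arbitrary):
#
#   monitor = MonitorSpherical(r_min=0.1, r_max=1.0).set_variable_count(2)
#   monitor.unset_variable_count()  # revert to inferring the count from `nets`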
class MonitorSphericalHarmonics(MonitorSpherical):
r"""A monitor for checking the status of the neural network during training.
:param r_min:
The lower bound of radius, i.e., radius of interior boundary.
:type r_min: float
:param r_max:
The upper bound of radius, i.e., radius of exterior boundary.
:type r_max: float
:param check_every:
The frequency of checking the neural network represented by the number of epochs between two checks.
Defaults to 100.
:type check_every: int, optional
:param var_names:
The names of dependent variables; if provided, shall be used for plot titles.
Defaults to None
:type var_names: list[str]
:param shape:
Shape of mesh for visualizing the solution.
Defaults to (10, 10, 10).
:type shape: tuple[int]
:param r_scale:
'linear' or 'log'.
Controls the grid point in the :math:`r` direction.
Defaults to 'linear'.
:type r_scale: str
:param harmonics_fn:
A mapping from :math:`\theta` and :math:`\phi` to basis functions, e.g., spherical harmonics.
:type harmonics_fn: callable
:param theta_min:
The lower bound of polar angle.
Defaults to :math:`0`
:type theta_min: float
:param theta_max:
The upper bound of polar angle.
Defaults to :math:`\pi`.
:type theta_max: float
:param phi_min:
The lower bound of azimuthal angle.
Defaults to :math:`0`.
:type phi_min: float
:param phi_max:
The upper bound of azimuthal angle.
Defaults to :math:`2\pi`.
:type phi_max: float
:param max_degree:
**DEPRECATED and SUPERSEDED** by ``harmonics_fn``.
Highest degree used for the harmonic basis.
:type max_degree: int
"""
def __init__(self, r_min, r_max, check_every=None, var_names=None, shape=(10, 10, 10), r_scale='linear',
harmonics_fn=None, theta_min=0.0, theta_max=math.pi, phi_min=0.0, phi_max=math.pi * 2,
# DEPRECATED
max_degree=None):
super(MonitorSphericalHarmonics, self).__init__(
r_min,
r_max,
check_every=check_every,
var_names=var_names,
shape=shape,
r_scale=r_scale,
theta_min=theta_min,
theta_max=theta_max,
phi_min=phi_min,
phi_max=phi_max,
)
if (harmonics_fn is None) and (max_degree is None):
raise ValueError("harmonics_fn should be specified")
if max_degree is not None:
warnings.warn("`max_degree` is DEPRECATED; pass `harmonics_fn` instead, which takes precedence")
self.harmonics_fn = _RealSphericalHarmonics(max_degree=max_degree)
if harmonics_fn is not None:
self.harmonics_fn = harmonics_fn
def _compute_us(self, nets, conditions):
r, theta, phi = self.r_tensor, self.theta_tensor, self.phi_tensor
us = []
for net, cond in zip(nets, conditions):
products = cond.enforce(net, r) * self.harmonics_fn(theta, phi)
u = torch.sum(products, dim=1, keepdim=True).detach().cpu().numpy()
us.append(u)
return us
@property
def max_degree(self):
try:
ret = self.harmonics_fn.max_degree
except AttributeError as e:
warnings.warn(f"Error caught when accessing {self.__class__.__name__}, returning None:\n{e}")
ret = None
return ret
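# Illustrative construction of the monitor above (a sketch; `my_harmonics_fn` is a
# placeholder for any callable mapping (theta, phi) to basis-function values):
#
#   monitor = MonitorSphericalHarmonics(
#       r_min=0.1, r_max=1.0, check_every=50,
#       shape=(20, 20, 20), r_scale='log',
#       harmonics_fn=my_harmonics_fn,
#   )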
class Monitor1D(BaseMonitor):
"""A monitor for checking the status of the neural network during training.
:param t_min:
The lower bound of time domain that we want to monitor.
:type t_min: float
:param t_max:
The upper bound of time domain that we want to monitor.
:type t_max: float
:param check_every:
The frequency of checking the neural network represented by the number of epochs between two checks.
Defaults to 100.
:type check_every: int, optional
"""
def __init__(self, t_min, t_max, check_every=None):
"""Initializer method
"""
super(Monitor1D, self).__init__(check_every=check_every)
self.fig = plt.figure(figsize=(30, 8))
self.ax1 = self.fig.add_subplot(131)
self.ax2 = self.fig.add_subplot(132)
self.ax3 = self.fig.add_subplot(133)
# input for plotting
self.ts_plt = np.linspace(t_min, t_max, 100)
# input for neural network
self.ts_ann = torch.linspace(t_min, t_max, 100, requires_grad=True).reshape((-1, 1))
def check(self, nets, conditions, history):
r"""Draw 2 plots: One shows the shape of the current solution.
The other shows the history training loss and validation loss.
:param nets:
The neural networks that approximates the ODE (system).
:type nets: list[`torch.nn.Module`]
:param conditions:
The initial/boundary conditions of the ODE (system).
:type conditions: list[`neurodiffeq.ode.BaseCondition`]
:param history:
The history of training loss and validation loss.
            The 'train_loss' entry is a list of training losses and the 'valid_loss' entry is a list of validation losses.
        :type history: dict[str, list[float]]
.. note::
            `check` is meant to be called by the functions `solve` and `solve_system`.
"""
us = [
cond.enforce(net, self.ts_ann).detach().cpu().numpy()
for cond, net in zip(conditions, nets)
]
self.ax1.clear()
for i, u in enumerate(us):
self.ax1.plot(self.ts_plt, u, label=f'variable {i}')
self.ax1.legend()
self.ax1.set_title('solutions')
self.ax2.clear()
self.ax2.plot(history['train_loss'], label='training loss')
self.ax2.plot(history['valid_loss'], label='validation loss')
self.ax2.set_title('loss during training')
self.ax2.set_ylabel('loss')
self.ax2.set_xlabel('epochs')
self.ax2.set_yscale('log')
self.ax2.legend()
self.ax3.clear()
for metric_name, metric_values in history.items():
if metric_name == 'train_loss' or metric_name == 'valid_loss':
continue
self.ax3.plot(metric_values, label=metric_name)
self.ax3.set_title('metrics during training')
self.ax3.set_ylabel('metrics')
self.ax3.set_xlabel('epochs')
self.ax3.set_yscale('log')
        # if there are no custom metrics, there won't be any labels in this axis
if len(history) > 2:
self.ax3.legend()
self.fig.canvas.draw()
if not self.using_non_gui_backend:
plt.pause(0.05)
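# Illustrative usage sketch (not part of the library code): a Monitor1D is normally handed
# to the training routine, which invokes `check` periodically. The call below assumes
# `nets`, `conditions`, and `history` already exist in your training setup.
#
#   monitor = Monitor1D(t_min=0.0, t_max=10.0, check_every=100)
#   monitor.check(nets, conditions, history)  # draws solution, loss, and metric plots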
class Monitor2D(BaseMonitor):
r"""A monitor for checking the status of the neural network during training.
The number and layout of subplots (matplotlib axes) will be finalized after the first ``.check()`` call.
:param xy_min:
The lower bound of 2 dimensions.
If we only care about :math:`x \geq x_0` and :math:`y \geq y_0`, then `xy_min` is `(x_0, y_0)`.
:type xy_min: tuple[float, float], optional
:param xy_max:
The upper bound of 2 dimensions.
        If we only care about :math:`x \leq x_1` and :math:`y \leq y_1`, then `xy_max` is `(x_1, y_1)`.
:type xy_max: tuple[float, float], optional
:param check_every:
The frequency of checking the neural network represented by the number of epochs between two checks.
Defaults to 100.
:type check_every: int, optional
:param valid_generator:
The generator used to sample points from the domain when visualizing the solution.
        The generator is only called once (at instantiation), and its outputs are stored.
Defaults to a 32x32 ``Generator2D`` with method 'equally-spaced'.
:type valid_generator: neurodiffeq.generators.BaseGenerator
:param solution_style:
- If set to 'heatmap', solution visualization will be a contour heat map of
:math:`u` w.r.t. :math:`x` and :math:`y`. Useful when visualizing a 2-D spatial solution.
- If set to 'curves', solution visualization will be :math:`u`-:math:`x` curves instead of a 2d heat map.
Each curve corresponds to a :math:`t` value. Useful when visualizing 1D spatio-temporal solution.
The first coordinate is interpreted as :math:`x` and the second as :math:`t`.
Defaults to 'heatmap'.
:type solution_style: str
:param equal_aspect:
Whether to set aspect ratio to 1:1 for heatmap. Defaults to True.
        Ignored if `solution_style` is 'curves'.
:type equal_aspect: bool
:param ax_width:
Width for each solution visualization. Note that this is different from width for metrics history,
which is equal to ``ax_width`` :math:`\times` ``n_cols``.
:type ax_width: float
:param ax_height: Height for each solution visualization and metrics history plot.
:type ax_height: float
:param n_col:
Number of solution visualizations to plot in each row.
        Note that the metrics history plot always occupies a row of its own.
:type n_col: int
"""
def __init__(self, xy_min, xy_max, check_every=None, valid_generator=None, solution_style='heatmap',
equal_aspect=True, ax_width=5.0, ax_height=4.0, n_col=2):
"""Initializer method
"""
super(Monitor2D, self).__init__(check_every=check_every)
if solution_style not in ['heatmap', 'curves']:
raise ValueError(f"Unsupported 'solution_style' = {solution_style}")
if not _updatable_contour_plot_available() and solution_style == 'heatmap':
warnings.warn("Heatmap-style solution does not work with your matplotlib version. "
"Please upgrade matplotlib to v3.3.0 or higher. "
"Otherwise you may experience buggy behavior.",
UserWarning)
self.solution_style = solution_style
self.fig = None
self.ax_width = ax_width
self.ax_height = ax_height
self.n_col = n_col
self.equal_aspect = equal_aspect
self.axs = [] # subplots
        # self.caxs
'''
Name: color_segmentation.py
Version: 1.0
Summary: K-means color clustering based segmentation. This is achieved
by converting the source image to a desired color space and
running K-means clustering on only the desired channels,
with the pixels being grouped into a desired number
of clusters.
Author: <NAME>
Author-email: <EMAIL>
Created: 2018-05-29
USAGE:
python3 demo_color_seg.py -p ~/plant-image-analysis/test/ -ft JPG
'''
# import the necessary packages
import os
import glob
import argparse
from sklearn.cluster import KMeans
from skimage.feature import peak_local_max
from skimage.morphology import watershed, medial_axis
from skimage import img_as_float, img_as_ubyte, img_as_bool, img_as_int
from skimage import measure
from skimage.segmentation import clear_border
from scipy.spatial import distance as dist
from scipy import optimize
from scipy import ndimage
import math
import numpy as np
import argparse
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import warnings
warnings.filterwarnings("ignore")
import concurrent.futures
import multiprocessing
from multiprocessing import Pool
from contextlib import closing
MBFACTOR = float(1<<20)
# generate folder to store the output results
def mkdir(path):
    # remove whitespace at the beginning and any trailing backslash
    path = path.strip()
    path = path.rstrip("\\")
    # create the folder only if it does not already exist
    if not os.path.exists(path):
        # print(path + ' folder constructed!')
        os.makedirs(path)
        return True
    else:
        # print(path + ' path exists!')
        return False
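# Illustrative usage sketch: build an output folder next to an input image;
# `file_path` below is a hypothetical path.
#
#   mkpath = os.path.dirname(os.path.abspath(file_path)) + '/segmented'
#   mkdir(mkpath)   # returns True if the folder was created, False if it already existed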
def color_cluster_seg(image, args_colorspace, args_channels, args_num_clusters, min_size):
# Change image color space, if necessary.
colorSpace = args_colorspace.lower()
if colorSpace == 'hsv':
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
elif colorSpace == 'ycrcb' or colorSpace == 'ycc':
image = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
elif colorSpace == 'lab':
image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
else:
colorSpace = 'bgr' # set for file naming purposes
# Keep only the selected channels for K-means clustering.
if args_channels != 'all':
channels = cv2.split(image)
channelIndices = []
for char in args_channels:
channelIndices.append(int(char))
        image = image[:,:,channelIndices]
        if len(image.shape) == 2:
            # ndarray.reshape returns a new array; assign it back so the channel axis is kept
            image = image.reshape(image.shape[0], image.shape[1], 1)
(width, height, n_channel) = image.shape
#print("image shape: \n")
#print(width, height, n_channel)
# Flatten the 2D image array into an MxN feature vector, where M is the number of pixels and N is the dimension (number of channels).
reshaped = image.reshape(image.shape[0] * image.shape[1], image.shape[2])
# Perform K-means clustering.
if args_num_clusters < 2:
print('Warning: num-clusters < 2 invalid. Using num-clusters = 2')
#define number of cluster
numClusters = max(2, args_num_clusters)
# clustering method
kmeans = KMeans(n_clusters = numClusters, n_init = 40, max_iter = 500).fit(reshaped)
    # get labels
pred_label = kmeans.labels_
# Reshape result back into a 2D array, where each element represents the corresponding pixel's cluster index (0 to K - 1).
clustering = np.reshape(np.array(pred_label, dtype=np.uint8), (image.shape[0], image.shape[1]))
# Sort the cluster labels in order of the frequency with which they occur.
sortedLabels = sorted([n for n in range(numClusters)],key = lambda x: -np.sum(clustering == x))
# Initialize K-means grayscale image; set pixel colors based on clustering.
kmeansImage = np.zeros(image.shape[:2], dtype=np.uint8)
for i, label in enumerate(sortedLabels):
kmeansImage[clustering == label] = int(255 / (numClusters - 1)) * i
ret, thresh = cv2.threshold(kmeansImage,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)
thresh_cleaned = clear_border(thresh)
if np.count_nonzero(thresh) > 0:
thresh_cleaned_bw = clear_border(thresh)
else:
thresh_cleaned_bw = thresh
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(thresh_cleaned, connectivity = 8)
# stats[0], centroids[0] are for the background label. ignore
# cv2.CC_STAT_LEFT, cv2.CC_STAT_TOP, cv2.CC_STAT_WIDTH, cv2.CC_STAT_HEIGHT
sizes = stats[1:, cv2.CC_STAT_AREA]
Coord_left = stats[1:, cv2.CC_STAT_LEFT]
Coord_top = stats[1:, cv2.CC_STAT_TOP]
Coord_width = stats[1:, cv2.CC_STAT_WIDTH]
Coord_height = stats[1:, cv2.CC_STAT_HEIGHT]
Coord_centroids = centroids
#print("Coord_centroids {}\n".format(centroids[1][1]))
#print("[width, height] {} {}\n".format(width, height))
nb_components = nb_components - 1
#min_size = 70
max_size = width*height*0.1
img_thresh = np.zeros([width, height], dtype=np.uint8)
#for every component in the image, keep it only if it's above min_size
for i in range(0, nb_components):
'''
#print("{} nb_components found".format(i))
if (sizes[i] >= min_size) and (Coord_left[i] > 1) and (Coord_top[i] > 1) and (Coord_width[i] - Coord_left[i] > 0) and (Coord_height[i] - Coord_top[i] > 0) and (centroids[i][0] - width*0.5 < 10) and ((centroids[i][1] - height*0.5 < 10)) and ((sizes[i] <= max_size)):
img_thresh[output == i + 1] = 255
print("Foreground center found ")
elif ((Coord_width[i] - Coord_left[i])*0.5 - width < 15) and (centroids[i][0] - width*0.5 < 15) and (centroids[i][1] - height*0.5 < 15) and ((sizes[i] <= max_size)):
imax = max(enumerate(sizes), key=(lambda x: x[1]))[0] + 1
img_thresh[output == imax] = 255
print("Foreground max found ")
'''
if (sizes[i] >= min_size):
img_thresh[output == i + 1] = 255
#from skimage import img_as_ubyte
#img_thresh = img_as_ubyte(img_thresh)
#print("img_thresh.dtype")
#print(img_thresh.dtype)
#return img_thresh
return img_thresh
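# Illustrative usage sketch: segment a plant image in Lab space using the 'a' and 'b'
# channels (channel indices '1' and '2') with two clusters; the paths below are hypothetical.
#
#   image = cv2.imread('test/plant_01.JPG')
#   mask = color_cluster_seg(image, 'lab', '12', 2, min_size=500)
#   cv2.imwrite('test/plant_01_mask.png', mask)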
'''
def medial_axis_image(thresh):
#convert an image from OpenCV to skimage
thresh_sk = img_as_float(thresh)
image_bw = img_as_bool((thresh_sk))
image_medial_axis = medial_axis(image_bw)
return image_medial_axis
'''
class clockwise_angle_and_distance():
'''
    A class that computes the clockwise angle (and distance) of a point around an origin.
    This helps if one wants to use sorted() on a list of points.
Parameters
----------
    point : ndarray or list, like [x, y]. The point "to where" we go
self.origin : ndarray or list, like [x, y]. The center around which we go
refvec : ndarray or list, like [x, y]. The direction of reference
use:
instantiate with an origin, then call the instance during sort
reference:
https://stackoverflow.com/questions/41855695/sorting-list-of-two-dimensional-coordinates-by-clockwise-angle-using-python
Returns
-------
angle
distance
'''
def __init__(self, origin):
self.origin = origin
def __call__(self, point, refvec = [0, 1]):
if self.origin is None:
raise NameError("clockwise sorting needs an origin. Please set origin.")
# Vector between point and the origin: v = p - o
vector = [point[0]-self.origin[0], point[1]-self.origin[1]]
# Length of vector: ||v||
        lenvector = np.linalg.norm(vector)
# If length is zero there is no angle
        if lenvector == 0:
            return -math.pi, 0
# Normalize vector: v/||v||
normalized = [vector[0]/lenvector, vector[1]/lenvector]
dotprod = normalized[0]*refvec[0] + normalized[1]*refvec[1] # x1*x2 + y1*y2
diffprod = refvec[1]*normalized[0] - refvec[0]*normalized[1] # x1*y2 - y1*x2
angle = math.atan2(diffprod, dotprod)
# Negative angles represent counter-clockwise angles so we need to
# subtract them from 2*pi (360 degrees)
if angle < 0:
return 2*math.pi+angle, lenvector
        # I return first the angle because that's the primary sorting criterion
# but if two vectors have the same angle then the shorter distance
# should come first.
return angle, lenvector
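# Illustrative usage sketch: sort 2-D points clockwise around their centroid.
# The point list below is hypothetical.
#
#   pts = [(1, 0), (0, 1), (-1, 0), (0, -1)]
#   origin = [sum(p[0] for p in pts) / len(pts), sum(p[1] for p in pts) / len(pts)]
#   pts_clockwise = sorted(pts, key=clockwise_angle_and_distance(origin))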
# Detect stickers in the image
def sticker_detect(img_ori, save_path):
'''
image_file_name = Path(image_file).name
abs_path = os.path.abspath(image_file)
filename, file_extension = os.path.splitext(abs_path)
base_name = os.path.splitext(os.path.basename(filename))[0]
print("Processing image : {0}\n".format(str(image_file)))
# save folder construction
mkpath = os.path.dirname(abs_path) +'/cropped'
mkdir(mkpath)
save_path = mkpath + '/'
print ("results_folder: " + save_path)
'''
# load the image, clone it for output, and then convert it to grayscale
#img_ori = cv2.imread(image_file)
img_rgb = img_ori.copy()
# Convert it to grayscale
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
# Store width and height of template in w and h
w, h = template.shape[::-1]
# Perform match operations.
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
#(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(res)
# Specify a threshold
threshold = 0.8
# Store the coordinates of matched area in a numpy array
loc = np.where( res >= threshold)
    # np.where returns a tuple of index arrays; check whether any match passed the threshold
    if len(loc[0]) > 0:
(y,x) = np.unravel_index(res.argmax(), res.shape)
(min_val, max_val, min_loc, max_loc) = cv2.minMaxLoc(res)
#print(y,x)
print(min_val, max_val, min_loc, max_loc)
(startX, startY) = max_loc
endX = startX + template.shape[1]
endY = startY + template.shape[0]
# Draw a rectangle around the matched region.
for pt in zip(*loc[::-1]):
sticker_overlay = cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,255,0), 1)
sticker_crop_img = img_rgb[startY:endY, startX:endX]
return sticker_crop_img, sticker_overlay
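# Illustrative usage sketch: this function relies on a module-level grayscale `template`
# image that is not defined in this snippet; the paths below are hypothetical.
#
#   template = cv2.imread('marker_template.png', cv2.IMREAD_GRAYSCALE)
#   img = cv2.imread('test/plant_01.JPG')
#   sticker_crop, sticker_overlay = sticker_detect(img, './cropped/')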
def comp_external_contour(orig, thresh, save_path):
    #find contours and get the external one
contours, hier = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
img_height, img_width, img_channels = orig.shape
index = 1
print("contour length {}".format(len(contours)))
list_of_pts = []
if len(contours) > 1:
'''
for ctr in contours:
        list_of_pts
import abc
import inspect
import json
import os
import multiprocessing
import time
import numpy as np
import pandas as pd
import tensorflow as tf2
from tqdm import tqdm
from ..feature import features_from_batch_data
from ..utils.tf_ops import modify_variable_names
from ..utils.misc import time_block, colorize
from ..utils.exception import NotSamplingError
tf = tf2.compat.v1
tf.disable_v2_behavior()
class Base(abc.ABC):
"""Base class for all recommendation models.
Parameters
----------
task : str
Specific task, either rating or ranking.
data_info : `DataInfo` object
Object that contains useful information for training and predicting.
lower_upper_bound : list or tuple, optional
Lower and upper score bound for rating task.
"""
def __init__(self, task, data_info, lower_upper_bound=None):
self.task = task
self.model_built = False
if task == "rating":
self.global_mean = data_info.global_mean
if lower_upper_bound is not None:
assert isinstance(lower_upper_bound, (list, tuple)), (
"must contain both lower and upper bound if provided")
self.lower_bound = lower_upper_bound[0]
self.upper_bound = lower_upper_bound[1]
else:
self.lower_bound, self.upper_bound = data_info.min_max_rating
elif task != "ranking":
raise ValueError("task must either be rating or ranking")
self.default_prediction = (
data_info.global_mean
if task == "rating"
else 0.0
)
@abc.abstractmethod
def fit(self, train_data, **kwargs):
"""Train model on the training data.
Parameters
----------
train_data : `TransformedSet` object
Data object used for training.
"""
raise NotImplementedError
@abc.abstractmethod
def predict(self, user, item, **kwargs):
"""Predict score for given user and item.
Parameters
----------
user : int or array_like
User id or batch of user ids.
item : int or array_like
Item id or batch of item ids.
Returns
-------
prediction : int or array_like
Predicted scores for each user-item pair.
"""
raise NotImplementedError
@abc.abstractmethod
def recommend_user(self, user, n_rec, **kwargs):
"""Recommend a list of items for given user.
Parameters
----------
user : int
User id to recommend.
n_rec : int
number of recommendations to return.
Returns
-------
result : list of tuples
A recommendation list, each recommendation
contains an (item_id, score) tuple.
"""
raise NotImplementedError
@abc.abstractmethod
def save(self, path, model_name, **kwargs):
"""save model for inference or retraining.
Parameters
----------
path : str
file folder path to save model.
model_name : str
name of the saved model file.
"""
raise NotImplementedError
@classmethod
@abc.abstractmethod
def load(cls, path, model_name, data_info, **kwargs):
"""load saved model for inference.
Parameters
----------
path : str
file folder path to save model.
model_name : str
name of the saved model file.
data_info : `DataInfo` object
Object that contains some useful information.
"""
raise NotImplementedError
def convert_id(self, user, item, inner_id=False):
if not isinstance(user, (list, tuple, np.ndarray, pd.Series)):
user = [user]
if not isinstance(item, (list, tuple, np.ndarray, pd.Series)):
item = [item]
if not inner_id:
user = [self.data_info.user2id.get(u, self.n_users) for u in user]
item = [self.data_info.item2id.get(i, self.n_items) for i in item]
return np.array(user), np.array(item)
def _check_unknown(self, user, item):
# unknown_user_indices = list(np.where(np.logical_or(user >= self.n_users, user < 0))[0])
# unknown_item_indices = list(np.where(np.logical_or(item >= self.n_items, item < 0))[0])
unknown_user_indices = list(np.where(user == self.n_users)[0])
unknown_item_indices = list(np.where(item == self.n_items)[0])
# unknown_user = (list(user[unknown_user_indices])
# if unknown_user_indices
# else None)
# unknown_item = (list(item[unknown_item_indices])
# if unknown_item_indices
# else None)
unknown_index = list(
set(unknown_user_indices) | set(unknown_item_indices)
)
unknown_num = len(unknown_index)
if unknown_num > 0:
# temp conversion, will convert back in the main model
# user[unknown_index] = 0
# item[unknown_index] = 0
unknown_str = (f"Detect {unknown_num} unknown interaction(s), "
# f"including user: {unknown_user}, "
# f"item: {unknown_item}, "
# f"will be handled as default prediction"
f"position: {unknown_index}")
print(f"{colorize(unknown_str, 'red')}")
return unknown_num, unknown_index, user, item
def _check_unknown_user(self, user, inner_id=False):
user_id = (
self.data_info.user2id.get(user, -1)
if not inner_id else user
)
if 0 <= user_id < self.n_users:
return user_id
else:
if not inner_id:
unknown_str = f"detect unknown user: {user}"
print(f"{colorize(unknown_str, 'red')}")
return
@staticmethod
def _check_has_sampled(data, verbose):
if not data.has_sampled and verbose > 1:
exception_str = (f"During training, "
f"one must do whole data sampling "
f"before evaluating on epochs.")
raise NotSamplingError(f"{colorize(exception_str, 'red')}")
@staticmethod
def _check_interaction_mode(recent_num, random_num):
if recent_num is not None:
assert isinstance(recent_num, int), "recent_num must be integer"
mode = "recent"
num = recent_num
elif random_num is not None:
assert isinstance(random_num, int), "random_num must be integer"
mode = "random"
num = random_num
else:
mode = "recent"
num = 10 # by default choose 10 recent interactions
return mode, num
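    # Illustrative behaviour sketch for the helper above (not executed):
    #   _check_interaction_mode(recent_num=5, random_num=None)  -> ("recent", 5)
    #   _check_interaction_mode(recent_num=None, random_num=3)  -> ("random", 3)
    #   _check_interaction_mode(None, None)                     -> ("recent", 10)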
@staticmethod
def _decide_sparse_indices(data_info):
        return bool(data_info.sparse_col.name)
@staticmethod
def _decide_dense_values(data_info):
        return bool(data_info.dense_col.name)
@staticmethod
def _sparse_feat_size(data_info):
if (data_info.user_sparse_unique is not None
and data_info.item_sparse_unique is not None):
return max(np.max(data_info.user_sparse_unique),
np.max(data_info.item_sparse_unique)) + 1
elif data_info.user_sparse_unique is not None:
return np.max(data_info.user_sparse_unique) + 1
elif data_info.item_sparse_unique is not None:
return np.max(data_info.item_sparse_unique) + 1
@staticmethod
def _sparse_field_size(data_info):
return len(data_info.sparse_col.name)
@staticmethod
def _dense_field_size(data_info):
return len(data_info.dense_col.name)
@staticmethod
def show_start_time():
start_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
print(f"Training start time: {colorize(start_time, 'magenta')}")
def save_params(self, path):
hparams = dict()
arg_names = list(inspect.signature(self.__init__).parameters.keys())
arg_names.remove("data_info")
for p in arg_names:
hparams[p] = self.all_args[p]
param_path = os.path.join(path, "hyper_parameters.json")
with open(param_path, 'w') as f:
json.dump(hparams, f, separators=(",", ":"), indent=4)
@classmethod
def load_params(cls, path, data_info):
if not os.path.exists(path):
            raise OSError(f"file folder {path} doesn't exist...")
param_path = os.path.join(path, "hyper_parameters.json")
with open(param_path, 'r') as f:
hparams = json.load(f)
hparams.update({"data_info": data_info})
return hparams
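# Illustrative usage sketch (assuming a concrete subclass of Base, e.g. a hypothetical
# `SVD` model exposing `all_args`): hyper-parameters are dumped to and reloaded from
# `hyper_parameters.json` inside the given folder.
#
#   model.save_params("saved_model_dir")
#   hparams = SVD.load_params("saved_model_dir", data_info)
#   restored = SVD(**hparams)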
class TfMixin(object):
def __init__(self, tf_sess_config=None):
self.cpu_num = multiprocessing.cpu_count()
self.sess = self._sess_config(tf_sess_config)
self.graph_built = False
self.vector_infer = False
def _sess_config(self, tf_sess_config=None):
if not tf_sess_config:
# Session config based on:
# https://software.intel.com/content/www/us/en/develop/articles/tips-to-improve-performance-for-popular-deep-learning-frameworks-on-multi-core-cpus.html
tf_sess_config = {
"intra_op_parallelism_threads": 0,
"inter_op_parallelism_threads": 0,
"allow_soft_placement": True,
"device_count": {"CPU": self.cpu_num}
}
# os.environ["OMP_NUM_THREADS"] = f"{self.cpu_num}"
config = tf.ConfigProto(**tf_sess_config)
return tf.Session(config=config)
def train_pure(self, data_generator, verbose, shuffle, eval_data, metrics,
**kwargs):
for epoch in range(1, self.n_epochs + 1):
with time_block(f"Epoch {epoch}", verbose):
train_total_loss = []
for user, item, label, _, _ in data_generator(
shuffle, self.batch_size
):
feed_dict = {self.user_indices: user,
self.item_indices: item,
self.labels: label}
if hasattr(self, "is_training"):
feed_dict.update({self.is_training: True})
train_loss, _ = self.sess.run(
[self.loss, self.training_op], feed_dict=feed_dict)
train_total_loss.append(train_loss)
if verbose > 1:
train_loss_str = "train_loss: " + str(
round(float(np.mean(train_total_loss)), 4)
)
print(f"\t {colorize(train_loss_str, 'green')}")
class_name = self.__class__.__name__.lower()
if class_name.startswith("svd"):
# set up parameters for prediction evaluation
self._set_latent_factors()
self.print_metrics(eval_data=eval_data, metrics=metrics,
**kwargs)
print("="*30)
def train_feat(self, data_generator, verbose, shuffle, eval_data, metrics,
**kwargs):
for epoch in range(1, self.n_epochs + 1):
if self.lr_decay:
print(f"With lr_decay, epoch {epoch} learning rate: "
f"{self.sess.run(self.lr)}")
with time_block(f"Epoch {epoch}", verbose):
train_total_loss = []
for u, i, label, si, dv in data_generator(
shuffle, self.batch_size
):
feed_dict = self._get_feed_dict(u, i, si, dv, label, True)
train_loss, _ = self.sess.run(
[self.loss, self.training_op], feed_dict)
train_total_loss.append(train_loss)
if verbose > 1:
train_loss_str = "train_loss: " + str(
round(float(np.mean(train_total_loss)), 4)
)
print(f"\t {colorize(train_loss_str, 'green')}")
self.print_metrics(eval_data=eval_data, metrics=metrics,
**kwargs)
print("="*30)
def train_seq(self):
pass # TODO: combine train_feat and train_seq
def _get_feed_dict(self, user_indices, item_indices, sparse_indices,
dense_values, label, is_training):
feed_dict = {
self.user_indices: user_indices,
self.item_indices: item_indices,
self.is_training: is_training
}
if self.sparse:
feed_dict.update({self.sparse_indices: sparse_indices})
if self.dense:
feed_dict.update({self.dense_values: dense_values})
if label is not None:
feed_dict.update({self.labels: label})
return feed_dict
def _get_seq_feed_dict(self, u_interacted_seq, u_interacted_len,
user_indices, item_indices, label, sparse_indices,
dense_values, is_training):
feed_dict = {
self.user_interacted_seq: u_interacted_seq,
self.user_interacted_len: u_interacted_len,
self.user_indices: user_indices,
self.item_indices: item_indices,
self.is_training: is_training
}
if self.sparse:
feed_dict.update({self.sparse_indices: sparse_indices})
if self.dense:
feed_dict.update({self.dense_values: dense_values})
if label is not None:
feed_dict.update({self.labels: label})
return feed_dict
def predict_data_with_feats(self, data, batch_size=None,
cold_start="average", inner_id=False):
assert isinstance(data, pd.DataFrame), "data must be pandas DataFrame"
user, item = self.convert_id(data.user, data.item, inner_id)
unknown_num, unknown_index, user, item = self._check_unknown(user, item)
if batch_size is not None:
preds = np.zeros(len(data), dtype=np.float32)
for index in tqdm(range(0, len(data), batch_size), "pred_data"):
batch_slice = slice(index, index + batch_size)
batch_data = data.iloc[batch_slice]
user_indices = user[batch_slice]
item_indices = item[batch_slice]
sparse_indices, dense_values = features_from_batch_data(
self.data_info, self.sparse, self.dense, batch_data)
if hasattr(self, "user_last_interacted"):
feed_dict = self._get_seq_feed_dict(
self.user_last_interacted[user_indices],
self.last_interacted_len[user_indices],
user_indices, item_indices,
None, sparse_indices,
dense_values, False)
else:
feed_dict = self._get_feed_dict(
user_indices, item_indices,
sparse_indices, dense_values,
None, False)
preds[batch_slice] = self.sess.run(self.output, feed_dict)
else:
sparse_indices, dense_values = features_from_batch_data(
self.data_info, self.sparse, self.dense, data)
if hasattr(self, "user_last_interacted"):
feed_dict = self._get_seq_feed_dict(
self.user_last_interacted[user],
self.last_interacted_len[user],
user, item, None, sparse_indices,
dense_values, False)
else:
feed_dict = self._get_feed_dict(
user, item, sparse_indices,
dense_values, None, False)
preds = self.sess.run(self.output, feed_dict)
if self.task == "rating":
preds = np.clip(preds, self.lower_bound, self.upper_bound)
elif self.task == "ranking":
preds = 1 / (1 + np.exp(-preds))
if unknown_num > 0 and cold_start == "popular":
preds[unknown_index] = self.default_prediction
return preds
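    # Illustrative usage sketch (not executed): score a pandas DataFrame that carries
    # `user`, `item`, and the feature columns used at training time; batching keeps
    # memory bounded for large frames. The DataFrame below is hypothetical.
    #
    #   scores = model.predict_data_with_feats(eval_df, batch_size=8192,
    #                                          cold_start="popular")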
def assign_oov(self):
(
user_variables,
item_variables,
sparse_variables,
dense_variables,
_
) = modify_variable_names(self, trainable=True)
update_ops = []
for v in tf.trainable_variables():
        if user_variables is not None and
pattern
@time_measure
def quantstrategies(context: DataContext):
global lock_qm
totalresultdata = {}
transientresult100 = context.totalresult[DataContext.strategy100]
for sector_usd in context.markets:
resultdata = {}
sector_tmp = stock_group[sector_usd]
for symbol_tmp in context.symbols[sector_tmp]:
results = {}
try:
runStrategies(transientresult100, symbol_tmp, sector_tmp,
context, resultdata, results)
except BaseException as be:
logger.debug("runStrategies is failed, symbol is {}".format(symbol_tmp))
logger.error("runStrategies is failed, symbol is {}".format(symbol_tmp), be)
totalresultdata[sector_tmp] = resultdata
return totalresultdata
def runStrategies(transientresult100, symbol_tmp, sector_tmp, context,
resultdata, results):
with lock_qm:
length_totalresult100 = len(transientresult100)
issymbolintotalresult100 = symbol_tmp in transientresult100
if length_totalresult100 > 0:
if not issymbolintotalresult100:
return False
else:
dataset_240 = context.data240mins[sector_tmp].get(symbol_tmp)
if len(dataset_240) == 0:
return False
strategy_dayk = StrategyBasedOnDayKAction(dataset_240)
valid_240_amplitude, result_amplitude_240 = strategy_dayk.executeaction(operation='amplitude_avg')
if valid_240_amplitude:
if len(result_amplitude_240) > 0:
results[DataContext.strategy100] = result_amplitude_240
resultdata[symbol_tmp] = results
else:
return False
else:
logger.error("strategy_amplitude_avg_240 is failed on {}".format(symbol_tmp))
return False
dataset_240 = context.data240mins[sector_tmp].get(symbol_tmp)
expma_cross_240 = EXPMACrossAction(dataset_240)
valid_expma_240, value_240_expma = expma_cross_240.executeaction()
if valid_expma_240:
if len(value_240_expma) > 0:
results[DataContext.strategy8] = value_240_expma
resultdata[symbol_tmp] = results
else:
logger.error("strategy_expma_cross_240 is failed on {}".format(symbol_tmp))
dataset_30 = context.data30mins[sector_tmp].get(symbol_tmp)
expma_cross_30 = EXPMACrossAction(dataset_30)
valid_expma_30, value_30_expma = expma_cross_30.executeaction()
if valid_expma_30:
if len(value_30_expma) > 0:
results[DataContext.strategy9] = value_30_expma
resultdata[symbol_tmp] = results
else:
logger.error("strategy_expma_cross_30 is failed on {}".format(symbol_tmp))
dataset_60 = context.data60mins[sector_tmp].get(symbol_tmp)
if len(dataset_60) == 0:
return False
expma_cross_60 = EXPMACrossAction(dataset_60)
valid_expma_60, value_60_expma = expma_cross_60.executeaction()
if valid_expma_60:
if len(value_60_expma) > 0:
results[DataContext.strategy10] = value_60_expma
resultdata[symbol_tmp] = results
else:
logger.error("strategy_expma_cross_60 is failed on {}".format(symbol_tmp))
kd_60 = StrategyBasedOnKDAction(dataset_60)
valid_60_kd_cross, result_kd_cross_60 = kd_60. \
executeaction(occurrence_time=[dataset_60.index[-1]],
operation='cross_up')
if valid_60_kd_cross:
if len(result_kd_cross_60) > 0:
macd_cross_60 = StrategyBasedonMACDAction(dataset_60, 2)
valid_60_macd_cross, result_macd_cross_60 = macd_cross_60.executeaction(operation='cross_up')
if valid_60_macd_cross:
if len(result_macd_cross_60) > 0:
results[DataContext.strategy11] = result_macd_cross_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_macd_cross_up_60 is failed on {}".format(symbol_tmp))
else:
logger.error("strategy_kd_cross_up_60 is failed on {}".format(symbol_tmp))
# FIXME
'''
valid_60_kd_deviate, result_kd_deviate_60 = kd_60. \
executeaction(occurrence_time=[dataset_60.index[-1]],
operation='divergence_price_lower_and_k_higher')
if valid_60_kd_deviate:
if len(result_kd_deviate_60) > 0:
results[DataContext.strategyx] = result_kd_deviate_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_kd_deviate_60 is failed on {}".format(symbol_tmp))
'''
valid_60_kd_deviate, result_kd_deviate_60 = kd_60. \
executeaction(occurrence_time=[dataset_60.index[-1]],
operation='divergence_price_lower_and_k_higher_simple',
duration=20)
if valid_60_kd_deviate:
if len(result_kd_deviate_60) > 0:
results[DataContext.strategy12] = result_kd_deviate_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_kd_deviate_60 is failed on {}".format(symbol_tmp))
price_kavg_60 = StrategyBasedOnDayKAction(dataset_60)
valid_60_price_ma, result_price_ma_60 = price_kavg_60.executeaction(operation='price_k_avg')
if valid_60_price_ma:
if len(result_price_ma_60) > 0:
valid_60_entangle_crossup_period, result_entangle_crossup_period_60 = \
kd_60.executeaction(occurrence_time=[dataset_60.index[-1]],
operation='entangle_and_cross_up_within_period',
periods=4,
duration=40)
if valid_60_entangle_crossup_period:
if len(result_entangle_crossup_period_60) > 0:
results[DataContext.strategy13] = result_entangle_crossup_period_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_kd_entangle_and_cross_up_60 is failed on {}".format(symbol_tmp))
valid_60_entangle_period, result_entangle_period_60 = \
kd_60.executeaction(occurrence_time=[dataset_60.index[-1]],
operation='entangle_within_period',
periods=4,
duration=40)
if valid_60_entangle_period:
if len(result_entangle_period_60) > 0:
valid_30_crossup, result_crossup_30 = \
StrategyBasedOnKDAction(dataset_30).executeaction(
occurrence_time=[dataset_30.index[-1]],
operation='cross_up')
if valid_30_crossup:
if len(result_crossup_30) > 0:
results[DataContext.strategy14] = result_crossup_30
resultdata[symbol_tmp] = results
else:
logger.error("strategy_kd_cross_up_30 is failed on {}".format(symbol_tmp))
else:
logger.error("strategy_kd_entangle_60 is failed on {}".format(symbol_tmp))
else:
logger.error("strategy_price_ma_60 is failed on {}".format(symbol_tmp))
ma_go_60 = StrategyBasedOnDayKAction(dataset_60)
valid_60_ma, result_ma_60 = ma_go_60.executeaction(operation='avg_k_go')
if valid_60_ma:
if len(result_ma_60) > 0:
results[DataContext.strategy102] = result_ma_60
resultdata[symbol_tmp] = results
else:
return False
else:
logger.error("strategy_ma_avg_60 is failed on {}".format(symbol_tmp))
return False
expma_go_60 = StrategyBasedOnDayKAction(dataset_60)
valid_60_expema_dif, result_expema_dif_60 = expma_go_60.executeaction(operation='expma_dif_go')
if valid_60_expema_dif:
if len(result_expema_dif_60) > 0:
results[DataContext.strategy104] = result_expema_dif_60
resultdata[symbol_tmp] = results
else:
return False
else:
logger.error("strategy_expma_dif_go_60 is failed on {}".format(symbol_tmp))
return False
macd_go_60 = StrategyBasedonMACDAction(dataset_60, 2)
ismacd_strcit = False
ismacd_diff = False
valid_60_macd_strict, result_macd_strict_60 = macd_go_60.executeaction(operation='strict')
if valid_60_macd_strict:
if len(result_macd_strict_60) > 0:
results[DataContext.strategy101] = result_macd_strict_60
resultdata[symbol_tmp] = results
ismacd_strcit = True
else:
logger.error("strategy_macd_strict_60 is failed on {}".format(symbol_tmp))
if not ismacd_strcit:
valid_60_macd_dif, result_macd_dif_60 = macd_go_60.executeaction(operation='dif')
if valid_60_macd_dif:
if len(result_macd_dif_60) > 0:
results[DataContext.strategy103] = result_macd_dif_60
resultdata[symbol_tmp] = results
ismacd_diff = True
else:
logger.error("strategy_macd_diff_60 is failed on {}".format(symbol_tmp))
if not ismacd_strcit and not ismacd_diff:
return False
'''
dataset_30 = context.data30mins[sector_tmp].get(symbol_tmp)
kd_cross_30 = StrategyBasedOnKDAction(dataset_30)
kd_indicator_30 = KDAction(dataset_30, context.rsv_period, context.k_period, context.d_period)
valid_kd_30, k_v_30, d_v_30 = kd_indicator_30.executeaction()
ma_cross = CROSSUpMAAction(context.data15mins[sector_tmp].get(symbol_tmp))
valid, result_tmp = ma_cross.executeaction(startindex=context.start_i, endindex=context.end_i,
cross_period=context.cross_sma_period,
greater_period=context.greater_than_sma_period)
if valid:
if len(result_tmp) > 0:
time_sequence = []
for time_stamp_original in result_tmp['time'].array:
tmp_date = datetime.date(year=time_stamp_original.year, month=time_stamp_original.month,
day=time_stamp_original.day)
if time_stamp_original.minute == 0:
time_stamp = time_stamp_original
elif time_stamp_original.minute <= 30:
time_stamp = pd.Timestamp(datetime.datetime.combine(tmp_date,
datetime.time(
hour=time_stamp_original.hour,
minute=30)))
else:
time_stamp = pd.Timestamp(datetime.datetime.combine(tmp_date,
datetime.time(
hour=time_stamp_original.hour + 1)))
time_sequence.append(time_stamp)
if not valid_kd_30:
logger.error("strategy_cross_kd_30 is failed on {}".format(symbol_tmp))
else:
valid, result_tmp = kd_cross_30.executeaction(occurrence_time=time_sequence,
operation='cross_up',
KValues=k_v_30,
DValues=d_v_30,
crossvalue=(False, 0))
if valid:
# FIXME
if len(result_tmp) > 0:
obv_up = OBVUpACTION(context.data30mins[sector_tmp].get(symbol_tmp))
valid, result_tmp = obv_up.executeaction(occurrence_time=result_tmp['time'],
obv_period=context.obv_period,
obv_a_period=context.obv_a_period)
if valid:
if len(result_tmp) > 0:
results[DataContext.strategy1] = result_tmp
resultdata[symbol_tmp] = results
else:
logger.error("strategy_obv_up_30 is failed on {}".format(symbol_tmp))
if len(result_tmp) > 0:
results[DataContext.strategy1] = result_tmp
resultdata[symbol_tmp] = results
else:
logger.error("strategy_cross_kd_30 is failed on {}".format(symbol_tmp))
else:
logger.error("strategy_cross_70 is failed on {}".format(symbol_tmp))
if not valid_kd_30:
logger.error("strategy_entangle_crossup_kd_30 is failed on {}".format(symbol_tmp))
else:
valid_30_entangle_crossup_period, result_entangle_crossup_period_30 = \
kd_cross_30.executeaction(occurrence_time=[dataset_30.index[-1]],
operation='entangle_and_cross_up_within_period',
KValues=k_v_30,
DValues=d_v_30,
periods=4,
duration=80,
crossvalue=(False, 0))
if valid_30_entangle_crossup_period:
if len(result_entangle_crossup_period_30) > 0:
results[DataContext.strategy6] = result_entangle_crossup_period_30
resultdata[symbol_tmp] = results
else:
logger.error("strategy_entangle_crossup_kd_30 is failed on {}".format(symbol_tmp))
valid_60, result_tmp_60 = kd_60.executeaction(occurrence_time=[dataset_60.index[-1]],
operation='cross_up',
crossvalue=(True, 30))
if valid_60:
if len(result_tmp_60) > 0:
results[DataContext.strategy2] = result_tmp_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_cross_kd_60 is failed on {}".format(symbol_tmp))
valid_60_entangle, result_entangle_60 = kd_60.executeaction(occurrence_time=[dataset_60.index[-1]],
operation='entangle',
crossvalue=(True, 30),
periods=4)
if valid_60_entangle:
if len(result_entangle_60) > 0:
results[DataContext.strategy3] = result_entangle_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_entangle_kd_60 is failed on {}".format(symbol_tmp))
'''
valid_60_entangle_crossup_period, result_entangle_crossup_period_60 = \
kd_60.executeaction(occurrence_time=[dataset_60.index[-1]],
operation='entangle_and_cross_up_within_period',
periods=4,
duration=40)
if valid_60_entangle_crossup_period:
if len(result_entangle_crossup_period_60) > 0:
if ismacd_diff:
results[DataContext.strategy7] = result_entangle_crossup_period_60
elif ismacd_strcit:
results[DataContext.strategy5] = result_entangle_crossup_period_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_entangle_crossup_period_kd_60 is failed on {}".format(symbol_tmp))
if not ismacd_strcit:
return False
valid_60_entangle_crossup, result_entangle_crossup_60 = \
kd_60.executeaction(occurrence_time=[dataset_60.index[-1]],
operation='entangle_and_cross_up',
periods=4)
if valid_60_entangle_crossup:
if len(result_entangle_crossup_60) > 0:
results[DataContext.strategy4] = result_entangle_crossup_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_entangle_crossup_kd_60 is failed on {}".format(symbol_tmp))
return True
# FIXME because EM has been obsoleted.
def calcrankofchange():
if DataContext.iscountryChina():
prefix = "B_"
current_date = datetime.datetime.now().strftime("%Y-%m-%d")
        # offset by N trading days
date_offset = c.getdate(current_date, -11, "Market=CNSESH")
if date_offset.ErrorCode != 0:
logger.error("ErrorCode is %d and ErrorMsg is %s" % (date_offset.ErrorCode, date_offset.ErrorMsg))
return False
        # interval price change (free-float market-cap weighted average): CPPCTCHANGEFMWAVG; interval net capital inflow: PNETINFLOWSUM
sectors_q = list(sectors_CN.keys())
i = 1
sectors_length = len(sectors_q) - 6
sectors_v = []
while i < sectors_length:
j = i + 6
if j > sectors_length:
j = sectors_length
sectors_g = ",".join(map(lambda x: prefix + x, sectors_q[i:j]))
sector_data = c.cses(sectors_g, "CPPCTCHANGEFMWAVG,PNETINFLOWSUM",
"StartDate={},EndDate={}, IsHistory=0, Ispandas=1, ShowBlank=0".format(
date_offset.Data[0], current_date))
sectors_v.append(sector_data)
i += 6
logger.debug("%d sectors has been scanned" % (sectors_length - 1))
sectors_df = pd.concat(sectors_v)
sectors_df_change_d = sectors_df.sort_values(by='CPPCTCHANGEFMWAVG', ascending=False)
sectors_df_mf_d = sectors_df.sort_values(by='PNETINFLOWSUM', ascending=False)
sectors_list_change_d = sectors_df_change_d.index.tolist()
sectors_list_mf_d = sectors_df_mf_d.index.tolist()
if len(sectors_df) > 50:
list_sectors_change = sectors_list_change_d[:50]
list_sectors_change_r = sectors_list_change_d[:-51:-1]
list_sectors_mf = sectors_list_mf_d[:50]
list_sectors_mf_r = sectors_list_mf_d[:-51:-1]
else:
list_sectors_change = sectors_list_change_d
list_sectors_change_r = sectors_list_change_d[::-1]
list_sectors_mf = sectors_list_mf_d
list_sectors_mf_r = sectors_list_mf_d[::-1]
e_subject = "版块排名_" + datetime.datetime.now().strftime("%Y%m%d")
e_content = ""
filepath = os.path.join(DataContext.dir_name, e_subject)
with open(filepath, 'w+') as file:
tmp_str = "涨幅版块排名\r\n"
file.write(tmp_str)
e_content += tmp_str
for index in list_sectors_change:
column = sectors_df_change_d['CPPCTCHANGEFMWAVG']
sector_name = sectors_CN[index.lstrip(prefix)]
tmp_str = "版块名称: {} -- 幅度: {}% \r\n".format(sector_name, column[index])
file.write(tmp_str)
e_content += tmp_str
tmp_str = "\r\n跌幅版块排名\r\n"
file.write(tmp_str)
e_content += tmp_str
for index in list_sectors_change_r:
column = sectors_df_change_d['CPPCTCHANGEFMWAVG']
sector_name = sectors_CN[index.lstrip(prefix)]
tmp_str = "版块名称: {} -- 幅度: {}% \r\n".format(sector_name, column[index])
file.write(tmp_str)
e_content += tmp_str
tmp_str = "\r\n资金净流入版块排名 - 从高到低\r\n"
file.write(tmp_str)
e_content += tmp_str
for index in list_sectors_mf:
column = sectors_df_mf_d['PNETINFLOWSUM']
sector_name = sectors_CN[index.lstrip(prefix)]
tmp_str = "版块名称: {} -- 资金: {} \r\n".format(sector_name, column[index])
file.write(tmp_str)
e_content += tmp_str
tmp_str = "\r\n资金净流入版块排名 - 从低到高\r\n"
file.write(tmp_str)
e_content += tmp_str
for index in list_sectors_mf_r:
column = sectors_df_mf_d['PNETINFLOWSUM']
sector_name = sectors_CN[index.lstrip(prefix)]
tmp_str = "版块名称: {} -- 资金: {} \r\n".format(sector_name, column[index])
file.write(tmp_str)
e_content += tmp_str
sendemail(e_subject, e_content, DataContext.email_recipient)
sendemail(e_subject, e_content, DataContext.email_other1_recipient)
sendemail(e_subject, e_content, DataContext.email_other2_recipient)
def summarytotalresult(context: DataContext):
e_subject = "预警汇总_" + datetime.datetime.now().strftime("%Y%m%d")
e_content = ""
filepath = os.path.join(DataContext.dir_name, e_subject)
with open(filepath, 'w+') as file:
for strategy_t, symbols in context.totalresult.items():
str101 = ""
if strategy_t == DataContext.strategy6:
str101 = "\r\n\r\n\r\n\r\n\r\n策略6 - 30分钟周期\r\n"
elif strategy_t == DataContext.strategy14:
str101 = "\r\n\r\n\r\n\r\n\r\n策略14 - 30分钟周期:\r\n"
elif strategy_t == DataContext.strategy13:
str101 = "\r\n\r\n\r\n\r\n\r\n策略13 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy12:
str101 = "\r\n\r\n\r\n\r\n\r\n策略12 - 日周期:\r\n"
elif strategy_t == DataContext.strategy11:
str101 = "\r\n\r\n\r\n\r\n\r\n策略11 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy10:
str101 = "\r\n\r\n\r\n\r\n\r\n策略10 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy9:
str101 = "\r\n\r\n\r\n\r\n\r\n策略9 - 30分钟周期:\r\n"
elif strategy_t == DataContext.strategy8:
str101 = "\r\n\r\n\r\n\r\n\r\n策略8 - 日周期:\r\n"
elif strategy_t == DataContext.strategy7:
str101 = "\r\n\r\n\r\n\r\n\r\n策略7 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy5:
str101 = "\r\n\r\n\r\n\r\n\r\n策略5 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy4:
str101 = "\r\n\r\n\r\n\r\n\r\n策略4 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy3:
str101 = "\r\n\r\n\r\n\r\n\r\n策略3 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy1:
str101 = "\r\n\r\n\r\n\r\n\r\n策略1 - | |
<filename>wordvecs/wordvecs.py
#!/usr/bin/env python
# coding=utf-8
# Author: <NAME> <<EMAIL>> KTH 2018
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from fastText import train_unsupervised
from fastText import load_model
import os
import gensim, logging
from gensim.scripts.glove2word2vec import glove2word2vec
import errno
from glove import Corpus, Glove
from datetime import datetime
import sys
from copy import deepcopy
import re
"""
Script for training and evaluating word vectors.
Non-distributed training
"""
WORD2VEC_ANALOGIES = "eval/questions-words.txt"
WORDSIM353 = "eval/wordsim353.tsv"
FASHION_WORDSIM = "eval/fashion_wordsim.tsv"
SIMLEX99 = 'eval/simlex999.txt'
reload(sys)
sys.setdefaultencoding('utf-8')
isNumber = re.compile(r'\d+.*')
def readCorpus():
"""Reads input corpus, assumes it is already cleaned"""
with open("data/clean2_corpus.txt", 'r') as datafile:
return datafile.read()
def corpus_stats(corpus_file):
"""Computes some stats of the corpus"""
vocab = set()
total_count = 0
with open(corpus_file, 'r') as corpusFile:
text = corpusFile.read()
lines = text.split("\n")
lines = map(lambda x: x.split(" "), lines)
for line in lines:
for word in line:
total_count += 1
vocab.add(word.lower())
vocab_size = len(vocab)
return total_count, vocab_size
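# Illustrative usage sketch: report the size of the training corpus before fitting any
# embedding model (the same call is made inside the train_* functions below).
#
#   total_count, vocab_size = corpus_stats("data/clean2_corpus.txt")
#   print("tokens: {}, vocabulary: {}".format(total_count, vocab_size))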
def accuracy_percentage(acc):
"""Utility function for pretty-printing evaluation results"""
num_questions = len(acc)
semantic_questions = range(5)
syntactic_questions = range(5, num_questions)
overall_nr_correct_answers = sum(len(acc[i]["correct"]) for i in range(num_questions))
overall_nr_incorrect_answers = sum(len(acc[i]["incorrect"]) for i in range(num_questions))
if (overall_nr_correct_answers + overall_nr_incorrect_answers) > 0:
overall_acc_percent = 100 * float(
overall_nr_correct_answers / (overall_nr_correct_answers + overall_nr_incorrect_answers))
else:
overall_acc_percent = 0
sem_nr_correct_answers = sum(len(acc[i]["correct"]) for i in semantic_questions)
sem_nr_incorrect_answers = sum(len(acc[i]["incorrect"]) for i in semantic_questions)
if (sem_nr_correct_answers + sem_nr_incorrect_answers) > 0:
sem_acc_percent = 100 * float(sem_nr_correct_answers / (sem_nr_correct_answers + sem_nr_incorrect_answers))
else:
sem_acc_percent = 0
syn_nr_correct_answers = sum(len(acc[i]["correct"]) for i in syntactic_questions)
syn_nr_incorrect_answers = sum(len(acc[i]["incorrect"]) for i in syntactic_questions)
if (syn_nr_correct_answers + syn_nr_incorrect_answers) > 0:
syn_acc_percent = 100 * float(syn_nr_correct_answers / (syn_nr_correct_answers + syn_nr_incorrect_answers))
else:
syn_acc_percent = 0
frac = "sem: {0}/{1}, syn: {2}/{3}".format(sem_nr_correct_answers,
(sem_nr_correct_answers + sem_nr_incorrect_answers),
syn_nr_correct_answers,
(syn_nr_correct_answers + syn_nr_incorrect_answers))
return overall_acc_percent, sem_acc_percent, syn_acc_percent, frac
def save_results(model, dim, context, train_model, algorithm, data, name):
"""Save evaluation results to CSV file"""
acc = model.accuracy(WORD2VEC_ANALOGIES)
overall_acc_percent, sem_acc_percent, syn_acc_percent, frac = accuracy_percentage(acc)
pearson, spearman, oov_ration = model.evaluate_word_pairs(WORDSIM353)
pearson2, spearman2, oov_ration2 = model.evaluate_word_pairs(SIMLEX99)
pearson3, spearman3, oov_ration3 = model.evaluate_word_pairs(FASHION_WORDSIM)
fields = [
str(syn_acc_percent), str(sem_acc_percent), str(overall_acc_percent),
str(pearson[0]), str(pearson[1]), str(spearman[0]), str(spearman[1]), str(oov_ration),
str(pearson2[0]), str(pearson2[1]), str(spearman2[0]), str(spearman2[1]), str(oov_ration2),
str(pearson3[0]), str(pearson3[1]), str(spearman3[0]), str(spearman3[1]), str(oov_ration3),
str(dim), str(context), str(train_model), str(algorithm), str(data), str(name)
]
strFields = ",".join(fields)
strFields = strFields + "\n"
append_to_file("results/eval/results.csv", strFields)
def test_word2vec_google_news_300():
""" Evaluate word2vec pretrained on google news"""
model = gensim.models.KeyedVectors.load_word2vec_format('pretrained/googlenews_negative_300d_100B.bin', binary=True)
name = "googlenews_negative_300d_100B"
save_results(model, 300, 5, "skipgram", "word2vec", "100billion_googlenews_en", name)
def test_fasttext_wiki_300():
""" Evaluate fastttext pretrained on Eng Wikipedia"""
model = gensim.models.KeyedVectors.load_word2vec_format("pretrained/fasttext_wiki_300d_en.vec")
name = "fasttext_wiki_300d_en"
save_results(model, 300, 5, "skipgram", "fasttext", "wiki_en", name)
def test_glove_wiki_300():
""" Evaluate Glove pretrained on Eng wikipedia"""
model = gensim.models.KeyedVectors.load_word2vec_format('pretrained/glove_wiki_6B_300d.vec', binary=False)
name = "glove_wiki_6B_300d"
save_results(model, 300, "?", "-", "glove", "6billion_wiki", name)
def test_glove_twitter_200():
""" Evaluate Glove pretrained on google Twitter"""
model = gensim.models.KeyedVectors.load_word2vec_format('pretrained/twitter_glove_27B_200d.vec', binary=False)
name = "twitter_glove_27B_200d"
save_results(model, 200, "?", "-", "glove", "27billion_twitter", name)
def test_glove_commoncrawl_300():
""" Evaluate Glove pretrained on common crawl corpora"""
model = gensim.models.KeyedVectors.load_word2vec_format('pretrained/commoncrawl_glove_840B_300d.vec', binary=False)
name = "commoncrawl_glove_840B_300d"
save_results(model, 300, "?", "-", "glove", "840billion_commoncrawl", name)
def test_fashion_retrofitted():
""" Evaluate retrofitted fashion vectors"""
vectorFile = "retrofitted/test.vec"
model = gensim.models.KeyedVectors.load_word2vec_format(vectorFile, binary=False)
save_results(model, 300, 3, "?", "glove", "74million_fashion", "test")
def test_fashion(dim, context, train_model, algorithm, binary):
""" Evaluate our own vectors trained on IG corpora """
vectorFile = "trained/" + str(algorithm) + "_fashion_dim" + str(dim) + "_c" + str(context) + "_" + str(
train_model) + ".vec"
name = str(algorithm) + "_fashion_dim" + str(dim) + "_c" + str(context) + "_" + str(train_model)
model = gensim.models.KeyedVectors.load_word2vec_format(vectorFile, binary=binary)
save_results(model, dim, context, train_model, algorithm, "74million_fashion", name)
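# Illustrative usage sketch: evaluate self-trained fashion vectors. The arguments must match
# the naming convention used when the vectors were written by the train_* functions, e.g.
# 300-dimensional skip-gram FastText vectors trained with a context window of 3:
#
#   test_fashion(dim=300, context=3, train_model="skipgram", algorithm="fasttext", binary=False)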
def convert_gensim_to_word2vec_format(fileName):
"""Converts gensim exportation format to word2vec format"""
model = gensim.models.KeyedVectors.load(fileName)
word_vectors = model.wv
word_vectors.save_word2vec_format(fileName)
def convert_glove_to_word2vec_format():
""" Converts Glove format to Word2Vec format"""
glove2word2vec(glove_input_file="pretrained/glove_wiki_6B_300d.txt",
word2vec_output_file="pretrained/glove_wiki_6B_300d.vec")
glove2word2vec(glove_input_file="pretrained/glove_wiki_6B_200d.txt",
word2vec_output_file="pretrained/glove_wiki_6B_200d.vec")
glove2word2vec(glove_input_file="pretrained/glove_wiki_6B_100d.txt",
word2vec_output_file="pretrained/glove_wiki_6B_100d.vec")
glove2word2vec(glove_input_file="pretrained/glove_wiki_6B_50d.txt",
word2vec_output_file="pretrained/glove_wiki_6B_50d.vec")
glove2word2vec(glove_input_file="pretrained/twitter_glove_27B_25d.txt",
word2vec_output_file="pretrained/twitter_glove_27B_25d.vec")
glove2word2vec(glove_input_file="pretrained/twitter_glove_27B_50d.txt",
word2vec_output_file="pretrained/twitter_glove_27B_50d.vec")
glove2word2vec(glove_input_file="pretrained/twitter_glove_27B_100d.txt",
word2vec_output_file="pretrained/twitter_glove_27B_100d.vec")
glove2word2vec(glove_input_file="pretrained/twitter_glove_27B_200d.txt",
word2vec_output_file="pretrained/twitter_glove_27B_200d.vec")
def save_fasttext_bin_to_vec(model, output_path):
"""Converts FastText binary format to word2vec format"""
words = model.get_words()
with open(output_path, 'w+') as vecFile:
vecFile.write((str(len(words)) + " " + str(model.get_dimension())) + "\n")
for w in words:
v = model.get_word_vector(w)
vstr = ""
for vi in v:
vstr += " " + str(vi)
try:
vecFile.write((w + vstr + "\n"))
except IOError as e:
if e.errno == errno.EPIPE:
pass
def save_glove_bin_to_vec(model, output_path):
"""Converts Glove binary format to word2vec format"""
with open(output_path, 'w+') as vecFile:
(rows, cols) = model.word_vectors.shape
vecFile.write(str(rows) + " " + str(cols) + "\n")
for word, idx in model.dictionary.iteritems():
v = model.word_vectors[idx]
vstr = ""
for vi in v:
vstr += " " + str(vi)
try:
vecFile.write((word + vstr + "\n"))
except IOError as e:
if e.errno == errno.EPIPE:
pass
def save_retrofitted_to_vec(wordVecs, output_path):
""" Save retrofitted vectors to word2vec format"""
with open(output_path, 'w+') as vecFile:
rows = len(wordVecs.keys())
cols = len(wordVecs.itervalues().next())
vecFile.write(str(rows) + " " + str(cols) + "\n")
for word, v in wordVecs.iteritems():
vstr = ""
for vi in v:
vstr += " " + str(vi)
try:
vecFile.write((word + vstr + "\n"))
except IOError as e:
if e.errno == errno.EPIPE:
pass
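# Illustrative usage sketch: `wordVecs` is expected to map each word to an iterable of floats,
# e.g. produced by a retrofitting step; the output path below is hypothetical.
#
#   save_retrofitted_to_vec(retrofitted_vectors, "retrofitted/fashion_retrofitted_300d.vec")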
# Default params
# epochs:15
# threads:8
# minCount:5
# learning rate: 0.05
# learning rate update rate: 100
# wordNgrams: 1
# minn: 3
# maxn: 6
# neg: 5
# t= 1e-4
def train_fasttext_fashionrec(dimensionality, context, train_model, epochs):
""" Train with FastText on IG corpora"""
total_count, vocab_size = corpus_stats("data/clean2_corpus.txt")
print("total word count: {}, vocabulary size: {}".format(total_count, vocab_size))
start_time = datetime.now()
model = train_unsupervised(
input=os.path.join("data/clean2_corpus.txt"),
dim=dimensionality,
ws=context,
model=train_model,
        epoch=int(epochs)
)
time_elapsed = datetime.now() - start_time
output_path = "trained/fasttext_fashion_dim" + str(dimensionality) + "_c" + str(context) + "_" + str(train_model)
model.save_model(output_path + ".bin")
save_fasttext_bin_to_vec(load_model(output_path + ".bin"), output_path + ".vec")
fileName = "results/training/fasttext_fashion_epoch" + str(epochs) + "_d" + str(dimensionality) + "_c" + str(
context) + "_" + str(train_model) + ".txt"
notes = "FastText FashionData, " + str(epochs) + " epochs, " + str(dimensionality) + " dim, " + str(
context) + " context, " + str(train_model) + " train mode\n" + "Training time: " + str(time_elapsed)
save_to_file(fileName, notes)
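# Illustrative usage sketch: train 300-dimensional skip-gram FastText vectors with a
# context window of 3 for 15 epochs on the IG corpus configured above.
#
#   train_fasttext_fashionrec(300, 3, "skipgram", 15)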
# Default params:
# epochs: 15
# threads: 8
# alpha (learning rate):0.025
# min_count=5
# seed=1
# negative=5 (number of negative samples)
# cbow_mean=1
def train_word2vec_fashionrec(dimensionality, context, train_model, epochs):
""" Train with Word2Vec on IG corpora"""
total_count, vocab_size = corpus_stats("data/clean2_corpus.txt")
print("total word count: {}, vocabulary size: {}".format(total_count, vocab_size))
sentences = gensim.models.word2vec.LineSentence("data/clean2_corpus.txt")
start_time = datetime.now()
# sg = 1 => skip-gram, sg = 0 => CBOW
    model = gensim.models.Word2Vec(sentences, size=dimensionality, window=context, workers=8, sg=train_model, iter=int(epochs))
time_elapsed = datetime.now() - start_time
word_vectors = model.wv
output_path = "trained/word2vec_fashion_dim" + str(dimensionality) + "_c" + str(context) + "_" + str(train_model)
word_vectors.save(output_path + ".vec")
fileName = "results/training/word2vec_fashion_epoch" + str(epochs) + "_d" + str(dimensionality) + "_c" + str(
context) + "_" + str(train_model) + ".txt"
notes = "Word2Vec Fashion Data, " + str(epochs) + " epochs, " + str(dimensionality) + " dim, " + str(
context) + " context, " + str(train_model) + " train mode\n" + "Training time: " + str(time_elapsed)
save_to_file(fileName, notes)
def train_word2vec_wordrank(dimensionality, context, train_model, epochs):
""" Train with Word2vec on IG corpora"""
total_count, vocab_size = corpus_stats("data/clean2_corpus.txt")
print("total word count: {}, vocabulary size: {}".format(total_count, vocab_size))
sentences = gensim.models.word2vec.LineSentence("data/clean2_corpus.txt")
start_time = datetime.now()
# sg = 1 => skip-gram, sg = 0 => CBOW
    model = gensim.models.Word2Vec(sentences, size=dimensionality, window=context, workers=8, sg=train_model, iter=int(epochs))
time_elapsed = datetime.now() - start_time
word_vectors = model.wv
output_path = "trained/word2vec_fashion_dim" + str(dimensionality) + "_c" + str(context) + "_" + str(train_model)
word_vectors.save(output_path + ".vec")
fileName = "results/training/word2vec_fashion_epoch" + str(epochs) + "_d" + str(dimensionality) + "_c" + str(
context) + "_" + str(train_model) + ".txt"
notes = "Word2Vec Fashion Data, " + str(epochs) + " epochs, " + str(dimensionality) + " dim, " + str(
context) + " context, " + str(train_model) + " train mode\n" + "Training time: " + str(time_elapsed)
save_to_file(fileName, notes)
# Default params:
# epochs: 15
# threads: 8
# learning-rate:0.05
# alpha:0.75
# max_count:100
# max_loss: 10
# no_components:30
# symmetric context
def train_glove_fashionrec(dimensionality, context, epochs):
""" Train with Glove on IG corpora"""
total_count, vocab_size = corpus_stats("data/clean2_corpus.txt")
print("total word count: {}, vocabulary size: {}".format(total_count, vocab_size))
fileName = "results/training/glove_fashion_epochs" + str(epochs) + "_d" + str(dimensionality) + "_c" + str(
context) + "_" + ".txt"
corpus = readCorpus()
lines = corpus.split("\n")
linessplit = map(lambda x: x.split(" "), lines)
corpus_model = Corpus()
start_time = datetime.now()
corpus_model.fit(linessplit, window=context)
corpusModelFile = "trained/glove_fashion_epochs" + str(epochs) + "_d" + str(dimensionality) + "_c" + str(
context) + "_corpus" + ".model"
corpus_model.save(corpusModelFile)
glove = Glove(no_components=dimensionality, learning_rate=0.05)
glove.fit(corpus_model.matrix, epochs=int(epochs),
              no_threads=8,
import numpy as np
import matplotlib.pyplot as plt
import hrgames as hrg
import sys
def classic_pd(R=None, tf=None, xini=None):
"""Plays a classic prisoner's dilemma between two players"""
# Two connected players
A = np.zeros([2, 2])
A[0, 1] = 1
A[1, 0] = 1
# Classic positive prisoner's dilemma
B = np.zeros([2, 2])
B[0, 0] = 3 # R
B[0, 1] = 1 # S
B[1, 0] = 4 # T
B[1, 1] = 2 # P
# Relationship matrix
if R is None or R.shape != (2, 2):
R = np.zeros([2, 2], dtype='double')
R[0, 0] = 2/3 + 0.05
R[0, 1] = 1/3 - 0.05
R[1, 1] = 2/3 - 0.05
R[1, 0] = 1/3 + 0.05
# Initial Condition, 0.5 in all strategies for all players
if xini is None or xini.shape != (2, 2):
xini = np.divide(np.ones([2, 2], dtype='double'), 2)
# Time interval and number of steps
t0 = 0
if tf is None:
tf = 150
n = (tf-t0)*10
h = (tf-t0)/n
x = hrg.hr_game(t0, tf, n, A, B, R, xini)
# Plot results
xaxis = np.arange(t0, tf+h, h)
plt.plot(xaxis, x[0, 0, :], 'r', label='Player 1')
plt.plot(xaxis, x[1, 0, :], 'b', label='Player 2', alpha=0.7)
plt.ylim([-0.05, 1.05])
plt.legend(['Player 1', 'Player 2'])
plt.title("Cooperation probability")
plt.show()
plt.close()
return None
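# A minimal usage sketch (the relationship matrix below is illustrative; it simply
# mirrors the structure of the default above, where each player's row sums to 1):
#
#   R = np.array([[0.8, 0.2],
#                 [0.4, 0.6]], dtype='double')
#   classic_pd(R=R, tf=300)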
def classic_pd_negative(R=None, tf=None, xini=None):
"""Plays a classic prisoner's dilemma between two players"""
# Two connected players
A = np.zeros([2, 2])
A[0, 1] = 1
A[1, 0] = 1
# Classic negative prisoner's dilemma
B = np.zeros([2, 2])
B[0, 0] = -2 # R
B[0, 1] = -7 # S
B[1, 0] = 0 # T
B[1, 1] = -5 # P
# Relationship matrix
if R is None or R.shape != (2, 2):
R = np.zeros([2, 2], dtype='double')
R[0, 0] = 5/7 - 0.05
R[0, 1] = 2/7 + 0.05
R[1, 1] = 5/7 + 0.05
R[1, 0] = 2/7 - 0.05
# Initial Condition, 0.5 in all strategies for all players
if xini is None or xini.shape != (2, 2):
xini = np.divide(np.ones([2, 2], dtype='double'), 2)
# Time interval and number of steps
t0 = 0
if tf is None:
tf = 150
n = (tf-t0)*10
h = (tf-t0)/n
x = hrg.hr_game(t0, tf, n, A, B, R, xini)
# Plot results
xaxis = np.arange(t0, tf+h, h)
plt.plot(xaxis, x[0, 0, :], 'r', label='Player 1')
plt.plot(xaxis, x[1, 0, :], 'b', label='Player 2', alpha=0.7)
plt.ylim([-0.05, 1.05])
plt.legend(['Player 1', 'Player 2'])
plt.title("Cooperation probability")
plt.show()
plt.close()
return None
def hinge_love_triangle(R=None, tf=None, xini=None):
"""Plays a love triangle game in a 'hinge' graph, a path with 3 nodes"""
# A hinge, path with 3 nodes
A = np.zeros([3, 3])
A[0, 1] = 1
A[1, 0] = 1
A[1, 2] = 1
A[2, 1] = 1
# Payoff matrices
# Person 1 wants to play s1 against Person 2
# Person 2 only benefits when playing s2 against Person 1
# Person 2 has no benefit from playing with Person 3
B = np.zeros([2, 2, 3])
# Player 1 payoff matrix
B[0, 0, 0] = 0
B[0, 1, 0] = 1
B[1, 0, 0] = 0
B[1, 1, 0] = 0
# Player 2 payoff matrix
B[0, 0, 1] = 0
B[0, 1, 1] = 0
B[1, 0, 1] = 1
B[1, 1, 1] = 0
# Player 3 payoff matrix
B[0, 0, 2] = 1
B[0, 1, 2] = 0
B[1, 0, 2] = 0
B[1, 1, 2] = 0
# Relationship matrix
if R is None or R.shape != (3, 3):
R = np.zeros([3, 3], dtype='double')
# Person 1 cares equally about itself and its partner
R[0, 0] = 1/2
R[0, 1] = 1/2
# Person 2 cares about itself and Person 3
R[1, 1] = 1/2 + 0.05
R[1, 2] = 1/2 - 0.05
# Person 3 cares only about itself
R[2, 2] = 1
# Initial Condition, 0.5 in all strategies for all players
if xini is None or xini.shape != (3, 2):
xini = np.divide(np.ones([3, 2], dtype='double'), 2)
# Time interval and number of steps
t0 = 0
if tf is None:
tf = 150
n = (tf - t0) * 10
h = (tf - t0) / n
x = hrg.hr_game(t0, tf, n, A, B, R, xini)
# Plot results
xaxis = np.arange(t0, tf + h, h)
plt.plot(xaxis, x[0, 0, :], 'r', label='Player 1')
plt.plot(xaxis, x[1, 0, :], 'b', label='Player 2', alpha=0.7)
plt.plot(xaxis, x[2, 0, :], 'g', label='Player 3', alpha=0.7)
plt.ylim([-0.05, 1.05])
plt.legend(['Player 1', 'Player 2', 'Player 3'])
plt.title("Probability of using strategy 1")
plt.show()
plt.close()
return None
def k3_love_triangle(R=None, tf=None, xini=None):
"""Plays a love triangle game in a k3 graph, a complete graph with 3 nodes"""
# A k3, a complete graph with 3 nodes
A = np.subtract(np.ones([3, 3]), np.eye(3))
# Payoff matrices
# Person 1 wants to play s1 against Person 2
# Person 2 only benefits when playing s2 against Person 1
# Person 2 has no benefit from playing with Person 3
B = np.zeros([2, 2, 3])
# Player 1 payoff matrix
B[0, 0, 0] = 0
B[0, 1, 0] = 1
B[1, 0, 0] = 0
B[1, 1, 0] = 0
# Player 2 payoff matrix
B[0, 0, 1] = 0
B[0, 1, 1] = 0
B[1, 0, 1] = 1
B[1, 1, 1] = 0
# Player 3 payoff matrix
B[0, 0, 2] = 1
B[0, 1, 2] = 0
B[1, 0, 2] = 0
B[1, 1, 2] = 0
# Relationship matrix
if R is None or R.shape != (3, 3):
R = np.zeros([3, 3], dtype='double')
# Person 1 cares equally about itself and its partner, but dislikes Person 3
R[0, 0] = 2/5
R[0, 1] = 2/5
R[0, 2] = -1/5
# Person 2 cares about itself and Person 3
R[1, 1] = 1/2 - 0.05
R[1, 2] = 1/2 + 0.05
# Person 3 cares only about itself
R[2, 2] = 1
# Initial Condition, 0.5 in all strategies for all players
if xini is None or xini.shape != (3, 2):
xini = np.divide(np.ones([3, 2], dtype='double'), 2)
# Time interval and number of steps
t0 = 0
if tf is None:
tf = 150
n = (tf - t0) * 10
h = (tf - t0) / n
x = hrg.hr_game(t0, tf, n, A, B, R, xini)
# Plot results
xaxis = np.arange(t0, tf + h, h)
plt.plot(xaxis, x[0, 0, :], 'r', label='Player 1')
plt.plot(xaxis, x[1, 0, :], 'b', label='Player 2', alpha=0.7)
plt.plot(xaxis, x[2, 0, :], 'g', label='Player 3', alpha=0.7)
plt.ylim([-0.05, 1.05])
plt.legend(['Player 1', 'Player 2', 'Player 3'])
plt.title("Probability of using strategy 1")
plt.show()
plt.close()
return None
def closed_star_pd(R=None, tf=None, xini=None):
"""Plays a prisoner's dilemma on a closed star with 6 vertices"""
# Closed star, it's easier to erase connections
A = np.subtract(np.ones([6, 6]), np.eye(6))
A[1, 3] = 0
A[1, 4] = 0
A[2, 4] = 0
A[2, 5] = 0
A[3, 5] = 0
A[3, 1] = 0
A[4, 1] = 0
A[4, 2] = 0
A[5, 2] = 0
A[5, 3] = 0
# Classic positive prisoner's dilemma
B = np.zeros([2, 2])
B[0, 0] = 3 # R
B[0, 1] = 1 # S
B[1, 0] = 4 # T
B[1, 1] = 2 # P
# Relationship matrix, everyone's selfish
if R is None or R.shape != (6, 6):
R = np.eye(6)
# Initial Condition
if xini is None or xini.shape != (6, 2):
xini = np.zeros([6, 2], dtype='double')
# Natural cooperators
xini[0, 0] = 0.99
xini[1, 0] = 0.99
xini[3, 0] = 0.99
xini[5, 0] = 0.99
# Natural defectors
xini[2, 0] = 0.01
xini[4, 0] = 0.01
# Iterate to complete assignments
for i in range(6):
xini[i, 1] = np.subtract(1, xini[i, 0])
# Time interval and number of steps
t0 = 0
if tf is None:
tf = 150
n = (tf-t0)*10
h = (tf-t0)/n
x = hrg.hr_game(t0, tf, n, A, B, R, xini)
<reponame>imaginaryusername/Electron-Cash
#
# This file is:
# Copyright (C) 2018 <NAME> <<EMAIL>>
#
# MIT License
#
from . import utils
from . import gui
from .history import HistoryEntry
from . import txdetail
from . import contacts
from electroncash import WalletStorage, Wallet
from electroncash.util import timestamp_to_datetime, NotEnoughFunds, ExcessiveFee
from electroncash.transaction import Transaction
from electroncash.i18n import _
from .custom_objc import *
from .uikit_bindings import *
from electroncash import networks
from electroncash.address import Address, ScriptOutput
from electroncash.paymentrequest import PaymentRequest
from electroncash import bitcoin
from .feeslider import FeeSlider
from .amountedit import BTCAmountEdit
from electroncash.plugins import run_hook
import time, html, re, sys, traceback
from decimal import Decimal
RE_ALIAS = r'^(.*?)\s*\<([1-9A-Za-z]{26,})\>$'
def parent():
return gui.ElectrumGui.gui
def config():
return parent().config
def wallet():
return parent().wallet
def fx():
p = parent()
if p and p.daemon and p.daemon.fx:
return p.daemon.fx
return None
_CellIdentifier = "SpendFromCell"
_TableHeaderHeight = 25
_TableCellHeight = 20
_TableHeightRows = 4.45
class SendVC(SendBase):
qr = objc_property()
qrvc = objc_property()
qrScanErr = objc_property()
amountSats = objc_property()
feeSats = objc_property()
isMax = objc_property()
notEnoughFunds = objc_property()
excessiveFee = objc_property()
timer = objc_property()
dismissOnAppear = objc_property()
kbas = objc_property()
queuedPayTo = objc_property()
@objc_method
def init(self):
self = ObjCInstance(send_super(__class__, self, 'init'))
self.title = _("Send")
self.qrScanErr = False
self.amountSats = None # None ok on this one
self.feeSats = None # None ok on this one too
self.isMax = False # should always be defined
self.notEnoughFunds = False
self.excessiveFee = False
self.timer = None
self.dismissOnAppear = False
self.kbas = None
self.queuedPayTo = None
self.navigationItem.leftItemsSupplementBackButton = True
bb = UIBarButtonItem.new().autorelease()
bb.title = _("Back")
self.navigationItem.backBarButtonItem = bb
return self
@objc_method
def dealloc(self) -> None:
self.qrScanErr = None
self.amountSats = None
self.feeSats = None
self.isMax = None
self.notEnoughFunds = None
self.qr = None
self.qrvc = None
self.dismissOnAppear = None
if self.timer: self.timer.invalidate() # kill a timer if it hasn't fired yet
self.timer = None
self.excessiveFee = None
self.kbas = None
self.queuedPayTo = None
utils.nspy_pop(self)
for e in [self.amt, self.fiat, self.payTo]:
if e: utils.nspy_pop(e)
send_super(__class__, self, 'dealloc')
@objc_method
def didRotateFromInterfaceOrientation_(self, o : int) -> None:
pass
@objc_method
def reader_didScanResult_(self, reader, result) -> None:
utils.NSLog("Reader data = '%s'",str(result))
self.checkQRData_(result)
if self.qrScanErr:
if type(self.qrScanErr) is int and self.qrScanErr == 2:
title = _("Unsupported QR Code")
message = _("The QR code contains multiple outputs. At this time only a single output is supported.\nPlease try again.")
else:
title = _("Invalid QR Code")
message = _("The QR code does not appear to be a valid BCH address or payment request.\nPlease try again.")
reader.stopScanning()
parent().show_error(
title = title,
message = message,
onOk = lambda: reader.startScanning()
)
self.qrScanErr = False
else:
self.readerDidCancel_(reader)
@objc_method
def readerDidCancel_(self, reader) -> None:
if reader is not None: reader.stopScanning()
self.dismissViewControllerAnimated_completion_(True, None)
self.qr = None
self.qrvc = None
@objc_method
def loadView(self) -> None:
objs = NSBundle.mainBundle.loadNibNamed_owner_options_("Send",self,None)
assert objs is not None and len(objs)
# Apply translations and other stuff to UI text...
self.payToTit.setText_withKerning_(_("Pay to"), utils._kern)
# Input amount text field
btcedit = self.amt
fiatedit = self.fiat
def onAmount(t : ObjCInstance) -> None:
#print("On Amount %s, %s satoshis"%(str(t.text),str(t.getAmount())))
self.amountSats = t.getAmount()
fiatModified = False
if fx() and fx().is_enabled():
rate = fx().exchange_rate()
if rate:
amtfiat = int(round(float((Decimal(self.amountSats) * Decimal(100.0) * Decimal(rate)) / Decimal(1e8)))) if self.amountSats is not None else None
fiatModified = fiatedit.isModified()
fiatedit.setAmount_(amtfiat)
utils.uitf_redo_attrs(fiatedit)
if fiatModified or t.isModified():
#print ("updating fee...")
self.updateFee()
else: self.chkOk()
utils.add_callback(btcedit, 'textChanged', onAmount)
def onEdit(t : ObjCInstance) -> None:
self.isMax = False
utils.add_callback(btcedit, 'edited', onEdit)
btcedit.setUseUnitLabel_(True)
btcedit.fixedUnitLabelWidth = 50.0
# Amount (Fiat) label
# Input Fiat text field
def onAmountFiat(t : ObjCInstance) -> None:
#print("On Fiat Amount %s, %s %s"%(str(t.text),str(t.getAmount()),str(t.baseUnit())))
if not t.isModified() or not fx() or not fx().is_enabled():
return
rate = fx().exchange_rate()
if not rate: return
amtSats = int(round(float( (Decimal(t.getAmount())*Decimal(1e6)) / Decimal(rate) ))) if t.getAmount() is not None else None
btcedit.setAmount_(amtSats)
utils.uitf_redo_attrs(btcedit)
utils.add_callback(fiatedit, 'textChanged', onAmountFiat)
def onEditFiat(t : ObjCInstance) -> None:
self.isMax = False
utils.add_callback(fiatedit, 'edited', onEditFiat)
fiatedit.setUseUnitLabel_(True)
fiatedit.fixedUnitLabelWidth = 50.0
self.descTit.setText_withKerning_( _("Description"), utils._kern )
but = self.maxBut
but.setTitle_forState_(_("Max"), UIControlStateNormal)
# Fee Label
self.feeTit.setText_withKerning_( _("Fee"), utils._kern )
tedit = self.feeTf
fee_e = tedit
tedit.placeholder = _("Fee manual edit")
def onManualFee(t : ObjCInstance) -> None:
#print("On Manual fee %s, %s satoshis"%(str(t.text),str(t.getAmount())))
self.feeSats = t.getAmount()
if t.isModified(): self.updateFee()
else: self.chkOk()
utils.add_callback(fee_e, 'textChanged', onManualFee)
fee_e.setUseUnitLabel_(True)
fee_e.fixedUnitLabelWidth = 50.0
# Error Label
self.message.text = ""
self.descDel.placeholderFont = UIFont.italicSystemFontOfSize_(14.0)
self.descDel.tv = self.desc
self.descDel.text = ""
self.descDel.placeholderText = _("Description of the transaction (not mandatory).")
feelbl = self.feeLbl
slider = self.feeSlider
def sliderCB(dyn : bool, pos : int, fee_rate : int) -> None:
txt = " ".join(str(slider.getToolTip(pos,fee_rate)).split("\n"))
feelbl.text = txt
fee_e.modified = False # force unfreeze fee
if dyn:
config().set_key('fee_level', pos, False)
else:
config().set_key('fee_per_kb', fee_rate, False)
self.spendMax() if self.isMax else self.updateFee()
#print("testcb: %d %d %d.. tt='%s'"%(int(dyn), pos, fee_rate,txt))
utils.add_callback(slider, 'callback', sliderCB)
utils.nspy_put_byname(self, 'dummy', '_last_spend_from') # trigger the clear
# set up navigation bar items...
self.clearBut.title = _("Clear")
but = self.sendBut
but.setTitle_forState_(_("Send"), UIControlStateNormal)
barButPreview = self.previewBut
barButPreview.title = _("Preview")
self.navigationItem.rightBarButtonItems = [barButPreview]
extra = self.navigationItem.leftBarButtonItems if self.navigationItem.leftBarButtonItems else []
self.navigationItem.leftBarButtonItems = [*extra, self.clearBut]
@objc_method
def viewDidLoad(self) -> None:
uinib = UINib.nibWithNibName_bundle_("SpendFromCell", None)
self.tv.registerNib_forCellReuseIdentifier_(uinib, _CellIdentifier)
self.clearAllExceptSpendFrom()
@objc_method
def viewWillAppear_(self, animated : bool) -> None:
send_super(__class__, self, 'viewWillAppear:', animated, argtypes=[c_bool])
if self.dismissOnAppear and self.presentingViewController and not self.isBeingDismissed():
self.presentingViewController.dismissViewControllerAnimated_completion_(animated, None)
return
if self.queuedPayTo:
try:
qpt = list(self.queuedPayTo)
self.queuedPayTo = None
self.onPayTo_message_amount_(qpt[0],qpt[1],qpt[2])
except:
utils.NSLog("queuedPayTo.. failed with exception: %s",str(sys.exc_info()[1]))
self.kbas = utils.register_keyboard_autoscroll(self.view.viewWithTag_(54321))
# redo amount label if prefs changed
lbl = self.amtTit
tedit = self.amt
lbl.setText_withKerning_( _("Amount") , utils._kern )
# Placeholder for amount
tedit.placeholder = _("Input amount")
wasModified = tedit.isModified()
tedit.setAmount_(self.amountSats) # in case unit changed in prefs
tedit.modified = wasModified
# fee amount label
lbl = self.feeLbl
lbl.text = self.feeSlider.getToolTip(-1,-1)
# Manual edit .. re-set the amount in satoshis from our cached value, in case they changed units in the prefs screen
tedit = self.feeTf
wasModified = tedit.isModified()
tedit.setAmount_(self.feeSats)
tedit.modified = wasModified
# set manual fee edit to be enabled/disabled based on prefs settings
if parent().prefs_get_show_fee():
tedit.userInteractionEnabled = True
tedit.alpha = 1.0
else:
tedit.userInteractionEnabled = False
tedit.alpha = .5
# set fiat lbl/tedit based on prefs settings
doFX = fx() and fx().is_enabled()
ccy = fx().get_currency() if doFX else None
fiatte = self.fiat
fiatte.setHidden_(not doFX)
if doFX:
fiatte.placeholder = _("Fiat amount")
feelbl = self.feeTit
c = self.csFeeTop
if c is not None:
c.constant = 25.0 if doFX else -28.0
parent().cash_addr_sig.connect(lambda: self.reformatSpendFrom(), self)
self.reformatSpendFrom()
pay_to = utils.nspy_get_byname(self, 'pay_to')
if pay_to is not None:
if isinstance(pay_to, str):
self.payTo.text = pay_to
utils.nspy_pop_byname(self, 'pay_to')
utils.uitf_redo_attrs(self.payTo)
utils.uitf_redo_attrs(self.amt)
utils.uitf_redo_attrs(self.fiat)
utils.uitf_redo_attrs(self.feeTf)
self.chkOk()
@objc_method
def viewDidAppear_(self, animated : bool) -> None:
send_super(__class__, self, 'viewDidAppear:', animated, argtypes=[c_bool])
parent().show_warning_if_watching_only(vc = self,
onOk = lambda: self.presentingViewController.dismissViewControllerAnimated_completion_(True, None))
if not self.tv.isHidden(): self.tv.flashScrollIndicators()
@objc_method
def reformatSpendFrom(self) -> None:
# Do the "spend from" stuff
self.tv.reloadData()
coins = utils.nspy_get_byname(self, 'spend_from')
if utils.nspy_get_byname(self, '_last_spend_from') == coins:
return
utils.nspy_put_byname(self, coins, '_last_spend_from')
self.updateFee()
@objc_method
def viewWillDisappear_(self, animated: bool) -> None:
send_super(__class__, self, 'viewWillDisappear:', animated, argtypes=[c_bool])
# Manual edit .. cache the feeSats in case they change stuff in prefs affecting this
tedit = self.feeTf
self.feeSats = tedit.getAmount()
# Amount edit -- cache the amountSats in case they change stuff in the prefs affecting this
tedit = self.amt
self.amountSats = tedit.getAmount()
parent().cash_addr_sig.disconnect(self)
if self.kbas:
utils.unregister_keyboard_autoscroll(int(self.kbas))
self.kbas = None
@objc_method
def onQRBut_(self, but) -> None:
def DoIt() -> None:
if not QRCodeReader.isAvailable:
utils.show_alert(self, _("QR Not Avilable"), _("The camera is not available for reading QR codes"))
else:
self.qr = QRCodeReader.new().autorelease()
self.qrvc = QRCodeReaderViewController.readerWithCancelButtonTitle_codeReader_startScanningAtLoad_showSwitchCameraButton_showTorchButton_("Cancel",self.qr,True,False,False)
self.qrvc.modalPresentationStyle = UIModalPresentationFormSheet
self.qrvc.delegate = self
self.presentViewController_animated_completion_(self.qrvc, True, None)
self.qrScanErr = False
utils.boilerplate.vc_highlight_button_then_do(self, but, DoIt)
@objc_method
def onContactBut_(self, but) -> None:
def DoIt() -> None:
def onPayTo(addys : list) -> None:
if contacts.pay_to(addys):
self.dismissViewControllerAnimated_completion_(True, None)
vc = contacts.ContactsVC.alloc().initWithMode_(contacts.ModePicker).autorelease()
nav = utils.tintify(UINavigationController.alloc().initWithRootViewController_(vc).autorelease())
utils.add_callback(vc, 'on_pay_to', onPayTo)
if self.payTo and self.payTo.text:
utils.nspy_put_byname(vc, self.payTo.text, 'preselected')
self.presentViewController_animated_completion_(nav, True, None)
utils.boilerplate.vc_highlight_button_then_do(self, but, DoIt)
@objc_method
def onMaxBut_(self, but) -> None:
utils.boilerplate.vc_highlight_button_then_do(self, but, lambda:self.spendMax())
@objc_method
def textFieldShouldEndEditing_(self, tf : ObjCInstance) -> bool:
#print('textFieldShouldEndEditing %d'%tf.tag)
if tf.tag in [115,230]:
tf.text = tf.text.strip() # strip leading/trailing spaces in description and address text fields
if tf.tag in
detected.
link_aggregation: This field indicates if this is a link aggregation
interface.
mac: The MAC address of the interface.
ms_ad_user_data: The Microsoft Active Directory user related
information.
name: The interface system name.
network_view: The name of the network view.
oper_status: Operating state of the interface.
port_fast: The Port Fast status of the interface.
reserved_object: The reference to
object(Host/FixedAddress/GridMember) to which this port is
reserved.
speed: The interface speed in bps.
trunk_status: Indicates if the interface is tagged as a VLAN trunk
or not.
type: The type of interface.
vlan_info_task_info: The configured VLAN status task info of the
interface.
vlan_infos: The list of VLAN information associated with the
interface.
vrf_description: The description of the Virtual Routing and
Forwarding (VRF) associated with the interface.
vrf_name: The name of the Virtual Routing and Forwarding (VRF)
associated with the interface.
vrf_rd: The route distinguisher of the Virtual Routing and
Forwarding (VRF) associated with the interface.
"""
_infoblox_type = 'discovery:deviceinterface'
_fields = ['admin_status', 'admin_status_task_info',
'cap_if_admin_status_ind', 'cap_if_admin_status_na_reason',
'cap_if_description_ind', 'cap_if_description_na_reason',
'cap_if_net_deprovisioning_ipv4_ind',
'cap_if_net_deprovisioning_ipv4_na_reason',
'cap_if_net_deprovisioning_ipv6_ind',
'cap_if_net_deprovisioning_ipv6_na_reason',
'cap_if_net_provisioning_ipv4_ind',
'cap_if_net_provisioning_ipv4_na_reason',
'cap_if_net_provisioning_ipv6_ind',
'cap_if_net_provisioning_ipv6_na_reason',
'cap_if_vlan_assignment_ind',
'cap_if_vlan_assignment_na_reason', 'cap_if_voice_vlan_ind',
'cap_if_voice_vlan_na_reason', 'description',
'description_task_info', 'device', 'duplex', 'extattrs',
'ifaddr_infos', 'index', 'last_change', 'link_aggregation',
'mac', 'ms_ad_user_data', 'name', 'network_view', 'oper_status',
'port_fast', 'reserved_object', 'speed', 'trunk_status', 'type',
'vlan_info_task_info', 'vlan_infos', 'vrf_description',
'vrf_name', 'vrf_rd']
_search_for_update_fields = ['name', 'type']
_updateable_search_fields = []
_all_searchable_fields = ['description', 'mac', 'name', 'network_view',
'oper_status', 'speed', 'type',
'vrf_description', 'vrf_name', 'vrf_rd']
_return_fields = ['extattrs', 'name', 'type']
_remap = {}
_shadow_fields = ['_ref']
_custom_field_processing = {
'ifaddr_infos': DiscoveryIfaddrinfo.from_dict,
'vlan_infos': DiscoveryVlaninfo.from_dict,
}
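# A minimal retrieval sketch (assuming the usual infoblox-client Connector and
# search_all API; the host, credentials and interface name are placeholders):
#
#   from infoblox_client import connector
#   conn = connector.Connector({'host': 'gm.example.com',
#                               'username': 'admin', 'password': 'secret'})
#   ifaces = DiscoveryDeviceinterface.search_all(conn, name='GigabitEthernet0/1')
#   for iface in ifaces:
#       print(iface.name, iface.type)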
class DiscoveryDeviceneighbor(InfobloxObject):
""" DiscoveryDeviceneighbor: Device Neighbor object.
Corresponds to WAPI object 'discovery:deviceneighbor'
The neighbor associated with the device discovered by Network
Automation.
Fields:
address: The IPv4 Address or IPv6 Address of the device neighbor.
address_ref: The ref to the management IP address of the device
neighbor.
device: The ref to the device to which the device neighbor belongs.
interface: The ref to the interface to which the device neighbor
belongs.
mac: The MAC address of the device neighbor.
name: The name of the device neighbor.
vlan_infos: The list of VLAN information associated with the device
neighbor.
"""
_infoblox_type = 'discovery:deviceneighbor'
_fields = ['address', 'address_ref', 'device', 'interface', 'mac', 'name',
'vlan_infos']
_search_for_update_fields = []
_updateable_search_fields = []
_all_searchable_fields = ['device']
_return_fields = ['address', 'address_ref', 'mac', 'name']
_remap = {}
_shadow_fields = ['_ref']
_custom_field_processing = {
'vlan_infos': DiscoveryVlaninfo.from_dict,
}
class DiscoveryDevicesupportbundle(InfobloxObject):
""" DiscoveryDevicesupportbundle: Device support bundle object.
Corresponds to WAPI object 'discovery:devicesupportbundle'
Infoblox frequently provides support files for additional network
devices that may not have previously been supported by discovery,
and updates to support new operating system versions of existing
devices.
The device support bundle represents the entity for displaying and
managing device support files.
Fields:
author: The developer of the device support bundle.
integrated_ind: Determines whether the device support bundle is
integrated or imported. Note that integrated support bundles
cannot be removed.
name: The descriptive device name for the device support bundle.
version: The version of the currently active device support bundle.
"""
_infoblox_type = 'discovery:devicesupportbundle'
_fields = ['author', 'integrated_ind', 'name', 'version']
_search_for_update_fields = ['name']
_updateable_search_fields = []
_all_searchable_fields = ['name']
_return_fields = ['author', 'integrated_ind', 'name', 'version']
_remap = {}
_shadow_fields = ['_ref']
class DiscoveryDiagnostictask(InfobloxObject):
""" DiscoveryDiagnostictask: The discovery diagnostic task object.
Corresponds to WAPI object 'discovery:diagnostictask'
The object provides information about the discovery diagnostic task.
Fields:
community_string: The SNMP community string of the discovery
diagnostic task.
debug_snmp: The SNMP debug flag of the discovery diagnostic task.
force_test: The force test flag of the discovery diagnostic task.
ip_address: The IP address of the discovery diagnostic task.
network_view: The network view name of the discovery diagnostic
task.
start_time: The time when the discovery diagnostic task was started.
task_id: The ID of the discovery diagnostic task.
"""
_infoblox_type = 'discovery:diagnostictask'
_fields = ['community_string', 'debug_snmp', 'force_test', 'ip_address',
'network_view', 'start_time', 'task_id']
_search_for_update_fields = ['ip_address', 'network_view', 'task_id']
_updateable_search_fields = ['ip_address', 'network_view', 'task_id']
_all_searchable_fields = ['ip_address', 'network_view', 'task_id']
_return_fields = ['ip_address', 'network_view', 'task_id']
_remap = {}
_shadow_fields = ['_ref']
class DiscoveryGridproperties(InfobloxObject):
""" DiscoveryGridproperties: The Grid discovery properties object.
Corresponds to WAPI object 'discovery:gridproperties'
The object provides information about the Grid discovery properties.
Fields:
advanced_polling_settings: Discovery advanced polling settings.
advisor_settings: Advisor settings.
auto_conversion_settings: Automatic conversion settings.
basic_polling_settings: Discovery basic polling settings.
cli_credentials: Discovery CLI credentials.
discovery_blackout_setting: Discovery blackout setting.
dns_lookup_option: The type of the devices the DNS processor
operates on.
dns_lookup_throttle: The percentage of available capacity the DNS
processor operates at.Valid values are unsigned integer between
1 and 100, inclusive.
enable_advisor: Advisor application enabled/disabled.
enable_auto_conversion: The flag that enables automatic conversion
of discovered data.
enable_auto_updates: The flag that enables updating discovered data
for managed objects.
grid_name: The Grid name.
ignore_conflict_duration: Determines the timeout to ignore the
discovery conflict duration (in seconds).
port_control_blackout_setting: Port control blackout setting.
ports: Ports to scan.
same_port_control_discovery_blackout: Determines if the same port
control is used for discovery blackout.
snmpv1v2_credentials: Discovery SNMP v1 and v2 credentials.
snmpv3_credentials: Discovery SNMP v3 credentials.
unmanaged_ips_limit: Limit of discovered unmanaged IP address which
determines how frequently the user is notified about the new
unmanaged IP address in a particular network.
unmanaged_ips_timeout: Determines the timeout between two
notifications (in seconds) about the new unmanaged IP address in
a particular network. The value must be between 60 seconds and
the number of seconds remaining to Jan 2038.
vrf_mapping_policy: The policy type used to define the behavior of
the VRF mapping.
vrf_mapping_rules: VRF mapping rules.
"""
_infoblox_type = 'discovery:gridproperties'
_fields = ['advanced_polling_settings', 'advisor_settings',
'auto_conversion_settings', 'basic_polling_settings',
'cli_credentials', 'discovery_blackout_setting',
'dns_lookup_option', 'dns_lookup_throttle', 'enable_advisor',
'enable_auto_conversion', 'enable_auto_updates', 'grid_name',
'ignore_conflict_duration', 'port_control_blackout_setting',
'ports', 'same_port_control_discovery_blackout',
'snmpv1v2_credentials', 'snmpv3_credentials',
'unmanaged_ips_limit', 'unmanaged_ips_timeout',
'vrf_mapping_policy', 'vrf_mapping_rules']
_search_for_update_fields = []
_updateable_search_fields = []
_all_searchable_fields = []
_return_fields = ['grid_name']
_remap = {}
_shadow_fields = ['_ref']
_custom_field_processing = {
'auto_conversion_settings': DiscoveryAutoconversionsetting.from_dict,
'cli_credentials': DiscoveryClicredential.from_dict,
'ports': DiscoveryPort.from_dict,
'snmpv1v2_credentials': DiscoverySnmpcredential.from_dict,
'snmpv3_credentials': DiscoverySnmp3Credential.from_dict,
'vrf_mapping_rules': DiscoveryVrfmappingrule.from_dict,
}
def advisor_run_now(self, *args, **kwargs):
return self._call_func("advisor_run_now", *args, **kwargs)
def advisor_test_connection(self, *args, **kwargs):
return self._call_func("advisor_test_connection", *args, **kwargs)
def diagnostic(self, *args, **kwargs):
return self._call_func("diagnostic", *args, **kwargs)
def diagnostic_status(self, *args, **kwargs):
return self._call_func("diagnostic_status", *args, **kwargs)
class DiscoveryMemberproperties(InfobloxObject):
""" DiscoveryMemberproperties: The Grid discovery member properties
object.
Corresponds to WAPI object 'discovery:memberproperties'
The object provides information about the Grid member discovery
properties.
Fields:
address: The Grid member address IP address.
cisco_apic_configurations: Cisco APIC configurations.
cli_credentials: Discovery CLI credentials.
default_seed_routers: Default seed routers.
discovery_member: The name of the network discovery Grid member.
enable_service: Determines if the discovery service is enabled.
gateway_seed_routers: Gateway seed routers.
is_sa: Determines if the standalone mode for discovery network
monitor is enabled or not.
role: Discovery member role.
scan_interfaces: Discovery networks to which the member is assigned.
seed_routers: Seed routers.
snmpv1v2_credentials: Discovery SNMP v1 and v2 credentials.
snmpv3_credentials: Discovery SNMP v3 credentials.
use_cli_credentials: Use flag for: cli_credentials
use_snmpv1v2_credentials: Use flag for: snmpv1v2_credentials
use_snmpv3_credentials: Use flag for: snmpv3_credentials
"""
_infoblox_type = 'discovery:memberproperties'
_fields = ['address', 'cisco_apic_configurations', 'cli_credentials',
'default_seed_routers', 'discovery_member', 'enable_service',
'gateway_seed_routers', 'is_sa', 'role', 'scan_interfaces',
'seed_routers', 'snmpv1v2_credentials', 'snmpv3_credentials',
'use_cli_credentials', 'use_snmpv1v2_credentials',
'use_snmpv3_credentials']
_search_for_update_fields = ['discovery_member']
_updateable_search_fields = ['enable_service', 'is_sa', 'role']
_all_searchable_fields = ['discovery_member', 'enable_service', 'is_sa',
'role']
_return_fields = ['discovery_member']
_remap = {}
_shadow_fields = ['_ref']
_custom_field_processing = {
'cisco_apic_configurations': DiscoveryCiscoapicconfiguration.from_dict,
'cli_credentials': DiscoveryClicredential.from_dict,
'default_seed_routers': DiscoverySeedrouter.from_dict,
'gateway_seed_routers': DiscoverySeedrouter.from_dict,
'scan_interfaces': DiscoveryScaninterface.from_dict,
'seed_routers': DiscoverySeedrouter.from_dict,
'snmpv1v2_credentials': DiscoverySnmpcredential.from_dict,
'snmpv3_credentials': DiscoverySnmp3Credential.from_dict,
}
class DiscoveryStatus(InfobloxObject):
""" DiscoveryStatus: Discovery Status object.
Corresponds to WAPI object 'discovery:status'
The discovery status of discovered data
Fields:
address: The IPv4 Address or IPv6 Address of the device.
cli_collection_enabled: Indicates if CLI collection is enabled.
cli_credential_info: The CLI credential status information of the
device.
existence_info: The existence status information of the device.
fingerprint_enabled: Indicates if DHCP fingerprinting is enabled.
fingerprint_info: The DHCP fingerprinting status information of the
device.
first_seen: The timestamp when the device was first discovered.
last_action: The timestamp of the last detected interface property
change.
last_seen: The timestamp when the device was last discovered.
last_timestamp: The timestamp of the last executed action for the
device.
name: The name of the device.
network_view: The name of the network view in which this device
resides.
reachable_info: The reachable status information of the device.
snmp_collection_enabled: Indicates if SNMP collection is enabled.
ctx,
cc_toolchain = cc_toolchain,
requested_features = ctx.features,
unsupported_features = ctx.disabled_features,
)
library_to_link = cc_common.create_library_to_link(
actions = ctx.actions,
feature_configuration = feature_configuration,
dynamic_library = dynamic_library,
dynamic_library_symlink_path =
_shorten_library_symlink(dynamic_library) if dynamic_library and ctx.attr.unique_name else "",
static_library = static_library,
cc_toolchain = cc_toolchain,
)
compilation_context = cc_common.create_compilation_context()
linking_context = cc_common.create_linking_context(
libraries_to_link = [library_to_link],
)
cc_info = cc_common.merge_cc_infos(
cc_infos = [
CcInfo(
compilation_context = compilation_context,
linking_context = linking_context,
),
cc_info,
],
)
output_group_info = OutputGroupInfo(**library_info_output_groups(
name = ctx.label.name,
hs = hs,
hs_info = hs_info,
lib_info = lib_info,
))
result = [default_info, hs_info, cc_info, lib_info, output_group_info]
if ctx.attr.haddock:
result.append(doc_info)
return result
haskell_cabal_library = rule(
_haskell_cabal_library_impl,
attrs = {
"package_name": attr.string(
doc = "Cabal package name. Defaults to name attribute.",
),
"version": attr.string(
doc = "Version of the Cabal package.",
mandatory = True,
),
"haddock": attr.bool(
default = True,
doc = "Whether to generate haddock documentation.",
),
"srcs": attr.label_list(
allow_files = True,
doc = "All files required to build the package, including the Cabal file.",
),
"deps": attr.label_list(
aspects = [haskell_cc_libraries_aspect],
doc = "Package build dependencies. Note, setup dependencies need to be declared separately using `setup_deps`.",
),
"setup_deps": attr.label_list(
aspects = [haskell_cc_libraries_aspect],
doc = "Dependencies for custom setup Setup.hs.",
),
"compiler_flags": attr.string_list(
doc = """Flags to pass to Haskell compiler, in addition to those defined
the cabal file. Subject to Make variable substitution.""",
),
"tools": attr.label_list(
cfg = "host",
allow_files = True,
doc = """Tool dependencies. They are built using the host configuration, since
the tools are executed as part of the build.""",
),
"flags": attr.string_list(
doc = "List of Cabal flags, will be passed to `Setup.hs configure --flags=...`.",
),
"_cabal_wrapper": attr.label(
executable = True,
cfg = "host",
default = Label("@rules_haskell//haskell:cabal_wrapper"),
),
"_cc_toolchain": attr.label(
default = Label("@bazel_tools//tools/cpp:current_cc_toolchain"),
),
"verbose": attr.bool(
default = True,
doc = "Whether to show the output of the build",
),
"unique_name": attr.bool(
default = False,
doc = """Whether the library name is known to be unique within the
workspace. This is used by `stack_snapshot` where library names are
known to be unique within the snapshot. If true, then the dynamic
library symlink underneath `_solib_<cpu>` will be shortened to
avoid exceeding the MACH-O header size limit on MacOS.""",
),
},
toolchains = [
"@bazel_tools//tools/cpp:toolchain_type",
"@rules_haskell//haskell:toolchain",
"@rules_sh//sh/posix:toolchain_type",
],
fragments = ["cpp"],
doc = """\
Use Cabal to build a library.
### Examples
```bzl
haskell_cabal_library(
name = "lib-0.1.0.0",
srcs = ["lib.cabal", "Lib.hs", "Setup.hs"],
)
haskell_toolchain_library(name = "base")
haskell_binary(
name = "bin",
deps = [":base", ":lib-0.1.0.0"],
srcs = ["Main.hs"],
)
```
This rule does not use `cabal-install`. It calls the package's
`Setup.hs` script directly if one exists, or the default one if not.
All sources files that would have been part of a Cabal sdist need to
be listed in `srcs` (crucially, including the `.cabal` file).
A `haskell_cabal_library` can be substituted for any
`haskell_library`. The two are interchangeable in most contexts.
However, using a plain `haskell_library` sometimes leads to better
build times, and does not require drafting a `.cabal` file.
""",
)
def _haskell_cabal_binary_impl(ctx):
hs = haskell_context(ctx)
dep_info = gather_dep_info(ctx, ctx.attr.deps)
setup_dep_info = gather_dep_info(ctx, ctx.attr.setup_deps)
setup_deps = all_dependencies_package_ids(ctx.attr.setup_deps)
cc = cc_interop_info(ctx)
# All C and Haskell library dependencies.
cc_info = cc_common.merge_cc_infos(
cc_infos = [dep[CcInfo] for dep in ctx.attr.deps if CcInfo in dep],
)
# Separate direct C library dependencies.
direct_cc_info = cc_common.merge_cc_infos(
cc_infos = [
dep[CcInfo]
for dep in ctx.attr.deps
if CcInfo in dep and not HaskellInfo in dep
],
)
posix = ctx.toolchains["@rules_sh//sh/posix:toolchain_type"]
exe_name = ctx.attr.exe_name if ctx.attr.exe_name else hs.label.name
user_compile_flags = _expand_make_variables("compiler_flags", ctx, ctx.attr.compiler_flags)
cabal = _find_cabal(hs, ctx.files.srcs)
setup = _find_setup(hs, cabal, ctx.files.srcs)
package_database = hs.actions.declare_file(
"_install/{}.conf.d/package.cache".format(hs.label.name),
sibling = cabal,
)
binary = hs.actions.declare_file(
"_install/bin/{name}{ext}".format(
name = exe_name,
ext = ".exe" if hs.toolchain.is_windows else "",
),
sibling = cabal,
)
data_dir = hs.actions.declare_directory(
"_install/{}_data".format(hs.label.name),
sibling = cabal,
)
(tool_inputs, tool_input_manifests) = ctx.resolve_tools(tools = ctx.attr.tools)
c = _prepare_cabal_inputs(
hs,
cc,
posix,
dep_info,
cc_info,
direct_cc_info,
component = "exe:{}".format(exe_name),
package_id = hs.label.name,
tool_inputs = tool_inputs,
tool_input_manifests = tool_input_manifests,
cabal = cabal,
setup = setup,
setup_deps = setup_deps,
setup_dep_info = setup_dep_info,
srcs = ctx.files.srcs,
compiler_flags = user_compile_flags,
flags = ctx.attr.flags,
generate_haddock = False,
cabal_wrapper = ctx.executable._cabal_wrapper,
package_database = package_database,
verbose = ctx.attr.verbose,
dynamic_binary = binary,
transitive_haddocks = _gather_transitive_haddocks(ctx.attr.deps),
)
ctx.actions.run(
executable = c.cabal_wrapper,
arguments = [c.args],
inputs = c.inputs,
input_manifests = c.input_manifests,
outputs = [
package_database,
binary,
data_dir,
],
tools = [c.cabal_wrapper],
env = c.env,
mnemonic = "HaskellCabalBinary",
progress_message = "HaskellCabalBinary {}".format(hs.label),
)
hs_info = HaskellInfo(
package_databases = dep_info.package_databases,
version_macros = set.empty(),
source_files = depset(),
extra_source_files = depset(),
import_dirs = set.empty(),
hs_libraries = dep_info.hs_libraries,
interface_dirs = dep_info.interface_dirs,
compile_flags = [],
)
default_info = DefaultInfo(
files = depset([binary]),
executable = binary,
runfiles = ctx.runfiles(
files = [data_dir],
transitive_files = c.runfiles,
collect_default = True,
),
)
return [hs_info, cc_info, default_info]
haskell_cabal_binary = rule(
_haskell_cabal_binary_impl,
executable = True,
attrs = {
"exe_name": attr.string(
doc = "Cabal executable component name. Defaults to the value of the name attribute.",
),
"srcs": attr.label_list(
allow_files = True,
doc = "All files required to build the package, including the Cabal file.",
),
"deps": attr.label_list(
aspects = [haskell_cc_libraries_aspect],
doc = "Package build dependencies. Note, setup dependencies need to be declared separately using `setup_deps`.",
),
"setup_deps": attr.label_list(
aspects = [haskell_cc_libraries_aspect],
doc = "Dependencies for custom setup Setup.hs.",
),
"compiler_flags": attr.string_list(
doc = """Flags to pass to Haskell compiler, in addition to those defined
the cabal file. Subject to Make variable substitution.""",
),
"tools": attr.label_list(
cfg = "host",
doc = """Tool dependencies. They are built using the host configuration, since
the tools are executed as part of the build.""",
),
"flags": attr.string_list(
doc = "List of Cabal flags, will be passed to `Setup.hs configure --flags=...`.",
),
"_cabal_wrapper": attr.label(
executable = True,
cfg = "host",
default = Label("@rules_haskell//haskell:cabal_wrapper"),
),
"_cc_toolchain": attr.label(
default = Label("@bazel_tools//tools/cpp:current_cc_toolchain"),
),
"verbose": attr.bool(
default = True,
doc = "Whether to show the output of the build",
),
},
toolchains = [
"@bazel_tools//tools/cpp:toolchain_type",
"@rules_haskell//haskell:toolchain",
"@rules_sh//sh/posix:toolchain_type",
],
fragments = ["cpp"],
doc = """\
Use Cabal to build a binary.
### Examples
```bzl
haskell_cabal_binary(
name = "happy",
srcs = glob(["**"]),
)
```
This rule assumes that the .cabal file defines a single executable
with the same name as the package.
This rule does not use `cabal-install`. It calls the package's
`Setup.hs` script directly if one exists, or the default one if not.
All sources files that would have been part of a Cabal sdist need to
be listed in `srcs` (crucially, including the `.cabal` file).
""",
)
# Temporary hardcoded list of core libraries. This will no longer be
# necessary once Stack 2.0 is released.
#
# TODO remove this list and replace it with Stack's --global-hints
# mechanism.
_CORE_PACKAGES = [
"Cabal",
"array",
"base",
"binary",
"bytestring",
"containers",
"deepseq",
"directory",
"filepath",
"ghc",
"ghc-boot",
"ghc-boot-th",
"ghc-compact",
"ghc-heap",
"ghc-prim",
"ghci",
"haskeline",
"hpc",
"integer-gmp",
"integer-simple",
"libiserv",
"mtl",
"parsec",
"pretty",
"process",
"rts",
"stm",
"template-haskell",
"terminfo",
"text",
"time",
"transformers",
"unix",
"Win32",
"xhtml",
]
_STACK_DEFAULT_VERSION = "2.3.1"
# Only ever need one version, but use same structure as for GHC bindists.
_STACK_BINDISTS = \
{
"2.3.1": {
"linux-x86_64": (
"https://github.com/commercialhaskell/stack/releases/download/v2.3.1/stack-2.3.1-linux-x86_64-static.tar.gz",
"4bae8830b2614dddf3638a6d1a7bbbc3a5a833d05b2128eae37467841ac30e47",
),
"osx-x86_64": (
"https://github.com/commercialhaskell/stack/releases/download/v2.3.1/stack-2.3.1-osx-x86_64.tar.gz",
"73eee7e5f24d11fd0af00cb05f16119e86be5d578c35083250e6b85ed1ca3621",
),
"windows-x86_64": (
"https://github.com/commercialhaskell/stack/releases/download/v2.3.1/stack-2.3.1-windows-x86_64.tar.gz",
"440588c92ffcb42b88fd6455dc68728dae9b08bdd1a683d1cf5f80aa9aa8b014",
),
},
}
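# A minimal consumption sketch (how one of the (url, sha256) pairs above might be
# fetched from a repository rule; the stripPrefix and surrounding wiring are
# illustrative only):
#
#   url, sha256 = _STACK_BINDISTS[_STACK_DEFAULT_VERSION]["linux-x86_64"]
#   repository_ctx.download_and_extract(
#       url = url,
#       sha256 = sha256,
#       stripPrefix = "stack-%s-linux-x86_64-static" % _STACK_DEFAULT_VERSION,
#   )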
def _stack_version_check(repository_ctx, stack_cmd):
"""Returns False if version not recent enough."""
exec_result = _execute_or_fail_loudly(repository_ctx, [stack_cmd, "--numeric-version"])
stack_major_version = int(exec_result.stdout.split(".")[0])
return stack_major_version >= 2
def _parse_components(package, components):
"""Parse and validate a list of Cabal components.
Components take the following shape:
* `lib`: The library component.
* `lib:<package>`: The library component.
* `exe`: The executable component `exe:<package>`.
* `exe:<name>`: An executable component.
Args:
package: string, The package name.
components: list of string, The Cabal components
Returns:
struct(lib, exe):
lib: bool, Whether the package has a library component.
exe: list of string, List of executables.
"""
lib = False
exe = []
for component in components:
if component == "lib":
lib = True
elif component.startswith("lib:"):
if component == "lib:%s" % package:
lib = True
else:
fail("Sublibrary components are not supported: %s in %s" % (component, package), "components")
elif component == "exe":
exe.append(package)
elif component.startswith("exe:"):
exe.append(component[4:])
elif component.startswith("test"):
fail("Cabal test components are not supported: %s in %s" % (component, package), "components")
else:
fail("Invalid Cabal | |
~ ('callcode', :gas, :addr, :wei, :fname, :fparams):
fname = pretty_fname(fname, add_color=add_color)
if type(addr) == int:
addr = hex(addr)
addr = prettify(addr, add_color = add_color)
gas = prettify(gas, parentheses = False, add_color = add_color)
fparams = pretty_memory(fparams, add_color = add_color)
if fname is not None:
if type(fname) == str:
fname = pretty_fname(fname, add_color = add_color)
yield f"{COLOR_WARNING}codecall{ENDC} {addr}.{fname} with:"
else:
yield f"{COLOR_WARNING}codecall{ENDC} {addr} with:"
yield " funct " + prettify(fname, add_color=add_color)
else:
yield f"{COLOR_WARNING}codecall{ENDC} {addr} with:"
if wei != 0:
wei = prettify(wei, parentheses=False, add_color=add_color)
yield f" value {wei} {COLOR_GRAY}wei{ENDC}"
yield f" gas {gas} {COLOR_GRAY}wei{ENDC}"
if fparams is not None:
yield " args {}".format(', '.join(fparams))
elif r ~ ('delegatecall', :gas, :addr, :fname, :fparams):
fname = pretty_fname(fname, add_color=add_color)
if type(addr) == int:
addr = hex(addr)
addr = prettify(addr, add_color = add_color)
gas = prettify(gas, parentheses = False, add_color = add_color)
fparams = pretty_memory(fparams, add_color = add_color)
if fname is not None:
if type(fname) == str:
fname = pretty_fname(fname, add_color = add_color)
yield f"{COLOR_WARNING}delegate{ENDC} {addr}.{fname} with:"
else:
yield f"{COLOR_WARNING}delegate{ENDC} {addr} with:"
yield " funct " + prettify(fname, add_color=add_color)
else:
yield f"{COLOR_WARNING}delegate{ENDC} {addr} with:"
yield f" gas {gas} {COLOR_GRAY}wei{ENDC}"
if fparams is not None:
yield " args {}".format(', '.join(fparams))
elif r ~ ('selfdestruct', :addr):
yield col('selfdestruct(', COLOR_WARNING)+col(pret(addr, add_color=False, parentheses=False), FAIL)+col(')', COLOR_WARNING)
elif r ~ ('precompiled', :var_name, :func_name, :params):
yield "{} = {}({}) {}".format(col(var_name, COLOR_BLUE), func_name, prettify(params, add_color=add_color, parentheses=False),
COLOR_GRAY+'# precompiled'+ENDC)
elif r ~ ('create', :wei, :code):
yield f"create contract with {wei} wei"
yield f" code: {prettify(code)}"
elif r ~ ('create2', :wei, :code, :salt):
yield f"create2 contract with {wei} wei"
yield f" salt: {prettify(salt)}"
yield f" code: {prettify(code)}"
elif r ~ ('call', :gas, :addr, :wei, :fname, :fparams):
if type(addr) == int:
if len(hex(addr)) > 22+2:
addr = padded_hex(addr, 40) # todo: padded hex
else:
addr = hex(addr) # if it's longer, padded hex returns '???'
addr = pret(addr)
gas = pretty_gas(gas, wei, add_color)
if fname is None:
yield f"call {addr} with:"
else:
fname = pretty_fname(fname, add_color=add_color)
if fname == '0x0':
yield f"call {addr} with:"
elif type(fname) == str:
yield f"call {addr}.{pret(fname)} with:"
else:
yield f"call {addr} with:"
yield f" funct {pret(fname)}"
if wei != 0:
wei = prettify(wei, parentheses=False, add_color=add_color)
yield f" value {wei} {COLOR_GRAY}wei{ENDC}"
yield f" gas {gas} {COLOR_GRAY}wei{ENDC}"
if fparams is not None:
fparams = pretty_memory(fparams, add_color=add_color)
yield " args {}".format(', '.join(fparams))
elif r ~ ('staticcall', :gas, :addr, :wei, :fname, :fparams):
if type(addr) == int:
addr = hex(addr)
addr = prettify(addr, add_color=add_color, parentheses=False)
gas = pretty_gas(gas, wei, add_color)
if fname is not None:
fname = pretty_fname(fname, add_color=add_color)
if fname == '0x0':
yield f"static call {addr} with:"
elif type(fname) == str and fname != '0x0':
yield f"static call {addr}.{pret(fname)} with:"
else:
yield f"static call {addr} with:"
yield f" funct {pret(fname)}"
else:
yield f"static call {addr} with:"
yield f" gas {gas} {COLOR_GRAY}wei{ENDC}"
if fparams is not None:
fparams = pretty_memory(fparams, add_color=add_color)
yield " args {}".format(', '.join(fparams))
elif r ~ ('label', :name, :setvars):
yield COLOR_GREEN + f'loop {str(name)} setvars: {str(setvars)}' + ENDC
elif r ~ ('goto', *rest):
yield COLOR_GREEN + f'continue {str(rest)}'+ENDC
elif r ~ ('continue', :jd, :setvars):
for v in setvars:
yield str(list(pretty_line(v, add_color=True))[0])
yield COLOR_GREEN + 'continue ' + ENDC # +str(jd)+ENDC
elif r ~ ('setvar', ...):
yield prettify(r, add_color=add_color)
elif r ~ ('setmem', ...):
yield prettify(r, add_color=add_color)
elif r ~ ('set', :idx, :val):
if val ~ ('add', int:v, idx):
assert v != 0
if v == -1:
yield prettify(idx, add_color=add_color) + '--'
elif v == 1:
yield prettify(idx, add_color=add_color) + '++'
elif v < 0:
yield prettify(idx, add_color=add_color) + ' -= ' + prettify(-v, add_color=add_color, parentheses=False)
else:
yield prettify(idx, add_color=add_color) + ' += ' + prettify(v, add_color=add_color, parentheses=False)
elif val ~ ('add', idx, ('mul', -1, :v)):
yield prettify(idx, add_color=add_color) + ' -= ' + prettify(v, add_color=add_color, parentheses=False)
elif val ~ ('add', idx, :v):
yield prettify(idx, add_color=add_color) + ' += ' + prettify(v, add_color=add_color, parentheses=False)
elif val ~ ('add', ('mul', -1, :v), idx):
yield prettify(idx, add_color=add_color) + ' -= ' + prettify(v, add_color=add_color, parentheses=False)
elif val ~ ('add', :v, idx):
yield prettify(idx, add_color=add_color) + ' += ' + prettify(v, add_color=add_color, parentheses=False)
else:
yield prettify(idx, add_color=add_color) + ' = ' + prettify(val, add_color=add_color, parentheses=False)
elif r ~ ('stop', ...):
yield 'stop'
elif r ~ ('undefined', *params):
yield COLOR_WARNING + '...' + ENDC + COLOR_GRAY + f' # unusual jump {params}, couldn\'t decompile, sorry' + ENDC
elif r ~ ('invalid', *rest):
if len(rest) > 0:
yield "revert "# + COLOR_GRAY + f"# {rest}" + ENDC
else:
yield "revert"
elif r ~ ('invalid', ...):
yield "revert " + (COLOR_GRAY + f"# {rest}" + ENDC)
elif (r ~ ('revert', 0)) or \
(r ~ ('revert', ('mem', 0, 0))):
yield "revert"
elif r ~ (:op, ('mem', ('range', :mem_idx, :mem_len))) and \
op in ('revert', 'return'):
if op == 'revert':
yield 'revert with memory'
else:
yield op + ' memory'
if mem_len ~ ('sub', :mem_until, mem_idx):
yield f" from {pret(mem_idx)}"
yield f" to {pret(mem_until)}"
else:
yield " from " + pret(mem_idx)
yield " " + col('len', COLOR_WARNING) + " " + pret(mem_len)
elif r ~ (:op, :param) and op in ('return', 'revert'):
if op == 'revert':
op = 'revert with'
res_mem = pretty_memory(param, add_color=True)
ret_val = ', '.join(res_mem)
if len(clean_color(ret_val)) < 120 or opcode(param) != 'data':
yield f'{op} {ret_val}'
else:
# split long returns into lines. e.g. kitties.getKitten, or kitties.tokenMetadata
# yield str(len(ret_val))
res_mem = list(res_mem)
if res_mem[0] == '32':
res_mem.pop(0)
res_mem[0] = '32, ' + res_mem[0] # happens often, this is probably an array structure,
# and sole `32` in first line looks ugly
yield f'{op} {res_mem[0]}, '
for idx, l in enumerate(res_mem[1:]):
yield ' '*len(op) + ' ' + l + (',' if idx != len(res_mem) - 2 else '')
# assert op == 'revert'
# yield "{} with {}".format(op, ret_val) # adding 'with' to make it more readable
elif r ~ ('store', :size, :off, :idx, :val):
stor_addr = prettify(('stor', size, off, idx), add_color=add_color)
stor_val = prettify(val, add_color=add_color, parentheses=False)
yield "{} = {}".format(stor_addr, stor_val)
elif type(r) == list and len(r) > 1:
yield "{} {}".format(r[0], ', '.join([prettify(x, True, False, add_color = add_color) for x in r[1:]]))
elif type(r) == list:
yield str(r[0])
else:
yield str(r)
def pretty_type(t):
if t ~ ('def', :name, :loc, ('mask', :size, :off)):
return pretty_type(('def', name, loc, size)) + COLOR_GRAY + (f' offset {off}' if off > 0 else '') + ENDC
elif t ~ ('def', :name, :loc, :bts):
if type(loc) == int and loc > 1000:
loc = hex(loc)
return f' {COLOR_GREEN}{name}{ENDC} is {pretty_type(bts)} {COLOR_GRAY}at storage {loc}{ENDC}'
elif t ~ ('struct', 1):
return 'struct'
elif t == 'struct':
return 'struct'
elif t ~ ('struct', int:num):
return f'struct {num} bytes'
elif t ~ ('array', :bts):
return f'array of '+pretty_type(bts)
elif t ~ ('mapping', :bts):
return f'mapping of '+pretty_type(bts)
elif type(t) == int:
return mask_to_type(t, force=True)
else:
assert False, f'unknown type {t}'
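# A rough illustration of pretty_type on a storage definition tuple (colour codes
# omitted; the exact rendering of the base type comes from mask_to_type):
#
#   pretty_type(('def', 'balances', 1, ('mapping', 256)))
#   # -> " balances is mapping of uint256 at storage 1"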
def pretty_stor(exp, add_color=True):
col = partial(colorize, color=COLOR_GREEN, add_color=add_color)
stor = partial(pretty_stor, add_color=add_color)
pret = partial(prettify, parentheses=False, add_color=add_color)
if exp ~ ('stor', ('length', :idx)):
return stor(idx) + col('.length')
if exp ~ ('loc', :loc):
return col(f'stor_l{loc}')
if exp ~ ('name', :name, :loc):
return col(name)
# if exp ~ ('stor', (:op, :param)) and op in ('loc', 'name'):
# with top-level fields, it's just a different stor
# variable. with lower-level we treat it as a struct
# return stor((op, param))
if exp ~ ('stor', :loc):
# with top-level fields, it's just a different stor
# variable. with lower-level we treat it as a struct
return stor(loc)
if exp ~ ('field', :off, :loc):
return stor(loc) + col(f'.field_{pret(off, add_color=False)}')
if exp ~ ('type', :size, :loc):
if size == 256:
# prettify removes 256 masks by default, force it
return col('uint256(', color=COLOR_GRAY) + stor(loc) + col(')', color=COLOR_GRAY)
else:
return pret(('mask', size, 0, stor(loc)))
def pr_idx(idx):
if idx ~ ('data', *terms):
return col('][').join([pret(t) for t in terms])
else:
return pret(idx)
if exp ~ ('map', :idx,
from directory_constants.constants import cms
from django.forms import Textarea, CheckboxSelectMultiple
from django.utils.text import slugify
from modelcluster.fields import ParentalManyToManyField
from wagtail.admin.edit_handlers import (
HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel
)
from wagtail.images.edit_handlers import ImageChooserPanel
from django.db import models
from core.model_fields import MarkdownField
from core.models import (
BasePage,
ExclusivePageMixin,
ServiceMixin,
)
from core.panels import SearchEngineOptimisationPanel
from export_readiness.models import Tag
class GreatInternationalApp(ExclusivePageMixin, ServiceMixin, BasePage):
slug_identity = 'great-international-app'
service_name_value = cms.GREAT_INTERNATIONAL
@classmethod
def get_required_translatable_fields(cls):
return []
@classmethod
def allowed_subpage_models(cls):
return [InternationalArticleListingPage,
InternationalTopicLandingPage,
InternationalCuratedTopicLandingPage,
InternationalRegionPage,
InternationalHomePage]
class InternationalSectorPage(BasePage):
service_name_value = cms.GREAT_INTERNATIONAL
parent_page_types = ['great_international.InternationalTopicLandingPage']
subpage_types = []
tags = ParentalManyToManyField(Tag, blank=True)
heading = models.CharField(max_length=255, verbose_name='Sector name')
sub_heading = models.CharField(max_length=255, blank=True)
hero_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
on_delete=models.SET_NULL,
related_name='+'
)
heading_teaser = models.TextField(blank=True, verbose_name='Introduction')
section_one_body = MarkdownField(
null=True,
verbose_name='3 unique selling points markdown'
)
section_one_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
on_delete=models.SET_NULL,
related_name='+',
verbose_name='Image for unique selling points'
)
section_one_image_caption = models.CharField(
max_length=255,
blank=True,
verbose_name='Image caption')
section_one_image_caption_company = models.CharField(
max_length=255,
blank=True,
verbose_name='Image caption attribution')
statistic_1_number = models.CharField(max_length=255)
statistic_1_heading = models.CharField(max_length=255)
statistic_1_smallprint = models.CharField(max_length=255, blank=True)
statistic_2_number = models.CharField(max_length=255)
statistic_2_heading = models.CharField(max_length=255)
statistic_2_smallprint = models.CharField(max_length=255, blank=True)
statistic_3_number = models.CharField(max_length=255, blank=True)
statistic_3_heading = models.CharField(max_length=255, blank=True)
statistic_3_smallprint = models.CharField(max_length=255, blank=True)
statistic_4_number = models.CharField(max_length=255, blank=True)
statistic_4_heading = models.CharField(max_length=255, blank=True)
statistic_4_smallprint = models.CharField(max_length=255, blank=True)
statistic_5_number = models.CharField(max_length=255, blank=True)
statistic_5_heading = models.CharField(max_length=255, blank=True)
statistic_5_smallprint = models.CharField(max_length=255, blank=True)
statistic_6_number = models.CharField(max_length=255, blank=True)
statistic_6_heading = models.CharField(max_length=255, blank=True)
statistic_6_smallprint = models.CharField(max_length=255, blank=True)
section_two_heading = models.CharField(
max_length=255,
verbose_name='Spotlight'
)
section_two_teaser = models.TextField(
verbose_name='Spotlight summary'
)
section_two_subsection_one_icon = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
verbose_name='Spotlight 1 icon'
)
section_two_subsection_one_heading = models.CharField(
max_length=255,
verbose_name='Spotlight 1 heading'
)
section_two_subsection_one_body = models.TextField(
verbose_name='Spotlight 1 body'
)
section_two_subsection_two_icon = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
verbose_name='Spotlight 2 icon'
)
section_two_subsection_two_heading = models.CharField(
max_length=255,
verbose_name='Spotlight 2 heading'
)
section_two_subsection_two_body = models.TextField(
verbose_name='Spotlight 2 body'
)
section_two_subsection_three_icon = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
verbose_name='Spotlight 3 icon'
)
section_two_subsection_three_heading = models.CharField(
max_length=255,
verbose_name='Spotlight 3 heading'
)
section_two_subsection_three_body = models.TextField(
verbose_name='Spotlight 3 body'
)
case_study_title = models.CharField(max_length=255, blank=True)
case_study_description = models.CharField(max_length=255, blank=True)
case_study_cta_text = models.TextField(
blank=True,
verbose_name='Case study link text'
)
case_study_cta_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
verbose_name='Case study link URL'
)
case_study_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
section_three_heading = models.CharField(
max_length=255,
blank=True,
verbose_name='Fact sheets heading'
)
section_three_teaser = models.TextField(
blank=True,
verbose_name='Fact sheets teaser'
)
section_three_subsection_one_heading = models.CharField(
max_length=255,
blank=True,
verbose_name='Fact sheet 1 heading'
)
section_three_subsection_one_teaser = models.TextField(
blank=True,
verbose_name='Fact sheet 1 teaser'
)
section_three_subsection_one_body = MarkdownField(
blank=True,
null=True,
verbose_name='Fact sheet 1 body'
)
section_three_subsection_two_heading = models.CharField(
max_length=255,
blank=True,
verbose_name='Fact sheet 2 heading'
)
section_three_subsection_two_teaser = models.TextField(
blank=True,
verbose_name='Fact sheet 2 teaser'
)
section_three_subsection_two_body = MarkdownField(
blank=True,
null=True,
verbose_name='Fact sheet 2 body'
)
related_page_one = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
related_page_two = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
related_page_three = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
content_panels = [
MultiFieldPanel(
heading='Heading',
children=[
FieldPanel('heading'),
FieldPanel('sub_heading'),
ImageChooserPanel('hero_image'),
FieldPanel('heading_teaser')
]
),
MultiFieldPanel(
heading='Unique selling points',
children=[
HelpPanel(
'Use H2 (##) markdown for the three subheadings'),
FieldRowPanel(
[
FieldPanel('section_one_body'),
MultiFieldPanel(
[
ImageChooserPanel('section_one_image'),
FieldPanel('section_one_image_caption'),
FieldPanel('section_one_image_caption_company')
]
)
]
)
]
),
MultiFieldPanel(
heading='Statistics',
children=[
FieldRowPanel(
[
MultiFieldPanel(
[
FieldPanel('statistic_1_number'),
FieldPanel('statistic_1_heading'),
FieldPanel('statistic_1_smallprint')
]
),
MultiFieldPanel(
[
FieldPanel('statistic_2_number'),
FieldPanel('statistic_2_heading'),
FieldPanel('statistic_2_smallprint')
]
),
MultiFieldPanel(
[
FieldPanel('statistic_3_number'),
FieldPanel('statistic_3_heading'),
FieldPanel('statistic_3_smallprint')
]
),
MultiFieldPanel(
[
FieldPanel('statistic_4_number'),
FieldPanel('statistic_4_heading'),
FieldPanel('statistic_4_smallprint')
]
),
MultiFieldPanel(
[
FieldPanel('statistic_5_number'),
FieldPanel('statistic_5_heading'),
FieldPanel('statistic_5_smallprint')
]
),
MultiFieldPanel(
[
FieldPanel('statistic_6_number'),
FieldPanel('statistic_6_heading'),
FieldPanel('statistic_6_smallprint')
]
),
]
)
]
),
MultiFieldPanel(
heading='Spotlight',
children=[
FieldPanel('section_two_heading'),
FieldPanel('section_two_teaser'),
FieldRowPanel(
[
MultiFieldPanel(
[
ImageChooserPanel(
'section_two_subsection_one_icon'),
FieldPanel(
'section_two_subsection_one_heading'),
FieldPanel(
'section_two_subsection_one_body')
]
),
MultiFieldPanel(
[
ImageChooserPanel(
'section_two_subsection_two_icon'),
FieldPanel(
'section_two_subsection_two_heading'),
FieldPanel(
'section_two_subsection_two_body')
]
),
MultiFieldPanel(
[
ImageChooserPanel(
'section_two_subsection_three_icon'),
FieldPanel(
'section_two_subsection_three_heading'),
FieldPanel(
'section_two_subsection_three_body')
]
)
]
)
]
),
MultiFieldPanel(
heading='Case Study',
classname='collapsible',
children=[
FieldPanel('case_study_title'),
FieldPanel('case_study_description'),
FieldPanel('case_study_cta_text'),
PageChooserPanel(
'case_study_cta_page',
[
'great_international.InternationalArticlePage',
'great_international.InternationalCampaignPage',
]),
ImageChooserPanel('case_study_image')
]
),
MultiFieldPanel(
heading='Fact Sheets',
classname='collapsible collapsed',
children=[
FieldPanel('section_three_heading'),
FieldPanel('section_three_teaser'),
FieldRowPanel(
[
MultiFieldPanel(
[
FieldPanel(
'section_three_subsection_one_heading'),
FieldPanel(
'section_three_subsection_one_teaser'),
HelpPanel(
'For accessibility reasons, use only '
'"#### [Your text here]" for subheadings '
'in this markdown field'),
FieldPanel(
'section_three_subsection_one_body')
]
),
MultiFieldPanel(
[
FieldPanel(
'section_three_subsection_two_heading'),
FieldPanel(
'section_three_subsection_two_teaser'),
HelpPanel(
'For accessibility reasons, use only '
'"#### [Your text here]" for subheadings '
'in this markdown field'),
FieldPanel(
'section_three_subsection_two_body')
]
)
]
)
]
),
MultiFieldPanel(
heading='Related articles',
children=[
FieldRowPanel([
PageChooserPanel(
'related_page_one',
[
'great_international.InternationalArticlePage',
'great_international.InternationalCampaignPage',
]),
PageChooserPanel(
'related_page_two',
[
'great_international.InternationalArticlePage',
'great_international.InternationalCampaignPage',
]),
PageChooserPanel(
'related_page_three',
[
'great_international.InternationalArticlePage',
'great_international.InternationalCampaignPage',
]),
])
]
),
SearchEngineOptimisationPanel()
]
settings_panels = [
FieldPanel('title_en_gb'),
FieldPanel('slug'),
FieldPanel('tags', widget=CheckboxSelectMultiple)
]
class InternationalHomePage(ExclusivePageMixin, BasePage):
service_name_value = cms.GREAT_INTERNATIONAL
slug_identity = cms.GREAT_HOME_INTERNATIONAL_SLUG
subpage_types = []
tariffs_title = models.CharField(max_length=255)
tariffs_description = MarkdownField()
tariffs_link = models.URLField()
tariffs_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
news_title = models.CharField(max_length=255)
related_page_one = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
related_page_two = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
related_page_three = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
content_panels = [
MultiFieldPanel(
heading='Tariffs',
children=[
FieldPanel('tariffs_title'),
FieldPanel('tariffs_description'),
FieldPanel('tariffs_link'),
ImageChooserPanel('tariffs_image')
]
),
MultiFieldPanel(
heading='News section',
children=[
FieldPanel('news_title'),
FieldRowPanel([
PageChooserPanel(
'related_page_one',
[
'great_international.InternationalArticlePage',
'great_international.InternationalCampaignPage',
]),
PageChooserPanel(
'related_page_two',
[
'great_international.InternationalArticlePage',
'great_international.InternationalCampaignPage',
]),
PageChooserPanel(
'related_page_three',
[
'great_international.InternationalArticlePage',
'great_international.InternationalCampaignPage',
]),
])
]
),
SearchEngineOptimisationPanel(),
]
settings_panels = [
FieldPanel('title_en_gb'),
FieldPanel('slug'),
]
class InternationalRegionPage(BasePage):
service_name_value = cms.GREAT_INTERNATIONAL
parent_page_types = ['great_international.GreatInternationalApp']
subpage_types = [
'great_international.InternationalLocalisedFolderPage'
]
tags = ParentalManyToManyField(Tag, blank=True)
settings_panels = [
FieldPanel('title_en_gb'),
FieldPanel('slug'),
FieldPanel('tags', widget=CheckboxSelectMultiple)
]
def save(self, *args, **kwargs):
return super().save(*args, **kwargs)
class InternationalLocalisedFolderPage(BasePage):
service_name_value = cms.GREAT_INTERNATIONAL
parent_page_types = ['great_international.InternationalRegionPage']
subpage_types = [
'great_international.InternationalArticlePage',
'great_international.InternationalCampaignPage'
]
settings_panels = [
FieldPanel('title_en_gb'),
FieldPanel('slug'),
]
def save(self, *args, **kwargs):
if self.pk is None:
self.slug = slugify(f'{self.slug}-{self.get_parent().slug}')
return super().save(*args, **kwargs)
class InternationalArticlePage(BasePage):
service_name_value = cms.GREAT_INTERNATIONAL
parent_page_types = [
'great_international.InternationalArticleListingPage',
'great_international.InternationalCampaignPage',
'great_international.InternationalLocalisedFolderPage',
'great_international.InternationalCuratedTopicLandingPage',
'great_international.InternationalGuideLandingPage',
]
subpage_types = []
article_title = models.CharField(max_length=255)
article_teaser = models.CharField(max_length=255)
article_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
article_body_text = MarkdownField()
related_page_one = models.ForeignKey(
'great_international.InternationalArticlePage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
related_page_two = models.ForeignKey(
'great_international.InternationalArticlePage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
related_page_three = models.ForeignKey(
'great_international.InternationalArticlePage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
tags = ParentalManyToManyField(Tag, blank=True)
content_panels = [
FieldPanel('article_title'),
MultiFieldPanel(
heading='Article content',
children=[
FieldPanel('article_teaser'),
ImageChooserPanel('article_image'),
FieldPanel('article_body_text')
]
),
MultiFieldPanel(
heading='Related articles',
children=[
FieldRowPanel([
PageChooserPanel(
'related_page_one',
'great_international.InternationalArticlePage'),
PageChooserPanel(
'related_page_two',
'great_international.InternationalArticlePage'),
PageChooserPanel(
'related_page_three',
'great_international.InternationalArticlePage'),
]),
]
),
SearchEngineOptimisationPanel(),
]
settings_panels = [
FieldPanel('title_en_gb'),
FieldPanel('slug'),
FieldPanel('tags', widget=CheckboxSelectMultiple)
]
class InternationalArticleListingPage(BasePage):
service_name_value = cms.GREAT_INTERNATIONAL
parent_page_types = [
'great_international.GreatInternationalApp',
'great_international.InternationalTopicLandingPage'
]
subpage_types = [
'great_international.InternationalArticlePage',
'great_international.InternationalCampaignPage',
]
landing_page_title = models.CharField(max_length=255)
hero_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
hero_teaser = models.CharField(max_length=255, null=True, blank=True)
list_teaser = MarkdownField(null=True, blank=True)
tags = ParentalManyToManyField(Tag, blank=True)
@property
def articles_count(self):
return self.get_descendants().type(
InternationalArticlePage
).live().count()
content_panels = [
FieldPanel('landing_page_title'),
MultiFieldPanel(
heading='Hero',
children=[
ImageChooserPanel('hero_image'),
FieldPanel('hero_teaser')
]
),
FieldPanel('list_teaser'),
SearchEngineOptimisationPanel(),
]
settings_panels = [
FieldPanel('title_en_gb'),
FieldPanel('slug'),
FieldPanel('tags', widget=CheckboxSelectMultiple)
]
class InternationalCampaignPage(BasePage):
service_name_value = cms.GREAT_INTERNATIONAL
parent_page_types = [
'great_international.InternationalArticleListingPage',
'great_international.InternationalTopicLandingPage',
'great_international.InternationalLocalisedFolderPage'
]
subpage_types = [
'great_international.InternationalArticlePage'
]
view_path = 'campaigns/'
campaign_teaser = models.CharField(max_length=255, null=True, blank=True)
campaign_heading = models.CharField(max_length=255)
campaign_hero_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
section_one_heading = models.CharField(max_length=255)
section_one_intro = MarkdownField()
section_one_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
selling_point_one_icon = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
selling_point_one_heading = models.CharField(max_length=255)
selling_point_one_content = MarkdownField()
selling_point_two_icon = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
selling_point_two_heading = models.CharField(
max_length=255,
null=True,
blank=True,
)
selling_point_two_content = MarkdownField(null=True, blank=True)
selling_point_three_icon = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
selling_point_three_heading = models.CharField(
max_length=255,
null=True,
blank=True
)
selling_point_three_content = MarkdownField(null=True, blank=True)
section_one_contact_button_url = models.CharField(
max_length=255,
null=True,
blank=True
)
section_one_contact_button_text = models.CharField(
max_length=255,
null=True,
blank=True
)
section_two_heading = models.CharField(max_length=255)
section_two_intro = MarkdownField()
section_two_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
section_two_contact_button_url = models.CharField(
max_length=255,
null=True,
blank=True
)
section_two_contact_button_text = models.CharField(
max_length=255,
null=True,
blank=True
)
related_content_heading = models.CharField(max_length=255)
related_content_intro = MarkdownField()
related_page_one = models.ForeignKey(
'great_international.InternationalArticlePage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
related_page_two = models.ForeignKey(
'great_international.InternationalArticlePage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
related_page_three = models.ForeignKey(
'great_international.InternationalArticlePage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
cta_box_message = models.CharField(max_length=255)
cta_box_button_url = models.CharField(max_length=255)
cta_box_button_text = models.CharField(max_length=255)
tags = ParentalManyToManyField(Tag, blank=True)
content_panels = [
MultiFieldPanel(
heading='Hero section',
children=[
FieldPanel('campaign_heading'),
FieldPanel('campaign_teaser'),
ImageChooserPanel('campaign_hero_image'),
]
),
MultiFieldPanel(
heading='Section one',
children=[
FieldPanel('section_one_heading'),
FieldPanel('section_one_intro'),
ImageChooserPanel('section_one_image'),
FieldRowPanel([
MultiFieldPanel(
children=[
ImageChooserPanel('selling_point_one_icon'),
FieldPanel('selling_point_one_heading'),
FieldPanel('selling_point_one_content'),
]
),
MultiFieldPanel(
children=[
ImageChooserPanel('selling_point_two_icon'),
FieldPanel('selling_point_two_heading'),
FieldPanel('selling_point_two_content'),
]
),
MultiFieldPanel(
children=[
ImageChooserPanel('selling_point_three_icon'),
FieldPanel('selling_point_three_heading'),
FieldPanel('selling_point_three_content'),
]
),
]),
FieldRowPanel([
FieldPanel('section_one_contact_button_text'),
# support for graphical simulation interface
#########################################################
def draw(self,dc,transform,px,py):
c = self.properties.get('color','blue')
dc.SetPen(wx.Pen(c,1,wx.SOLID))
dc.SetBrush(wx.Brush(c))
radius = transform[0]/16
dc.DrawCircle(px,py,radius)
def draw_on_link(self,dc,transform,n1,n2):
px = n1[0] + int(0.2*(n2[0] - n1[0]))
py = n1[1] + int(0.2*(n2[1] - n1[1]))
self.draw(dc,transform,px,py)
def nearby(self,pos,n1,n2):
px = n1[0] + 0.2*(n2[0] - n1[0])
py = n1[1] + 0.2*(n2[1] - n1[1])
dx = px - pos[0]
dy = py - pos[1]
if abs(dx) < .1 and abs(dy) < .1:
return self.status()
else: return None
def status(self):
return self.__repr__()
################################################################################
#
# Network -- a collection of network nodes, links and packets
#
# Network.make_node(loc,address=None) -- make a new network node
# Network.add_node(x,y,address=None) -- add a new node at specified location
# Network.find_node(x,y) -- return node at given location
# Network.map_node(f,default=0) -- see below
# Network.make_link(n1,n2) -- make a new link between n1 and n2
# Network.add_link(x1,y2,x2,y2) -- add link between specified nodes
#
# Network.make_packet(src,dst,type,start,**props) -- make a new packet
# Network.duplicate_packet(p) -- duplicate a packet
#
# Network.reset() -- initialize network state
# Network.step(count=1) -- simulate count timesteps
#
################################################################################
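#
# A minimal usage sketch (hypothetical; it assumes the Node/Link classes
# defined earlier in this file provide the reset/phase1/phase2 methods that
# Network.step relies on):
#
#   net = Network(simtime=100)
#   net.add_node(0, 0, address='alpha')
#   net.add_node(0, 1, address='bravo')
#   net.add_link(0, 0, 0, 1)
#   net.reset()
#   net.step(count=10)   # simulate 10 timesteps
#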
class Network:
def __init__(self,simtime):
self.nodes = {}
self.addresses = {}
self.nlist = []
self.links = []
self.time = 0
self.pending = 0
self.packets = []
self.npackets = 0
self.max_x = 0
self.max_y = 0
self.simtime = simtime
self.playstep = 1.0 # 1 second play step by default
self.numnodes = 0 # TBD
# override to make your own type of node
def make_node(self,loc,address=None):
return Node(loc,address=address)
# add a node to the network
def add_node(self,x,y,address=None):
n = self.find_node(x,y)
if n is None:
n = self.make_node((x,y),address=address)
n.network = self
if address is not None:
self.addresses[address] = n
self.nlist.append(n)
ynodes = self.nodes.get(x,{})
ynodes[y] = n
self.nodes[x] = ynodes
self.max_x = max(self.max_x,x)
self.max_y = max(self.max_y,y)
return n
def set_nodes(self,n):
self.numnodes = n
# locate a node given its location
def find_node(self,x,y):
ynodes = self.nodes.get(x,None)
if ynodes is not None:
return ynodes.get(y,None)
return None
# apply f to each network node in top-to-bottom, left-to-right
# order. Returns list of return values (default value is used
# if a particular grid point doesn't contain a node). Useful
# for gathering statistical data that can be processed by Matlab.
def map_node(self,f,default=0):
result = []
for row in xrange(self.max_y+1):
for col in xrange(self.max_x+1):
                node = self.find_node(col,row)
if node: result.append(f(node))
else: result.append(default)
return result
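    # Example (hypothetical): assuming each node keeps a counter attribute such
    # as n.npackets, a Matlab-friendly vector over the whole grid would be
    #   counts = net.map_node(lambda n: n.npackets, default=0)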
# override to make your own type of link
def make_link(self,n1,n2):
return Link(n1,n2)
# add a link between nodes at the specified locations
def add_link(self,x1,y1,x2,y2):
n1 = self.find_node(x1,y1)
n2 = self.find_node(x2,y2)
if n1 is not None and n2 is not None:
link = self.make_link(n1,n2)
link.network = self
self.links.append(link)
# override to make your own type of packet
def make_packet(self,src,dest,type,start,**props):
p = Packet(src,dest,type,start,**props)
p.network = self
self.packets.append(p)
self.npackets += 1
return p
# duplicate existing packet
def duplicate_packet(self,old):
return self.make_packet(old.source,old.destination,old.type,self.time,
**old.properties)
# compute manhattan distance between two nodes
def manhattan_distance(self,n1,n2):
dx = n1[0] - n2[0]
dy = n1[1] - n2[1]
return abs(dx) + abs(dy)
# return network to initial state
def reset(self):
for n in self.nlist: n.reset()
self.time = 0
self.pending = 0
self.packets = []
self.npackets = 0
        self.pending = 1 # ensure at least one simulation step
# simulate network one timestep at a time. At each timestep
# each node processes one packet from each of its incoming links
def step(self,count=1):
stop_time = self.time + count
while self.time < stop_time and self.pending > 0:
# phase 1: nodes collect one packet from each link
for n in self.nlist: n.phase1()
# phase 2: nodes process collected packets, perhaps sending
# some to outgoing links. Also nodes can originate packets
# of their own.
self.pending = 0
for n in self.nlist: self.pending += n.phase2(self.time)
# increment time
self.time += 1
return self.pending
#########################################################
# support for graphical simulation interface
#########################################################
def draw(self,dc,transform):
# draw links
for link in self.links:
link.draw(dc,transform)
# draw nodes
for node in self.nlist:
node.draw(dc,transform)
def click(self,pos,which):
for node in self.nlist:
if node.click(pos,which):
return True
else:
for link in self.links:
if link.click(pos,which):
return True
return False
def status(self,statusbar,pos):
for node in self.nlist:
msg = node.nearby(pos)
if msg: break
else:
for link in self.links:
msg = link.nearby(pos)
if msg: break
else:
msg = ''
statusbar.SetFieldsCount(4)
statusbar.SetStatusWidths([80,80,80,-1])
statusbar.SetStatusText('Time: %d' % self.time, 0)
statusbar.SetStatusText('Pending: %s' % self.pending, 1)
statusbar.SetStatusText('Total: %s' % self.npackets, 2)
statusbar.SetStatusText('Status: %s' % msg, 3)
grid_node_names = ['alpha', 'bravo', 'charlie', 'delta', 'echo', 'foxtrot',
'golf', 'hotel', 'india', 'juliet', 'kilo', 'lima', 'mike',
'november', 'oscar', 'papa', 'quebec', 'romeo', 'sierra',
'tango', 'uniform', 'victor', 'whiskey', 'xray', 'yankee',
'zulu']
class GridNetwork(Network):
# make a grid network of specified size
    def __init__(self,nrows,ncols,simtime):
        Network.__init__(self,simtime)
# make a manhattan grid of nodes
for r in xrange(nrows):
for c in xrange(ncols):
index = r*ncols + c
addr = grid_node_names[index % len(grid_node_names)]
if index >= len(grid_node_names):
addr += str(index / len(grid_node_names))
self.add_node(r,c,address=addr)
for r in xrange(nrows):
# horizontal links first
for c in xrange(ncols):
if c > 0: self.add_link(r,c,r,c-1)
# then vertical links
for c in xrange(ncols):
if r > 0: self.add_link(r,c,r-1,c)
################################################################################
#
# NetSim -- a graphical front end for network simulations
#
################################################################################
# convert from network to screen coords
# transform = (scale,(xoffset,yoffset))
def net2screen(loc,transform):
return (transform[1][0]+loc[0]*transform[0],
transform[1][1]+loc[1]*transform[0])
# convert from screen to network coords
# transform = (scale,(xoffset,yoffset))
def screen2net(loc,transform):
return (float(loc[0]-transform[1][0])/transform[0],
float(loc[1]-transform[1][1])/transform[0])
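# For example, with transform = (50, (10, 20)) the network location (2, 1)
# maps to the screen pixel (10 + 2*50, 20 + 1*50) = (110, 70), and
# screen2net((110, 70), (50, (10, 20))) recovers (2.0, 1.0).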
# is pt within distance of line between end1 and end2?
def nearby(pt,end1,end2,distance):
if end1[0] == end2[0]: # vertical wire
if abs(pt[0] - end1[0]) > distance:
return False
y1 = min(end1[1],end2[1])
y2 = max(end1[1],end2[1])
return pt[1] >= y1 - distance and pt[1] <= y2 + distance
elif end1[1] == end2[1]: # horizontal wire
if abs(pt[1] - end1[1]) > distance:
return False
x1 = min(end1[0],end2[0])
x2 = max(end1[0],end2[0])
return pt[0] >= x1 - distance and pt[0] <= x2 + distance
else: # non-manhattan wire
# slope and intercept for line between end1 and end2
slope1 = float(end1[1] - end2[1])/(end1[0] - end2[0])
intercept1 = float(end1[1]) - slope1*end1[0]
# slope and intercept for perpendicular line passing through pt
slope2 = -1/slope1
intercept2 = float(pt[1]) - slope2*pt[0]
# x coordinate of intersection of those two lines
xi = (intercept2 - intercept1)/(slope1 - slope2)
if xi < min(end1[0],end2[0]) or xi > max(end1[0],end2[0]):
return False
        dx = pt[0] - xi
dy = pt[1] - (slope2*xi + intercept2)
return (dx*dx) + (dy*dy) <= distance*distance
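# For example, nearby((1.0, 1.05), (0, 1), (2, 1), 0.1) is True: the point sits
# 0.05 above a horizontal wire, within the 0.1 tolerance, whereas
# nearby((1.0, 1.2), (0, 1), (2, 1), 0.1) is False.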
# A panel that displays a network
class NetPanel(wx.Panel):
def __init__(self,parent,statusbar):
wx.Panel.__init__(self,parent,-1,wx.DefaultPosition,(10,10))
self.SetBackgroundColour('white')
self.SetMinSize((100,100))
self.statusbar = statusbar
self.network = None
self.setupBuffer = False
self.redraw = False
self.playmode = False
self.lastplaytime = 0
self.transform = (2,(0,0))
self.SetupBuffer()
self.Bind(wx.EVT_PAINT,self.OnPaint)
self.Bind(wx.EVT_SIZE,self.OnSize)
self.Bind(wx.EVT_IDLE,self.OnIdle)
self.Bind(wx.EVT_MOTION,self.OnMotion)
self.Bind(wx.EVT_LEFT_DOWN,self.OnLeftClick)
def SetupBuffer(self):
# use an off-screen drawing buffer to reduce flicker
size = self.GetClientSize()
self.buffer = wx.EmptyBitmap(size.width,size.height)
self.setupBuffer = False
self.redraw = True # fill up new buffer
def OnSize(self,event):
# wait until IDLE to actually do refresh just in case there
# are multiple SIZE events in a row that we can roll into one
self.setupBuffer = True
def OnClick(self,event,which):
pos = screen2net(event.GetPositionTuple(),self.transform)
if self.network.click(pos,which):
self.redraw = True
def OnLeftClick(self,event):
self.OnClick(event,'left')
def OnMotion(self,event):
pos = screen2net(event.GetPositionTuple(),self.transform)
self.network.status(self.statusbar,pos)
def OnIdle(self,event):
if self.setupBuffer:
# create a new drawing buffer
self.SetupBuffer()
if self.redraw:
self.DrawNetwork()
self.Refresh(False)
self.redraw = False
self.network.status(self.statusbar,(-10,-10))
if self.playmode == True:
self.redraw = True
curtime = time.clock()
delta = curtime - self.lastplaytime
if delta > self.network.playstep:
if self.network.simtime > self.network.time:
self.network.step(1)
self.lastplaytime = curtime
else:
self.playmode = False
event.RequestMore()
def OnPaint(self,event):
# just refresh the screen from our buffer
dc = wx.BufferedPaintDC(self,self.buffer)
def OnReset(self,event):
self.network.reset()
self.network.status(self.statusbar,(-10,-10))
self.redraw = True
def OnStep(self,event):
button = event.GetEventObject().GetLabel()
arg = button[button.find(' '):]
if arg == ' all': count = self.network.simtime-self.network.time
else: count = int(arg)
self.network.step(count=count)
self.network.status(self.statusbar,(-10,-10))
self.redraw = True
def OnPlay(self,event):
self.playmode = True
def OnPause(self,event):
self.playmode = False
def OnNNodes(self,event):
nnodes = event.GetEventObject().GetValue()
self.network.set_nodes(nnodes)
self.redraw = True
def OnExit(self,event):
self.network.status(self.statusbar,(-10,-10))
self.redraw = True
sys.exit(1)
def DrawNetwork(self):
# erase buffer
dc = wx.BufferedDC(None,self.buffer)
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
dc.Clear()
# compute grid size for network
size = self.GetClientSize()
netsize = (self.network.max_x+1,self.network.max_y+1)
grid = min(size[0]/netsize[0],size[1]/netsize[1])
xoffset = (size[0] - (netsize[0]-1)*grid)/2
yoffset = (size[1] - (netsize[1]-1)*grid)/2
self.transform = (grid, (xoffset,yoffset))
self.network.draw(dc,self.transform)
def SetNetwork(self,network):
self.network = network
self.network.reset()
self.redraw = True
class NetFrame(wx.Frame):
def __init__(self,parent=None,id=-1,size=(1000,500),
pos=wx.DefaultPosition,title='NetSim'):
wx.Frame.__init__(self,parent,id,title,pos,size)
# This file contains methods to deal with criu images.
#
# According to http://criu.org/Images, criu images can be described
# with the following grammar:
#
# IMAGE_FILE ::= MAGIC { ENTRY }
# ENTRY ::= SIZE PAYLOAD [ EXTRA ]
# PAYLOAD ::= "message encoded in ProtocolBuffer format"
# EXTRA ::= "arbitrary blob, depends on the PAYLOAD contents"
#
# MAGIC ::= "32 bit integer"
# SIZE ::= "32 bit integer, equals the PAYLOAD length"
#
# Images v1.1 NOTE: MAGIC now consists of two 32 bit integers, the first one is
# MAGIC_COMMON or MAGIC_SERVICE and the second one is the same as MAGIC
# in images V1.0. We don't keep "first" magic in json images.
#
# In order to convert images to human-readable format, we use dict(json).
# Using json not only allows us to easily read/write images, but also
# to use a great variety of tools out there to manipulate them.
# It also allows us to clearly describe the criu image structure.
#
# Using dict(json) format, criu images can be described like:
#
# {
# 'magic' : 'FOO',
# 'entries' : [
# entry,
# ...
# ]
# }
#
# Entry, in its turn, could be described as:
#
# {
# pb_msg,
# 'extra' : extra_msg
# }
#
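# For example, a (hypothetical) UTSNS image converted to json could look like:
#
# {
#     "magic": "UTSNS",
#     "entries": [
#         {
#             "nodename": "some-host",
#             "domainname": "(none)"
#         }
#     ]
# }
#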
import io
import base64
import struct
import os
import array
from . import magic
from . import pb
from . import pb2dict
if "encodebytes" not in dir(base64):
base64.encodebytes = base64.encodestring
base64.decodebytes = base64.decodestring
#
# Predefined hardcoded constants
sizeof_u16 = 2
sizeof_u32 = 4
sizeof_u64 = 8
# A helper for rounding
def round_up(x, y):
return (((x - 1) | (y - 1)) + 1)
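# For example, round_up(10, 8) == 16 and round_up(16, 8) == 16; the bit trick
# (((x - 1) | (y - 1)) + 1) rounds x up to the next multiple of y when y is a
# power of two, which holds for the sizeof_* constants above.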
class MagicException(Exception):
def __init__(self, magic):
self.magic = magic
# Generic class to handle loading/dumping criu images entries from/to bin
# format to/from dict(json).
class entry_handler:
"""
Generic class to handle loading/dumping criu images
entries from/to bin format to/from dict(json).
"""
def __init__(self, payload, extra_handler=None):
"""
Sets payload class and extra handler class.
"""
self.payload = payload
self.extra_handler = extra_handler
def load(self, f, pretty=False, no_payload=False):
"""
Convert criu image entries from binary format to dict(json).
        Takes a file-like object and returns a list with entries in
dict(json) format.
"""
entries = []
while True:
entry = {}
# Read payload
pbuff = self.payload()
buf = f.read(4)
if buf == b'':
break
size, = struct.unpack('i', buf)
pbuff.ParseFromString(f.read(size))
entry = pb2dict.pb2dict(pbuff, pretty)
# Read extra
if self.extra_handler:
if no_payload:
def human_readable(num):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if num < 1024.0:
if int(num) == num:
return "%d%sB" % (num, unit)
else:
return "%.1f%sB" % (num, unit)
num /= 1024.0
return "%.1fYB" % num
pl_size = self.extra_handler.skip(f, pbuff)
entry['extra'] = '... <%s>' % human_readable(pl_size)
else:
entry['extra'] = self.extra_handler.load(f, pbuff)
entries.append(entry)
return entries
def loads(self, s, pretty=False):
"""
Same as load(), but takes a string as an argument.
"""
f = io.BytesIO(s)
return self.load(f, pretty)
def dump(self, entries, f):
"""
Convert criu image entries from dict(json) format to binary.
Takes a list of entries and a file-like object to write entries
in binary format to.
"""
for entry in entries:
extra = entry.pop('extra', None)
# Write payload
pbuff = self.payload()
pb2dict.dict2pb(entry, pbuff)
pb_str = pbuff.SerializeToString()
size = len(pb_str)
f.write(struct.pack('i', size))
f.write(pb_str)
# Write extra
if self.extra_handler and extra:
self.extra_handler.dump(extra, f, pbuff)
def dumps(self, entries):
"""
Same as dump(), but doesn't take file-like object and just
returns a string.
"""
        f = io.BytesIO()
        self.dump(entries, f)
        return f.getvalue()
def count(self, f):
"""
        Counts the number of top-level objects in the image file
"""
entries = 0
while True:
buf = f.read(4)
            if buf == b'':
break
size, = struct.unpack('i', buf)
f.seek(size, 1)
entries += 1
return entries
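# A minimal usage sketch (hypothetical file name; it assumes the caller has
# already consumed the leading MAGIC word(s) described at the top of this file):
#
#   with open('utsns-1.img', 'rb') as f:
#       f.read(8)   # skip magic; its width depends on the image version
#       entries = entry_handler(pb.utsns_entry).load(f)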
# Special handler for pagemap.img
class pagemap_handler:
"""
    Special entry handler for pagemap.img, which is unique in that
    it has a header of pagemap_head type followed by entries
of pagemap_entry type.
"""
def load(self, f, pretty=False, no_payload=False):
entries = []
pbuff = pb.pagemap_head()
while True:
buf = f.read(4)
if buf == b'':
break
size, = struct.unpack('i', buf)
pbuff.ParseFromString(f.read(size))
entries.append(pb2dict.pb2dict(pbuff, pretty))
pbuff = pb.pagemap_entry()
return entries
def loads(self, s, pretty=False):
f = io.BytesIO(s)
return self.load(f, pretty)
def dump(self, entries, f):
pbuff = pb.pagemap_head()
for item in entries:
pb2dict.dict2pb(item, pbuff)
pb_str = pbuff.SerializeToString()
size = len(pb_str)
f.write(struct.pack('i', size))
f.write(pb_str)
pbuff = pb.pagemap_entry()
def dumps(self, entries):
        f = io.BytesIO()
        self.dump(entries, f)
        return f.getvalue()
def count(self, f):
return entry_handler(None).count(f) - 1
# Special handler for ghost-file.img
class ghost_file_handler:
def load(self, f, pretty=False, no_payload=False):
entries = []
gf = pb.ghost_file_entry()
buf = f.read(4)
size, = struct.unpack('i', buf)
gf.ParseFromString(f.read(size))
g_entry = pb2dict.pb2dict(gf, pretty)
if gf.chunks:
entries.append(g_entry)
while True:
gc = pb.ghost_chunk_entry()
buf = f.read(4)
if len(buf) == 0:
break
size, = struct.unpack('i', buf)
gc.ParseFromString(f.read(size))
entry = pb2dict.pb2dict(gc, pretty)
if no_payload:
f.seek(gc.len, os.SEEK_CUR)
else:
entry['extra'] = base64.encodebytes(f.read(gc.len)).decode('utf-8')
entries.append(entry)
else:
if no_payload:
f.seek(0, os.SEEK_END)
else:
g_entry['extra'] = base64.encodebytes(f.read()).decode('utf-8')
entries.append(g_entry)
return entries
def loads(self, s, pretty=False):
f = io.BytesIO(s)
return self.load(f, pretty)
def dump(self, entries, f):
pbuff = pb.ghost_file_entry()
item = entries.pop(0)
pb2dict.dict2pb(item, pbuff)
pb_str = pbuff.SerializeToString()
size = len(pb_str)
f.write(struct.pack('i', size))
f.write(pb_str)
if pbuff.chunks:
for item in entries:
pbuff = pb.ghost_chunk_entry()
pb2dict.dict2pb(item, pbuff)
pb_str = pbuff.SerializeToString()
size = len(pb_str)
f.write(struct.pack('i', size))
f.write(pb_str)
f.write(base64.decodebytes(item['extra']))
else:
f.write(base64.decodebytes(item['extra']))
def dumps(self, entries):
        f = io.BytesIO()
        self.dump(entries, f)
        return f.getvalue()
# In the following extra handlers we use base64 encoding
# to store binary data. Even though base64 increases
# the total size, it doesn't really matter, because our images
# do not store big amounts of binary data, which is
# negligible compared to the size of the pages.
class pipes_data_extra_handler:
def load(self, f, pload):
size = pload.bytes
data = f.read(size)
return base64.encodebytes(data)
def dump(self, extra, f, pload):
data = base64.decodebytes(extra)
f.write(data)
def skip(self, f, pload):
f.seek(pload.bytes, os.SEEK_CUR)
return pload.bytes
class sk_queues_extra_handler:
def load(self, f, pload):
size = pload.length
data = f.read(size)
return base64.encodebytes(data)
def dump(self, extra, f, _unused):
data = base64.decodebytes(extra)
f.write(data)
def skip(self, f, pload):
f.seek(pload.length, os.SEEK_CUR)
return pload.length
class tcp_stream_extra_handler:
def load(self, f, pbuff):
d = {}
inq = f.read(pbuff.inq_len)
outq = f.read(pbuff.outq_len)
d['inq'] = base64.encodebytes(inq)
d['outq'] = base64.encodebytes(outq)
return d
def dump(self, extra, f, _unused):
inq = base64.decodebytes(extra['inq'])
outq = base64.decodebytes(extra['outq'])
f.write(inq)
f.write(outq)
def skip(self, f, pbuff):
f.seek(0, os.SEEK_END)
return pbuff.inq_len + pbuff.outq_len
class ipc_sem_set_handler:
def load(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = sizeof_u16 * entry['nsems']
rounded = round_up(size, sizeof_u64)
s = array.array('H')
if s.itemsize != sizeof_u16:
raise Exception("Array size mismatch")
        s.frombytes(f.read(size))
f.seek(rounded - size, 1)
return s.tolist()
def dump(self, extra, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = sizeof_u16 * entry['nsems']
rounded = round_up(size, sizeof_u64)
s = array.array('H')
if s.itemsize != sizeof_u16:
raise Exception("Array size mismatch")
s.fromlist(extra)
if len(s) != entry['nsems']:
raise Exception("Number of semaphores mismatch")
        f.write(s.tobytes())
        f.write(b'\0' * (rounded - size))
def skip(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = sizeof_u16 * entry['nsems']
f.seek(round_up(size, sizeof_u64), os.SEEK_CUR)
return size
class ipc_msg_queue_handler:
def load(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
messages = []
for x in range(0, entry['qnum']):
buf = f.read(4)
            if buf == b'':
break
size, = struct.unpack('i', buf)
msg = pb.ipc_msg()
msg.ParseFromString(f.read(size))
rounded = round_up(msg.msize, sizeof_u64)
data = f.read(msg.msize)
f.seek(rounded - msg.msize, 1)
messages.append(pb2dict.pb2dict(msg))
messages.append(base64.encodebytes(data))
return messages
def dump(self, extra, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
for i in range(0, len(extra), 2):
msg = pb.ipc_msg()
pb2dict.dict2pb(extra[i], msg)
msg_str = msg.SerializeToString()
size = len(msg_str)
f.write(struct.pack('i', size))
f.write(msg_str)
rounded = round_up(msg.msize, sizeof_u64)
data = base64.decodebytes(extra[i + 1])
f.write(data[:msg.msize])
            f.write(b'\0' * (rounded - msg.msize))
def skip(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
pl_len = 0
for x in range(0, entry['qnum']):
buf = f.read(4)
            if buf == b'':
break
size, = struct.unpack('i', buf)
msg = pb.ipc_msg()
msg.ParseFromString(f.read(size))
rounded = round_up(msg.msize, sizeof_u64)
f.seek(rounded, os.SEEK_CUR)
pl_len += size + msg.msize
return pl_len
class ipc_shm_handler:
def load(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = entry['size']
data = f.read(size)
rounded = round_up(size, sizeof_u32)
f.seek(rounded - size, 1)
return base64.encodebytes(data)
def dump(self, extra, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = entry['size']
data = base64.decodebytes(extra)
rounded = round_up(size, sizeof_u32)
f.write(data[:size])
        f.write(b'\0' * (rounded - size))
def skip(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = entry['size']
rounded = round_up(size, sizeof_u32)
f.seek(rounded, os.SEEK_CUR)
return size
handlers = {
'INVENTORY': entry_handler(pb.inventory_entry),
'CORE': entry_handler(pb.core_entry),
'IDS': entry_handler(pb.task_kobj_ids_entry),
'CREDS': entry_handler(pb.creds_entry),
'UTSNS': entry_handler(pb.utsns_entry),
'TIMENS': entry_handler(pb.timens_entry),
'IPC_VAR': entry_handler(pb.ipc_var_entry),
'FS': entry_handler(pb.fs_entry),
# experiments/smal_shape.py
"""
Example usage:
python -m smalst.experiments.smal_shape --zebra_dir='smalst/zebra_no_toys_wtex_1000_0' --num_epochs=100000 --save_epoch_freq=20 --name=smal_net_600 --save_training_imgs=True --num_images=20000 --do_validation=True
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import os.path as osp
import numpy as np
import torch
import torchvision
from torch.autograd import Variable
import scipy.io as sio
import scipy
import scipy.misc
from collections import OrderedDict
import pickle as pkl
from ..data import zebra as zebra_data
from ..utils import visutil
from ..utils import smal_vis
from ..utils import image as image_utils
from ..nnutils import train_utils
from ..nnutils import loss_utils
from ..nnutils import smal_mesh_net
from ..nnutils.nmr import NeuralRenderer
from ..nnutils import geom_utils
flags.DEFINE_string('dataset', 'zebra', 'zebra')
# Weights:
flags.DEFINE_float('kp_loss_wt', 10., 'keypoint loss weight')
flags.DEFINE_float('kp_2D_loss_wt', 10., 'loss weight for the 2D keypoints predicted by the network')
flags.DEFINE_float('mask_loss_wt', 30., 'mask loss weight')
flags.DEFINE_float('cam_loss_wt', 10000., 'weights to camera loss')
flags.DEFINE_float('deform_reg_wt', 100., 'reg to deformation')
flags.DEFINE_float('triangle_reg_wt', 100., 'weights to triangle smoothness prior')
flags.DEFINE_float('vert2kp_loss_wt', .16, 'reg to vertex assignment')
flags.DEFINE_float('tex_loss_wt', 10., 'weights to tex loss')
flags.DEFINE_boolean('grad_v_in_tex_loss', False, '')
flags.DEFINE_boolean('use_keypoints', True, 'use keypoints loss')
flags.DEFINE_boolean('use_mask', True, 'use mask loss')
flags.DEFINE_boolean('use_shape_reg', False, 'use shape regularizers')
flags.DEFINE_float('tex_map_loss_wt', 10., 'weights to tex map loss')
flags.DEFINE_float('tex_dt_loss_wt', .5, 'weights to tex dt loss')
flags.DEFINE_float('mod_trans_loss_wt', 4000., 'weights for model translation loss')
flags.DEFINE_float('mod_pose_loss_wt', 200000., 'weights for model pose loss')
flags.DEFINE_float('betas_reg_wt', 100000., 'weights for betas prior loss')
flags.DEFINE_float('delta_v_loss_wt', 100000., 'weights for model delta_v')
flags.DEFINE_float('occ_loss_wt', 100., 'weights for occlusion loss')
flags.DEFINE_boolean('infer_vert2kp', False, 'estimate keypoints on the 3D model instead of using predefined values.')
flags.DEFINE_boolean('no_delta_v', False, 'set predicted deformations to zero')
flags.DEFINE_boolean('use_gtpose', False, 'if true uses gt pose for projection, but trans still gets trained.')
flags.DEFINE_boolean('use_gttrans', False, 'if true uses gt trans for projection, but pose still gets trained.')
flags.DEFINE_boolean('use_gtcam', False, 'if true uses gt cam for projection, but cam still gets trained.')
flags.DEFINE_boolean('use_gtbetas', False, 'if true uses gt betas for projection, but betas still gets trained.')
flags.DEFINE_boolean('use_gtdeltav', False, '')
flags.DEFINE_boolean('use_gttexture', False, '')
flags.DEFINE_boolean('use_camera_loss', True, 'if train with gt camera')
flags.DEFINE_boolean('random_bkg', False, 'if using a random background rather than black in the pred image')
flags.DEFINE_boolean('use_perceptual_loss', True, '')
flags.DEFINE_boolean('uv_flow', True, '')
flags.DEFINE_float('uv_flow_loss_wt', 100000., 'weights for uv_flow loss')
flags.DEFINE_boolean('use_pose_geodesic_loss', True, '')
flags.DEFINE_boolean('use_loss_on_whole_image', False, 'if compose the predicted animal with the image background')
flags.DEFINE_boolean('use_tex_dt', True, 'if use loss (4) in the birds paper')
flags.DEFINE_boolean('white_balance_for_texture_map', False, '')
flags.DEFINE_boolean('use_img_as_background', False, 'if to use the input image as background for the optimization')
flags.DEFINE_boolean('use_gtmask_for_background', False, 'if to use the input image as background for the optimization')
flags.DEFINE_boolean('use_per_image_rgb_bg', False, 'if to compute per-imag rgb colors for background in optimization')
opts = flags.FLAGS
curr_path = osp.dirname(osp.abspath(__file__))
cache_path = osp.join(curr_path, '..', 'cachedir')
class ShapeTrainer(train_utils.Trainer):
def define_model(self):
opts = self.opts
self.symmetric = opts.symmetric
img_size = (opts.img_size, opts.img_size)
texture_mask_path = 'smalst/'+opts.dataset+'_data/texture_maps/my_smpl_00781_4_all_template_w_tex_uv_001_mask_small.png'
self.texture_map_mask = torch.Tensor(scipy.misc.imread(texture_mask_path) / 255.0).cuda(device=opts.gpu_id)
tex_masks = None
data_path = 'smalst/smpl_models/my_smpl_data_00781_4_all.pkl'
        data = pkl.load(open(data_path, 'rb'))
pca_var = data['eigenvalues'][:opts.num_betas]
self.betas_prec = torch.Tensor(pca_var).cuda(device=opts.gpu_id).expand(opts.batch_size, opts.num_betas)
self.model = smal_mesh_net.MeshNet(
img_size, opts, nz_feat=opts.nz_feat, num_kps=opts.num_kps, tex_masks=tex_masks)
if opts.num_pretrain_epochs > 0:
self.load_network(self.model, 'pred', opts.num_pretrain_epochs)
self.model = self.model.cuda(device=opts.gpu_id)
if not opts.infer_vert2kp:
            self.vert2kp = torch.Tensor(pkl.load(open('smalst/'+opts.dataset+'_data/verts2kp.pkl', 'rb'))).cuda(device=opts.gpu_id)
# Data structures to use for triangle priors.
edges2verts = self.model.edges2verts
# B x E x 4
edges2verts = np.tile(np.expand_dims(edges2verts, 0), (opts.batch_size, 1, 1))
self.edges2verts = Variable(torch.LongTensor(edges2verts).cuda(device=opts.gpu_id), requires_grad=False)
# For renderering.
faces = self.model.faces.view(1, -1, 3)
self.faces = faces.repeat(opts.batch_size, 1, 1)
self.renderer = NeuralRenderer(opts.img_size, opts.projection_type, opts.norm_f, opts.norm_z, opts.norm_f0)
if opts.texture:
self.tex_renderer = NeuralRenderer(opts.img_size, opts.projection_type, opts.norm_f, opts.norm_z, opts.norm_f0)
# Only use ambient light for tex renderer
if opts.use_directional_light:
self.tex_renderer.directional_light_only()
else:
self.tex_renderer.ambient_light_only()
# For visualization
self.vis_rend = smal_vis.VisRenderer(opts.img_size, faces.data.cpu().numpy(), opts.projection_type, opts.norm_f, opts.norm_z, opts.norm_f0)
self.background_imgs = None
return
def init_dataset(self):
opts = self.opts
if opts.dataset == 'zebra':
self.data_module = zebra_data
else:
            print('Unknown dataset %s!' % opts.dataset)
self.dataloader = self.data_module.data_loader(opts)
self.resnet_transform = torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def define_criterion(self):
if opts.use_keypoints:
self.projection_loss = loss_utils.kp_l2_loss
if opts.use_mask:
self.mask_loss_fn = loss_utils.mask_loss
if opts.infer_vert2kp:
self.entropy_loss = loss_utils.entropy_loss
if self.opts.use_camera_loss:
self.camera_loss = loss_utils.camera_loss
if opts.use_smal_betas:
self.betas_loss_fn = loss_utils.betas_loss
self.delta_v_loss_fn = loss_utils.delta_v_loss
if self.opts.texture:
if self.opts.use_perceptual_loss:
if False:
self.texture_loss = loss_utils.MSE_texture_loss
else:
self.texture_loss = loss_utils.PerceptualTextureLoss()
else:
self.texture_loss = loss_utils.texture_loss
self.texture_dt_loss_fn = loss_utils.texture_dt_loss
if opts.texture_map:
self.texture_map_loss = loss_utils.texture_map_loss
if opts.uv_flow:
self.uv_flow_loss = loss_utils.uv_flow_loss
self.model_trans_loss_fn = loss_utils.model_trans_loss
self.model_pose_loss_fn = loss_utils.model_pose_loss
def set_optimization_input(self):
opts = self.opts
cams = np.zeros((self.scale_pred.shape[0], 3))
cams[:,0] = self.scale_pred.data
cams[:,1:] = 128
self.cams = Variable(torch.FloatTensor(cams).cuda(device=opts.gpu_id), requires_grad=False)
self.model_trans = Variable(self.trans_pred.cuda(device=opts.gpu_id), requires_grad=False)
def set_optimization_variables(self):
'''
Sets as optimization variables those obtained as prediction from the network
'''
opts = self.opts
cams = np.zeros((self.scale_pred.shape[0], 3))
cams[:,0] = self.scale_pred.data
cams[:,1:] = 128
# Prediction is gt
self.cams = Variable(torch.FloatTensor(cams).cuda(device=opts.gpu_id), requires_grad=False)
self.model_pose = Variable(self.pose_pred.cuda(device=opts.gpu_id), requires_grad=False)
self.model_trans = Variable(self.trans_pred.cuda(device=opts.gpu_id), requires_grad=False)
self.delta_v= Variable(self.delta_v.cuda(device=opts.gpu_id), requires_grad=False)
def set_input(self, batch):
opts = self.opts
# Image with annotations.
input_img_tensor = batch['img'].type(torch.FloatTensor)
for b in range(input_img_tensor.size(0)):
input_img_tensor[b] = self.resnet_transform(input_img_tensor[b])
img_tensor = batch['img'].type(torch.FloatTensor)
self.input_imgs = Variable( input_img_tensor.cuda(device=opts.gpu_id), requires_grad=False)
self.imgs = Variable( img_tensor.cuda(device=opts.gpu_id), requires_grad=False)
#if opts.use_mask and 'mask' in batch.keys():
if 'mask' in batch.keys():
mask_tensor = batch['mask'].type(torch.FloatTensor)
self.masks = Variable( mask_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.masks = None
if opts.use_keypoints and 'kp' in batch.keys():
kp_tensor = batch['kp'].type(torch.FloatTensor)
self.kps = Variable( kp_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.kps = None
self.img_paths = batch['img_path']
if 'camera_params' in batch.keys():
cam_tensor = batch['camera_params'].type(torch.FloatTensor)
if opts.use_norm_f_and_z:
cam_tensor[:,0] = (cam_tensor[:,0]-opts.norm_f0)/opts.norm_f
self.cams = Variable( cam_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.cams = None
cam_c_tensor = batch['camera_params_c'].type(torch.FloatTensor)
self.cams_center = Variable(cam_c_tensor.cuda(device=opts.gpu_id), requires_grad=False)
if 'model_trans' in batch.keys():
model_trans_tensor = batch['model_trans'].type(torch.FloatTensor)
if opts.use_norm_f_and_z:
model_trans_tensor[:,2] = model_trans_tensor[:,2]-opts.norm_z +1.
self.model_trans = Variable(
model_trans_tensor.cuda(device=opts.gpu_id), requires_grad=False)
if 'model_pose' in batch.keys():
model_pose_tensor = batch['model_pose'].type(torch.FloatTensor)
self.model_pose = Variable(
model_pose_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.model_trans = None
self.model_pose = None
if 'model_betas' in batch.keys():
model_betas_tensor = batch['model_betas'][:,:self.opts.num_betas].type(torch.FloatTensor)
self.model_betas = Variable(
model_betas_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.model_betas = None
if 'model_delta_v' in batch.keys():
model_delta_v_tensor = batch['model_delta_v'].type(torch.FloatTensor)
self.model_delta_v = Variable(
model_delta_v_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.model_delta_v = None
if opts.texture_map:
assert('texture_map' in batch.keys())
texture_map_tensor = batch['texture_map'].type(torch.FloatTensor)
self.texture_map = Variable(texture_map_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.texture_map = None
if 'uv_flow' in batch.keys():
uv_flow_tensor = batch['uv_flow'].type(torch.FloatTensor).permute(0,3,1,2)
self.uv_flow_gt = Variable(uv_flow_tensor.cuda(device=opts.gpu_id), requires_grad=False)
else:
self.uv_flow_gt = None
# Compute barrier distance transform.
#if opts.use_mask and self.masks is not None:
if self.masks is not None:
mask_dts = np.stack([image_utils.compute_dt_barrier(m) for m in batch['mask']])
dt_tensor = torch.FloatTensor(mask_dts).cuda(device=opts.gpu_id)
# B x 1 x N x N
self.dts_barrier = Variable(dt_tensor, requires_grad=False).unsqueeze(1)
def forward(self, opts_scale=None, opts_pose=None, opts_trans=None, opts_delta_v=None):
opts = self.opts
if opts.use_double_input:
masks = self.input_imgs*self.masks
else:
masks = None
if opts.texture:
pred_codes, self.textures = self.model.forward(self.input_imgs, masks)
else:
pred_codes = self.model.forward(self.input_imgs, masks)
self.delta_v, self.scale_pred, self.trans_pred, self.pose_pred, self.betas_pred, self.kp_2D_pred = pred_codes
if opts.fix_trans:
self.trans_pred[:,2] = self.model_trans[:,2]
if opts.use_gttrans:
print('Using gt trans')
self.trans_pred = self.model_trans
if opts.use_gtpose:
print('Using gt pose')
self.pose_pred = self.model_pose
if opts.use_gtcam:
print('Using gt cam')
self.scale_pred = self.cams[:,0,None]
if opts.use_gtbetas:
print('Using gt betas')
self.betas_pred = self.model_betas
if opts.use_gtdeltav:
print('Using gt delta_v')
self.delta_v = self.model_delta_v
if self.cams is not None:
            # The camera center does not change; here we are predicting flength
self.cam_pred = torch.cat([self.scale_pred, self.cams[:,1:]], 1)
else:
self.cam_pred = torch.cat([self.scale_pred, self.cams_center], 1)
if opts.only_mean_sym:
del_v = self.delta_v
else:
del_v = self.model.symmetrize(self.delta_v)
if opts.no_delta_v:
del_v[:] = 0
if opts.use_smal_pose:
self.pred_v = self.model.get_smal_verts(self.pose_pred, self.betas_pred, self.trans_pred, del_v)
else:
# TODO
self.mean_shape = self.model.get_mean_shape()
self.pred_v = self.mean_shape + del_v + self.trans_pred
# Compute keypoints.
if opts.infer_vert2kp:
self.vert2kp = torch.nn.functional.softmax(self.model.vert2kp, dim=1)
self.kp_verts = torch.matmul(self.vert2kp, self.pred_v)
# Set projection camera
proj_cam = self.cam_pred
# Project keypoints
if opts.use_keypoints:
self.kp_pred = self.renderer.project_points(self.kp_verts, proj_cam)
# Render mask.
self.mask_pred = self.renderer.forward(self.pred_v, self.faces, proj_cam)
if opts.texture:
self.texture_flow = self.textures
self.textures = geom_utils.sample_textures(self.texture_flow, self.imgs)
tex_size = self.textures.size(2)
self.textures = self.textures.unsqueeze(4).repeat(1, 1, 1, 1, tex_size, 1)
if opts.use_gttexture:
idx=0
from ..utils.obj2nmr import obj2nmr_uvmap
uv_map = obj2nmr_uvmap(self.model.ft, self.model.vt, tex_size=tex_size)
uv_img = self.texture_map[idx,:,:,:]
uv_img = uv_img.permute(1,2,0)
texture_t = sample_texture(uv_map, uv_img)
self.textures[0,:,:,:,:,:] = texture_t[0,:,:,:,:,:]
if opts.grad_v_in_tex_loss:
self.texture_pred = self.tex_renderer.forward(self.pred_v, self.faces, proj_cam.detach(), textures=self.textures)
else:
self.texture_pred = self.tex_renderer.forward(self.pred_v.detach(), self.faces, proj_cam.detach(), textures=self.textures)
else:
self.textures = None
if opts.save_training_imgs and opts.use_mask and self.masks is not None:
T = 255*self.mask_pred.cpu().detach().numpy()[0,:,:]
scipy.misc.imsave(opts.name + '_mask_pred.png', T)
T = 255*self.masks.cpu().detach().numpy()[0,:,:,:]
T = np.transpose(T,(1,2,0))[:,:,0]
scipy.misc.imsave(opts.name + '_mask_gt.png', T)
# Compute losses for this instance.
if self.opts.use_keypoints and self.kps is not None:
self.kp_loss = self.projection_loss(self.kp_pred, self.kps)
if self.opts.use_mask and self.masks is not None:
self.mask_loss = self.mask_loss_fn(self.mask_pred, self.masks[:,0,:,:])
if self.opts.use_camera_loss and self.cams is not None:
self.cam_loss = self.camera_loss(self.cam_pred, self.cams, 0, self.opts.use_norm_f_and_z)
if self.model_trans is not None:
self.mod_trans_loss = self.model_trans_loss_fn(self.trans_pred, self.model_trans)
if self.model_pose is not None:
self.mod_pose_loss = self.model_pose_loss_fn(self.pose_pred, self.model_pose, self.opts)
if opts.texture:
if opts.use_loss_on_whole_image:
if self.background_imgs is None:
print("SETTING BACKGROUND MODEL")
self.background_imgs = np.zeros(self.imgs.shape)
fg_mask = self.mask_pred.detach().cpu().numpy()
I = self.imgs.detach().cpu().numpy()
bg_mask = np.abs(fg_mask-1)
rgb = np.zeros((3))
n = np.sum(bg_mask)
                    for c
"""
A resource API for WAF and WAF-regional
"""
from enum import Enum, auto
from typing import Iterable, Dict, Container, Union, Generator, Optional, List
import boto3
from .basic import scroll
import abc
import logging
import json
import collections
from .cleaning import clean_up_stack
logger = logging.getLogger(__name__)
class Kind(Enum):
policy = 'policy'
# generated via
# for xx in [x[7:] for x in dir(waf_regional) if x.startswith('update')]:
# print('{} = \'{}\''.format(xx, xx))
byte_match_set = 'byte_match_set'
geo_match_set = 'geo_match_set'
ip_set = 'ip_set'
rate_based_rule = 'rate_based_rule'
regex_match_set = 'regex_match_set'
regex_pattern_set = 'regex_pattern_set'
rule = 'rule'
rule_group = 'rule_group'
size_constraint_set = 'size_constraint_set'
sql_injection_match_set = 'sql_injection_match_set'
web_acl = 'web_acl'
xss_match_set = 'xss_match_set'
match_type_to_kind = {'IPMatch': Kind.ip_set,
'ByteMatch': Kind.byte_match_set,
'SqlInjectionMatch': Kind.sql_injection_match_set,
'GeoMatch': Kind.geo_match_set,
'SizeConstraint': Kind.size_constraint_set,
'XssMatch': Kind.xss_match_set,
'RegexMatch': Kind.regex_match_set,
}
kind_to_match_type = {v: k for k, v in match_type_to_kind.items()}
def list_resources(client, kind: Kind) -> Iterable[Dict]:
if kind == Kind.policy:
return scroll(client.list_policies, MaxResults=100)
else:
fn = getattr(client, 'list_{}s'.format(kind.value))
return scroll(fn)
def create_resource(client, change_token, kind: Kind, name, **kwargs):
fn = getattr(client, 'create_{}'.format(kind.value))
metric_name = name.replace('-', '').replace('_', '')
if kind in [Kind.rule, Kind.rule_group]:
return fn(Name=name, MetricName=metric_name, ChangeToken=change_token)
elif kind in [Kind.rate_based_rule]:
_kwargs = dict(RateKey='IP', RateLimit=2000)
_kwargs.update(kwargs)
return fn(Name=name, MetricName=metric_name, ChangeToken=change_token, **_kwargs)
elif kind == Kind.web_acl:
_kwargs = dict(DefaultAction={'Type': 'ALLOW'})
return fn(Name=name, MetricName=metric_name, ChangeToken=change_token, **_kwargs)
else:
return fn(Name=name, ChangeToken=change_token)
def get_service_name(kind: Kind, region_name):
if kind in {Kind.policy}:
return 'fms'
elif region_name == 'global':
return 'waf'
else:
return 'waf-regional'
class WAFResource(abc.ABC):
top_key: str
id_key: str
kind: Kind
name_key: str = 'Name'
session: boto3.Session
region_name: str
def __init__(self, session, region_name=None, name=None, id_=None, ensure_exists=True, old_names=(),
creation_kwargs={}):
"""
service_client could be waf, waf-regional, or fms
WARNING: if given, name is assumed to identify the condition set, although this is not always true
Caveat: do not use ensure_exists with FMS policy.
"""
self.session = session
self.region_name = region_name
self.service_client = session.client(get_service_name(self.kind, region_name),
region_name=region_name if region_name != 'global' else 'us-east-1')
self.old_versions = [
self.__class__(session, region_name=region_name, name=old_name, ensure_exists=False)
for old_name in old_names]
clean_up_stack.append(self.clean_old_versions)
assert name or id_
if name:
self.name = name
maybe_id = self.get_id(name)
if maybe_id:
self.id_ = maybe_id
self.exists = True
elif ensure_exists:
if self.kind == Kind.policy:
raise NotImplementedError
logger.info('{} "{}" does not exist. Creating.'.format(self.top_key, name))
while True:
try:
resp = create_resource(self.service_client,
change_token=self.service_client.get_change_token()['ChangeToken'],
kind=self.kind, name = self.name, **creation_kwargs)
break
except self.service_client.exceptions.WAFStaleDataException:
logger.info("Got WAFStaleDataException; retrying ...")
continue
self.id_ = resp[self.top_key][self.id_key]
self.exists = True
else:
self.exists = False
elif id_:
self.id_ = id_
self.name = self.describe()[self.name_key]
self.exists = True
def get_id(self, name):
# name is assumed to be unique
# find ID
for item in list_resources(self.service_client, self.kind):
if item[self.name_key] == name:
return item[self.id_key]
else:
return None
def describe(self):
fn = getattr(self.service_client, 'get_{}'.format(self.kind.value))
return fn(**{self.id_key: self.id_})[self.top_key]
def __eq__(self, other):
return isinstance(other, self.__class__) and self.id_ == other.id_
@abc.abstractmethod
def delete(self, **kwargs):
pass
def clean_old_versions(self):
for old_version in self.old_versions:
old_version.delete()
class UpdateableWAFResource(WAFResource, Iterable, metaclass=abc.ABCMeta):
descriptor_key: str
descriptors_key: str
@abc.abstractmethod
def __iter__(self):
pass
def update(self, insertions=(), deletions=(), **kwargs):
# descriptor structure for Kind.ip:
# {
# "Type": "IPV4"|"IPV6",
# "Value": "string"
# }
updates = (
[{'Action': 'DELETE',
self.descriptor_key: dd}
for dd in deletions]
+ [{'Action': 'INSERT',
self.descriptor_key: dd}
for dd in insertions]
)
specific_update_fn_kwargs = {
self.id_key: self.id_,
'Updates': updates}
specific_update_fn_kwargs.update(kwargs)
specific_update_fn = getattr(self.service_client, 'update_' + self.kind.value)
while True:
try:
return specific_update_fn(ChangeToken=self.service_client.get_change_token()['ChangeToken'],
**specific_update_fn_kwargs)
except self.service_client.exceptions.WAFStaleDataException:
logger.info("Got WAFStaleDataException; retrying ...")
continue
def put(self, descriptors, **kwargs):
"""Idempotent -- make the live descriptors the same as the descriptors argument
:param descriptors: the desired descriptors; see the `update` method for the per-kind descriptor structure
:param kwargs: passes kwargs through to update
:return: None
"""
extants = list(self)
insertions = [descriptor
for descriptor in descriptors
if descriptor not in extants
]
deletions = [descriptor
for descriptor in extants
if descriptor not in descriptors
]
if insertions or deletions:
self.update(insertions, deletions, **kwargs)
def delete(self):
if self.exists:
self.put(())
delete_method = getattr(self.service_client, 'delete_{}'.format(self.kind.value))
while True:
try:
delete_method(ChangeToken=self.service_client.get_change_token()['ChangeToken'],
**{self.id_key: self.id_})
break
except self.service_client.exceptions.WAFStaleDataException:
logger.info("Got WAFStaleDataException; retrying ...")
continue
self.exists = False
class ConditionSet(UpdateableWAFResource, Container, Iterable, metaclass=abc.ABCMeta):
def __iter__(self):
yield from self.descriptors()
def __contains__(self, element):
return element in self.descriptors()
def descriptors(self):
return self.describe()[self.descriptors_key]
@property
def base_condition_set(self):
return self
class IPSet(ConditionSet):
top_key = 'IPSet'
id_key = 'IPSetId'
descriptor_key = 'IPSetDescriptor'
descriptors_key = 'IPSetDescriptors'
kind = Kind.ip_set
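# Usage sketch (illustrative names; assumes boto3 credentials and permission to create
# an IPSet called 'office-ips'): `put` diffs the desired descriptors against the live
# ones, so re-running this is a no-op when nothing changed. The descriptor shape is
# the IPSetDescriptor structure shown in the `update` docstring above.
def _example_sync_office_ips(session):
    office_ips = IPSet(session, region_name='global', name='office-ips')
    office_ips.put([
        {'Type': 'IPV4', 'Value': '203.0.113.0/24'},
        {'Type': 'IPV4', 'Value': '198.51.100.17/32'},
    ])
    return office_ips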
class GeoMatchSet(ConditionSet):
top_key = 'GeoMatchSet'
id_key = 'GeoMatchSetId'
descriptor_key = 'GeoMatchConstraint'
descriptors_key = 'GeoMatchConstraints'
kind = Kind.geo_match_set
class ByteMatchSet(ConditionSet):
top_key = 'ByteMatchSet'
id_key = 'ByteMatchSetId'
descriptor_key = 'ByteMatchTuple'
descriptors_key = 'ByteMatchTuples'
kind = Kind.byte_match_set
class RegexMatchSet(ConditionSet):
"""
put args:
{
'FieldToMatch': {
'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
'Data': 'string'
},
'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE',
'RegexPatternSetId': regex_pattern_set.id_
}
See waf-regional `update_regex_match_set`.
"""
top_key = 'RegexMatchSet'
id_key = 'RegexMatchSetId'
descriptor_key = 'RegexMatchTuple'
descriptors_key = 'RegexMatchTuples'
kind = Kind.regex_match_set
class RegexPatternSet(ConditionSet):
"""
Does not fit in a Rule, but otherwise is just like any other
ConditionSet. Instead, put it in a RegexMatchSet.
"""
top_key = 'RegexPatternSet'
id_key = 'RegexPatternSetId'
descriptor_key = 'RegexPatternString'
descriptors_key = 'RegexPatternStrings'
kind = Kind.regex_pattern_set
#def __iter__(self):
# yield from self.describe()[self.descriptors_key]
class RuleGroup(UpdateableWAFResource):
top_key = 'RuleGroup'
id_key = 'RuleGroupId'
descriptor_key = 'ActivatedRule'
descriptors_key = 'ActivatedRules'
kind = Kind.rule_group
def list_activated_rules(self):
return scroll(self.service_client.list_activated_rules_in_rule_group,
RuleGroupId=self.id_)
def rules(self):
for item in scroll(self.service_client.list_activated_rules_in_rule_group,
RuleGroupId=self.id_):
yield Rule(self.session, self.region_name, id_=item[Rule.id_key])
def __iter__(self):
yield from self.list_activated_rules()
def update(self, insertions=(), deletions=(), **kwargs):
"""
Descriptor structure:
{
'Priority': 123,
'RuleId': 'string',
'Action': {
'Type': 'BLOCK'|'ALLOW'|'COUNT'
},
'OverrideAction': {
'Type': 'NONE'|'COUNT'
},
'Type': 'REGULAR'|'RATE_BASED'|'GROUP',
'ExcludedRules': [
{
'RuleId': 'string'
},
]
}
"""
assert not kwargs
super().update(insertions, deletions)
class RuleElement:
Negated: bool
condition_set: ConditionSet
def __init__(self, condition_set: ConditionSet, Negated: bool = False):
self.condition_set = condition_set
self.Negated = Negated
def translate_to_aws(self):
match_type = kind_to_match_type[self.condition_set.kind]
return {
'Negated': self.Negated,
'Type': match_type,
'DataId': self.condition_set.id_,
}
def __eq__(self, other):
return isinstance(other, self.__class__) and self.translate_to_aws() == other.translate_to_aws()
def __neg__(self):
return RuleElement(self.condition_set, Negated=not self.Negated)
class Rule(UpdateableWAFResource, Iterable):
top_key = 'Rule'
id_key = 'RuleId'
kind = Kind.rule
descriptor_key = 'Predicate'
descriptors_key = 'Predicates'
def __iter__(self) -> Generator[RuleElement, None, None]:
for pred in self.describe()[self.descriptors_key]:
_kind = match_type_to_kind[pred['Type']]
cls = kind_to_type[_kind]
_id = pred['DataId']
condition_set = cls(self.session, self.region_name, id_=_id)
yield RuleElement(condition_set, pred['Negated'])
def update(self, insertions: Iterable[RuleElement]=(), deletions: Iterable[RuleElement]=(), **kwargs):
_insertions = [xx.translate_to_aws() for xx in insertions]
_deletions = [xx.translate_to_aws() for xx in deletions]
super().update(insertions=_insertions, deletions=_deletions)
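# Sketch (names are illustrative): a Rule is a set of predicates, each wrapping a
# condition set. RuleElement.translate_to_aws() produces the predicate dict that the
# WAF UpdateRule call expects, and `put` makes the live predicates match exactly.
def _example_rule_from_ip_set(session, region_name, ip_set):
    rule = Rule(session, region_name=region_name, name='from-office-ips')
    rule.put([RuleElement(ip_set)])       # match requests originating from the IP set
    # rule.put([-RuleElement(ip_set)])    # or the negated predicate, via __neg__
    return rule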
class RateBasedRule(UpdateableWAFResource, Iterable):
"""
Note: RateBasedRule's naming conventions are odd
After initialization, please call `put` with your RateLimit, to ensure idempotency.
"""
top_key = 'Rule'
id_key = 'RuleId'
kind = Kind.rate_based_rule
descriptor_key = 'Predicate'
descriptors_key = 'MatchPredicates'
def __iter__(self) -> Generator[RuleElement, None, None]:
for pred in self.describe()[self.descriptors_key]:
_kind = match_type_to_kind[pred['Type']]
cls = kind_to_type[_kind]
_id = pred['DataId']
condition_set = cls(self.session, self.region_name, id_=_id)
yield RuleElement(condition_set, pred['Negated'])
def update(self, insertions: Iterable[RuleElement]=(), deletions: Iterable[RuleElement]=(), RateLimit=2000):
_insertions = [xx.translate_to_aws() for xx in insertions]
_deletions = [xx.translate_to_aws() for xx in deletions]
super().update(insertions=_insertions, deletions=_deletions, RateLimit=RateLimit)
class WebACL(UpdateableWAFResource):
top_key = 'WebACL'
id_key = 'WebACLId'
descriptor_key = 'ActivatedRule'
descriptors_key = 'Rules'
kind = Kind.web_acl
def rules(self):
for item in self:
yield Rule(self.session, self.region_name, id_=item[Rule.id_key])
def __iter__(self):
yield from self.service_client.get_web_acl(WebACLId=self.id_)[self.top_key][self.descriptors_key]
def update(self, insertions=(), deletions=(), **kwargs):
"""
Descriptor structure:
{
'Priority': 123,
'RuleId': 'string',
'Action': {
'Type': 'BLOCK'|'ALLOW'|'COUNT'
},
'OverrideAction': {
'Type': 'NONE'|'COUNT'
},
'Type': 'REGULAR'|'RATE_BASED'|'GROUP',
'ExcludedRules': [
{
'RuleId': 'string'
},
]
}
"""
assert not kwargs
super().update(insertions, deletions)
def list_resources_arns(self) -> Iterable[dict]:
if self.region_name == 'global':
from .cloudfront import cloudfront_distributions
for dist in cloudfront_distributions(self.session):
associated_webacl = dist['WebACLId']
if associated_webacl == self.id_:
yield dist['DistributionId']
else:
yield from self.service_client.list_resources_for_web_acl(WebACLId=self.id_)['ResourceArns']
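# Sketch (illustrative names and priority): attach a rule to a web ACL. WebACL.update
# takes ActivatedRule descriptors with the structure documented in its docstring; the
# same shape is used by RuleGroup.update above.
def _example_attach_rule_to_acl(session, region_name, acl_name, rule, priority=10):
    acl = WebACL(session, region_name=region_name, name=acl_name)
    acl.update(insertions=[{
        'Priority': priority,
        'RuleId': rule.id_,
        'Action': {'Type': 'BLOCK'},
        'Type': 'REGULAR',
    }])
    return acl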
class Policy(WAFResource):
top_key = 'Policy'
id_key = 'PolicyId'
kind = Kind.policy
name_key = 'PolicyName'
fms_shield_supported_resource_types = ['AWS::ElasticLoadBalancingV2::LoadBalancer',
'AWS::ElasticLoadBalancing::LoadBalancer',
'AWS::EC2::EIP']
def subtype(self):
return self.describe()['SecurityServicePolicyData']['Type']
def __iter__(self):
desc = self.describe()
policy_data = desc['SecurityServicePolicyData']
if policy_data['Type'] == 'WAF':
service_data = json.loads(policy_data['ManagedServiceData'])
for rule_group in service_data['ruleGroups']:
yield RuleGroup(self.session, self.region_name, id_=rule_group['id'])
def put(self, managed_service_data,
policy_type: Optional[str] = None,
resource_tags: Optional[List[Dict]] = None,
resource_type: Optional[str] = None,
resource_type_list: Optional[List[str]] = None,
include_map: Iterable = (),
exclude_map: Iterable = ()):
"""
resource_tags structure:
[
{
'Key': 'string',
'Value': 'string'
},
]
managed_service_data:
see ManagedServiceData in fms `put_policy`
examples:
{
'type': 'WAF',
'ruleGroups': [{'id': rule_group.id_,
'overrideAction': {'type': 'COUNT'}}],
'defaultAction': {'type': 'BLOCK'}
}
{ "type":"SHIELD_ADVANCED" }
include_map:
if empty, equivalent to listing all accounts
"""
if policy_type is None:
policy_type = managed_service_data['type']
if resource_type is None:
if policy_type == 'WAF':
if self.region_name == 'global':
resource_type = 'AWS::CloudFront::Distribution'
else:
resource_type = 'AWS::ElasticLoadBalancingV2::LoadBalancer'
"""
if addr is None:
raise TypeError('Address can not be set to None')
self._addr = addr
def get_scope(self):
"""
Get the subnet scope
:returns: The subnet scope as a string
"""
return self._scope
def set_scope(self, scope):
"""
Set the subnet scope
:param scope: The subnet scope. It can be either "public", "private" or "shared".
"""
if scope is None:
raise TypeError('Scope can not be set to None')
self._scope = scope
def get_json(self):
"""
Returns json representation of the subnet
:returns: json dictionary of subnet
"""
attributes = self._generate_attributes()
if self.get_addr() is None:
raise ValueError('Subnet address is not set')
attributes['ip'] = self.get_addr()
if self.get_scope() is not None:
attributes['scope'] = self.get_scope()
return super(Subnet, self).get_json('fvSubnet', attributes=attributes)
def _populate_from_attributes(self, attributes):
"""
Sets the attributes when creating objects from the APIC.
Called from the base object when calling the classmethod get()
"""
self.set_addr(str(attributes.get('ip')))
@classmethod
def get(cls, session, bridgedomain, tenant):
"""
Gets all of the Subnets from the APIC for a particular tenant and
bridgedomain.
:param session: the instance of Session used for APIC communication
:param bridgedomain: the instance of BridgeDomain used to limit the\
Subnet instances retrieved from the APIC
:param tenant: the instance of Tenant used to limit the Subnet\
instances retrieved from the APIC
:returns: List of Subnet objects
"""
return BaseACIObject.get(session, cls, 'fvSubnet',
parent=bridgedomain, tenant=tenant)
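# Usage sketch (illustrative; assumes the usual acitoolkit Tenant/BridgeDomain parents
# defined elsewhere in this module and the Subnet(name, parent) constructor signature):
def _example_subnet_json(bridge_domain):
    subnet = Subnet('example-subnet', bridge_domain)
    subnet.set_addr('10.0.0.1/24')    # required: get_json() raises ValueError otherwise
    subnet.set_scope('public')        # 'public', 'private' or 'shared'
    return subnet.get_json()          # -> {'fvSubnet': {'attributes': {...}}}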
class Context(BaseACIObject):
""" Context : roughly equivalent to fvCtx """
def __init__(self, context_name, parent=None):
"""
:param context_name: String containing the Context name
:param parent: An instance of Tenant class representing the Tenant\
which contains this Context.
"""
super(Context, self).__init__(context_name, parent)
self.allow_all = False
@classmethod
def _get_apic_classes(cls):
"""
Get the APIC classes used by this acitoolkit class.
:returns: list of strings containing APIC class names
"""
resp = []
resp.append('fvCtx')
return resp
@classmethod
def _get_toolkit_to_apic_classmap(cls):
"""
Gets the APIC class to an acitoolkit class mapping dictionary
:returns: dict of APIC class names to acitoolkit classes
"""
return {}
@staticmethod
def _get_parent_class():
"""
Gets the class of the parent object
:returns: class of parent object
"""
return Tenant
@staticmethod
def _get_parent_dn(dn):
return dn.split('/ctx-')[0]
@staticmethod
def _get_name_from_dn(dn):
return dn.split('/ctx-')[1].split('/')[0]
@staticmethod
def _get_tenant_from_dn(dn):
"""
Get the tenant name from the DN
:param dn: String containing the DN
:return: string containing the tenant name
"""
return dn.split('/tn-')[1].split('/')[0]
def _populate_from_attributes(self, attributes):
"""
Sets the attributes when creating objects from the APIC.
Called from the base object when calling the classmethod get()
"""
self.descr = attributes.get('descr')
self.known_mcast = attributes.get('knwMcastAct')
self.modified_time = attributes.get('modTs')
self.name = attributes.get('name')
self.class_id = attributes.get('pcTag')
self.scope = attributes.get('scope')
self.vnid = attributes.get('seg')
dn = attributes.get('dn')
if dn is not None:
self.tenant = self._get_tenant_from_dn(dn)
else:
self.tenant = None
if attributes.get('pcEnfPref') == 'unenforced':
allow_all = True
else:
allow_all = False
self.set_allow_all(allow_all)
def set_allow_all(self, value=True):
"""
Set the allow_all value. When set, contracts will not be enforced\
in this context.
:param value: True or False. Default is True.
"""
self.allow_all = value
def get_allow_all(self):
"""
Returns the allow_all value from this Context. When set, contracts\
will not be enforced in this context.
:returns: True or False.
"""
return self.allow_all
def get_json(self):
"""
Returns json representation of fvCtx object
:returns: json dictionary of fvCtx object
"""
attributes = self._generate_attributes()
if self.get_allow_all():
attributes['pcEnfPref'] = 'unenforced'
else:
attributes['pcEnfPref'] = 'enforced'
return super(Context, self).get_json(self._get_apic_classes()[0],
attributes=attributes)
@classmethod
def get(cls, session, tenant=None):
"""
Gets all of the Contexts from the APIC.
:param session: the instance of Session used for APIC communication
:param tenant: the instance of Tenant used to limit the Contexts\
retrieved from the APIC
:returns: List of Context objects
"""
return BaseACIObject.get(session, cls, cls._get_apic_classes()[0],
tenant, tenant)
@staticmethod
def get_table(contexts, title=''):
"""
Will create table of context information
:param title:
:param contexts:
"""
headers = ['Tenant',
'Context',
'VNID', 'Scope', 'Class ID',
'Allow All',
'Known MCST', 'Modified Time',
]
data = []
for context in sorted(contexts):
data.append([
context.get_parent().name,
context.name,
context.vnid,
context.scope,
context.class_id,
context.allow_all,
context.known_mcast,
context.modified_time
])
data = sorted(data)
table = Table(data, headers, title=title + 'Context')
return [table, ]
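# Usage sketch (illustrative): an unenforced Context maps allow_all=True to
# pcEnfPref='unenforced' in the generated fvCtx JSON.
def _example_unenforced_context(tenant):
    context = Context('example-ctx', tenant)
    context.set_allow_all(True)       # contracts are not enforced in this context
    return context.get_json()         # -> {'fvCtx': {'attributes': {'pcEnfPref': 'unenforced', ...}}}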
class BaseContract(BaseACIObject):
""" BaseContract : Base class for Contracts and Taboos """
def __init__(self, contract_name, parent=None):
super(BaseContract, self).__init__(contract_name, parent)
self._scope = 'context'
@staticmethod
def _get_contract_code():
"""
Returns the APIC class name for this contract.
Meant to be overridden by inheriting classes.
"""
raise NotImplementedError
@staticmethod
def _get_subject_code():
"""
Get the subject code
:return: None
"""
raise NotImplementedError
@staticmethod
def _get_subject_relation_code():
"""
Get the subject relation code
:return: None
"""
raise NotImplementedError
@classmethod
def _get_apic_classes(cls):
"""
Get the APIC classes used by this acitoolkit class.
:returns: list of strings containing APIC class names
"""
resp = []
resp.append(cls._get_contract_code())
return resp
@staticmethod
def _get_parent_class():
"""
Gets the class of the parent object
:returns: class of parent object
"""
return Tenant
def set_scope(self, scope):
"""Set the scope of this contract.
Valid values are 'context', 'global', 'tenant', and
'application-profile'
"""
if scope not in ('context', 'global', 'tenant', 'application-profile'):
raise ValueError
self._scope = scope
def get_scope(self):
"""Get the scope of this contract.
Valid values are 'context', 'global', 'tenant', and
'application-profile'
"""
return self._scope
@classmethod
def _get_toolkit_to_apic_classmap(cls):
"""
Gets the APIC class to an acitoolkit class mapping dictionary
:returns: dict of APIC class names to acitoolkit classes
"""
return {}
def get_json(self):
"""
Returns json representation of the contract
:returns: json dictionary of the contract
"""
resp_json = []
subj_code = self._get_subject_code()
subj_relation_code = self._get_subject_relation_code()
attributes = self._generate_attributes()
contract_code = self._get_contract_code()
contract = super(BaseContract, self).get_json(contract_code,
attributes=attributes,
get_children=False)
# Create a subject for every entry with a relation to the filter
subjects = []
for entry in self.get_children():
subject_name = self.name + entry.name
subject = {subj_code: {'attributes': {'name': subject_name}}}
filt_name = subject_name
filt = {subj_relation_code:
{'attributes': {'tnVzFilterName': filt_name}}}
subject[subj_code]['children'] = [filt]
subjects.append(subject)
contract[self._get_contract_code()]['children'] = subjects
resp_json.append(contract)
for entry in self.get_children():
entry_json = entry.get_json()
if entry_json is not None:
resp_json.append(entry_json)
return resp_json
class Contract(BaseContract):
""" Contract : Class for Contracts """
def __init__(self, contract_name, parent=None):
super(Contract, self).__init__(contract_name, parent)
@staticmethod
def _get_contract_code():
"""
Returns the APIC class name for this type of contract.
:returns: String containing APIC class name for this type of contract.
"""
return 'vzBrCP'
@staticmethod
def _get_subject_code():
return 'vzSubj'
@staticmethod
def _get_subject_relation_code():
return 'vzRsSubjFiltAtt'
@staticmethod
def _get_parent_dn(dn):
return dn.split('/brc-')[0]
@staticmethod
def _get_name_from_dn(dn):
name = dn.split('/brc-')[1].split('/')[0]
return name
def _generate_attributes(self):
attributes = super(Contract, self)._generate_attributes()
attributes['scope'] = self.get_scope()
return attributes
@classmethod
def get_deep(cls, full_data, working_data, parent=None, limit_to=[], subtree='full', config_only=False):
contract_data = working_data[0]['vzBrCP']
contract = Contract(str(contract_data['attributes']['name']),
parent)
if 'children' not in contract_data:
return
for child in contract_data['children']:
if 'vzSubj' in child:
subject = child['vzSubj']
if 'children' not in subject:
continue
for subj_child in subject['children']:
if 'vzRsSubjFiltAtt' in subj_child:
filter_attributes = subj_child['vzRsSubjFiltAtt']['attributes']
filter_name = filter_attributes['tnVzFilterName']
for filter in full_data[0]['fvTenant']['children']:
if 'vzFilter' in filter:
match_name = filter['vzFilter']['attributes']['name']
if match_name == filter_name:
for entry in filter['vzFilter']['children']:
if 'vzEntry' in entry:
entry_obj = FilterEntry.create_from_apic_json(entry, contract)
@classmethod
def get(cls, session, tenant):
"""Gets all of the Contracts from the APIC for a particular tenant.
"""
return BaseACIObject.get(session, cls, cls._get_contract_code(),
tenant, tenant)
@staticmethod
def get_table(contracts, title=''):
"""
Will create a table for each contract
:param title:
:param contracts:
"""
result = []
headers = ['Tenant', 'Contract', 'Scope', 'Filter']
for contract in sorted(contracts, key=lambda x: (x.name)):
data = []
for filter in contract.get_children(FilterEntry):
data.append([
contract.get_parent().name,
contract.name,
contract.get_scope(),
filter.name,
])
result.append(Table(data, headers, title=title + 'Contract:{0}'.format(contract.name)))
return result
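# Usage sketch (illustrative; FilterEntry children would add one vzSubj/vzRsSubjFiltAtt
# pair each, as built in BaseContract.get_json above): the scope controls where the
# contract can be consumed.
def _example_contract_json(tenant):
    contract = Contract('web-to-db', tenant)
    contract.set_scope('tenant')      # 'context', 'global', 'tenant' or 'application-profile'
    return contract.get_json()        # -> list containing the vzBrCP dict, plus one dict per child entry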
class Taboo(BaseContract):
""" Taboo : Class for Taboos """
def __init__(self, contract_name, parent=None):
super(Taboo, self).__init__(contract_name, parent)
@staticmethod
def _get_contract_code():
"""
Returns the APIC class name for this type of contract.
:returns: String containing APIC class name for this type of contract.
"""
return 'vzTaboo'
@staticmethod
def _get_subject_code():
return 'vzTSubj'
@staticmethod
def _get_subject_relation_code():
return 'vzRsDenyRule'
@staticmethod
def _get_parent_dn(dn):
return dn.split('/taboo-')[0]
@staticmethod
def _get_name_from_dn(dn):
name = dn.split('/taboo-')[1].split('/')[0]
return name
@staticmethod
def get_table(taboos, title=''):
"""
Will create table of taboo information for a given tenant
:param title:
:param taboos:
"""
result = []
headers = ['Tenant', 'Taboo', 'Scope']
data = []
for taboo in sorted(taboos, key=lambda x: (x.name)):
data.append([
taboo.get_parent().name,
taboo.name,
taboo.get_scope(),
])
result.append(Table(data, headers, title=title + 'Taboo:{0}'.format(taboo.name)))
return result
class FilterEntry(BaseACIObject):
""" | |
"""
Aggregate tools
===============
"""
import sys
import numpy
from .._lib.hashmap import factorize
from ..compat import tqdm
from ..ds.scaling import linearscaling
from .arrays import first, lexsort_uint32_pair, to_structured
def igroupby(ids, values, n=None, logging_prefix=None, assume_sorted=False,
find_next_hint=512):
"""
Efficiently converts two arrays representing a relation
(the ``ids`` and the associated ``values``) to an iterable ``(id, values_associated)``.
The ``values`` are grouped by ``ids`` and a sequence of tuples is generated.
The ``i`` th tuple generated is ``(id_i, values[ids == id_i])``,
``id_i`` being the ``i`` th distinct value of the ``ids`` array, taken in ascending order.
:param array ids: ``(>=n,) dtype array``
:param array values: ``(>=n, *shape) uint32 array``
:param int? n: length of array to consider,
applying igroupby to ``(ids[:n], values[:n])``. Uses full array when not set.
:param string? logging_prefix: prefix to include while logging progress.
``(default: None, does not log)``.
:param bool? assume_sorted: whether ids is sorted. ``(default: False)``
:param int? find_next_hint: hint for find_next_lookup. ``(default: 512)``
:generates: tuple(id:int, values_associated:``(m, *shape) array slice``)
Example
_______
>>> ids = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
>>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
>>> gen = igroupby(ids, values)
>>> next(gen)
(0, array([0, 1, 2, 3, 4]))
>>> next(gen)
(1, array([0, 2, 4, 6]))
>>> next(gen)
(3, array([0, 4, 6]))
Example with strings as ids:
>>> ids = numpy.array(["alpha", "alpha", "beta", "omega", "alpha", "gamma", "beta"])
>>> values = numpy.array([1, 2, 10, 100, 3, 1000, 20])
>>> gen = igroupby(ids, values)
>>> next(gen)
('alpha', array([1, 2, 3]))
>>> next(gen)
('beta', array([10, 20]))
>>> next(gen)
('gamma', array([1000]))
>>> next(gen)
('omega', array([100]))
"""
# convert to numpy arrays
ids = numpy.asarray(ids)
values = numpy.asarray(values)
# check input shape
assert len(ids.shape) == 1
if n is None:
n = ids.shape[0]
assert ids.shape[0] >= n and values.shape[0] >= n, values.shape
# sort if needed
if not assume_sorted:
ids = ids[:n]
values = values[:n]
asort = numpy.argsort(ids)
ids = ids[asort]
values = values[asort]
# init
start_block = 0
find_next_lookup = find_next_hint
# search next change block by block
disable = logging_prefix is None
with tqdm(total=n, desc=logging_prefix, disable=disable, file=sys.stdout) as pbar:
while start_block < n:
# find all items having id by block boundaries
current_id = ids[start_block]
try:
end_block = first(ids,
lambda x: x != current_id,
offset=start_block,
batch_size=find_next_lookup)
find_next_lookup = max(
find_next_hint, 2 * (end_block - start_block))
except StopIteration:
end_block = n
current_id_values = values[start_block:end_block]
assert (ids[start_block:end_block] == current_id).all()
pbar.update(end_block - start_block)
start_block = end_block
yield current_id, current_id_values
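# Example (beyond the doctest above, not part of the original module): igroupby pairs
# naturally with numpy reductions to compute per-group aggregates lazily.
def mean_by_id(ids, values):
    """Return a dict {id: mean of the values associated with that id}."""
    return {group_id: group_values.mean()
            for group_id, group_values in igroupby(ids, values)}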
def ufunc_group_by_idx(idx, values, ufunc, init, minlength=None):
"""
Abstract wrapper to compute ufunc grouped by values in array ``idx``.
Return an array containing the results of ``ufunc`` applied to ``values``
grouped by the indexes in array ``idx``.
(See available ufuncs `here <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_).
Warning: the ``init`` parameter is not just a filling value for missing indexes.
If index ``i`` is missing, then ``out[i] = init``
but this value also serves as the initialization of ``ufunc`` on all the groups of ``values``.
For example, if ``ufunc`` is ``numpy.add`` and ``init = -1`` then for each index,
the sum of the corresponding values will be decreased by one.
:param array idx: ``(n,) int array``
:param array values: ``(n,) dtype array``
:param numpy.ufunc ufunc: universal function applied to the groups of ``values``
:param dtype init: initialization value
:param int? minlength: ``(default: idx.max() + 1)``
:returns: (min-length,) dtype array, such that ``out[i] = ufunc(values[idx==i])``
Example
_______
>>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
>>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
>>> ufunc_group_by_idx(idx, values, numpy.maximum, -1)
array([ 4, 6, -1, 6])
>>> ufunc_group_by_idx(idx, values, numpy.add, -1)
array([ 9, 11, -1, 9])
>>> ufunc_group_by_idx(idx, values, numpy.add, 0)
array([10, 12,  0, 10])
"""
length = max(idx.max() + 1, minlength or 0)
out = numpy.full(length, init)
ufunc.at(out, idx, values)
return out
def min_by_idx(idx, values, minlength=None, fill=None):
"""
Given array of indexes ``idx`` and array ``values``,
outputs the min value by idx, aligned on ``arange(idx.max() + 1)``.
See also ``argmin_by_idx`` and ``value_at_argmin_by_idx``.
:param array idx: (n,) int array
:param array values: (n,) float array
:param int? minlength: (default: idx.max() + 1)
:param float? fill: filling value for missing idx (default: +inf)
:returns: (min-length,) float array, such that out[i] = min(values[idx==i])
Example
_______
>>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
>>> values = numpy.array([1, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
>>> min_by_idx(idx, values, fill=100)
array([ 1, 0, 100, 0])
>>> min_by_idx(idx, values)
array([1, 0, 9223372036854775807, 0])
"""
assert idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all()), (
'Can only use get_xx_by_idx with integer idx, where (idx >= 0).all()')
if fill is None:
fill = numpy.inf if values.dtype.kind == 'f' else numpy.iinfo(values.dtype).max
else:
assert fill >= values.max()
return ufunc_group_by_idx(idx, values, numpy.minimum, fill, minlength=minlength)
def max_by_idx(idx, values, minlength=None, fill=None):
"""
Given array of indexes ``idx`` and array ``values``,
outputs the max value by idx, aligned on ``arange(idx.max() + 1)``.
See also ``argmax_by_idx`` and ``value_at_argmax_by_idx``.
:param array idx: (n,) int array
:param array values: (n,) float array
:param int? minlength: (default: idx.max() + 1)
:param float? fill: filling value for missing idx (default: -inf)
:returns: (min-length,) float array, such that out[i] = max(values[idx==i])
Example
_______
>>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
>>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
>>> max_by_idx(idx, values, fill=-1)
array([ 4, 6, -1, 6])
>>> max_by_idx(idx, values, minlength=10, fill=-1)
array([ 4, 6, -1, 6, -1, -1, -1, -1, -1, -1])
>>> max_by_idx(idx, values)
array([ 4, 6, -9223372036854775808, 6])
"""
assert idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all()), (
'Can only use get_xx_by_idx with integer idx, where all idx >= 0')
if fill is None:
fill = - numpy.inf if values.dtype.kind == 'f' else numpy.iinfo(values.dtype).min
else:
assert fill <= values.min()
return ufunc_group_by_idx(idx, values, numpy.maximum, fill, minlength=minlength)
def argmin_by_idx(idx, values, minlength=None, fill=None):
"""
Given array of indexes ``idx`` and array ``values``,
outputs the argmin of the values by idx,
aligned on ``arange(idx.max() + 1)``.
See also ``min_by_idx`` and ``value_at_argmin_by_idx``.
:param array idx: (n,) int array
:param array values: (n,) float array
:param int? minlength: (default: idx.max() + 1)
:param float? fill: filling value for missing idx (default: -1)
:returns: (min-length,) int32 array, such that
out[i] = argmin_j(values[j] for j such that idx[j] == i), or fill if no such j exists
Example
_______
>>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
>>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
>>> argmin_by_idx(idx, values, fill=-1)
array([ 0, 5, -1, 9])
>>> argmin_by_idx(idx, values, minlength=10, fill=-1)
array([ 0, 5, -1, 9, -1, -1, -1, -1, -1, -1])
"""
assert idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all()), (
'Can only use get_xx_by_idx with integer idx, where all idx >= 0')
if fill is None:
fill = -1
min_values_by_idx = min_by_idx(idx, values, minlength) # (n-idx,)
is_min = values == min_values_by_idx[idx]
out = numpy.full(min_values_by_idx.size, fill)
out[idx[is_min]] = numpy.where(is_min)[0]
return out
# TODO: improve test
def value_at_argmin_by_idx(idx, sorting_values, fill, output_values=None, minlength=None):
"""
Wrapper around argmin_by_idx and get_value_by_idx.
Allows using a different array for the output than the one used for detecting the minimum.
Allows setting a specific fill value that is not compared with the sorting_values.
:param array idx: (n,) uint array with values < max_idx
:param array sorting_values: (n,) array used to detect the minimum
:param fill: filling value for output[i] if there is no idx == i
:param array? output_values: (n,) dtype array
Useful if you want to select the min based on one array,
and get the value on another array
:param int? minlength: minimum shape for the output array.
:returns array: (max_idx+1,), dtype array such that
out[i] = output_values[argmin(sorting_values[idx == i])], or fill if there is no idx == i
Example
_______
>>> idx = numpy.array([0, 0, 0, 0, 0, 1,
import torch
import numpy as np
import pandas as pd
import scipy.stats as stats
import sys
import os
import time
from collections import OrderedDict
sys.path.insert(1, os.path.dirname(__file__))
import genotypeio, eigenmt
from core import *
import imp
import core
imp.reload(core)
from core import *
imp.reload(eigenmt)
def calculate_cis_nominal(genotypes_t, phenotype_t, residualizer=None, return_af=True):
"""
Calculate nominal associations
genotypes_t: genotypes x samples
phenotype_t: single phenotype
residualizer: Residualizer object (see core.py)
"""
p = phenotype_t.reshape(1,-1)
r_nominal_t, genotype_var_t, phenotype_var_t = calculate_corr(genotypes_t, p, residualizer=residualizer, return_var=True)
std_ratio_t = torch.sqrt(phenotype_var_t.reshape(1,-1) / genotype_var_t.reshape(-1,1))
r_nominal_t = r_nominal_t.squeeze()
r2_nominal_t = r_nominal_t.double().pow(2)
if residualizer is not None:
dof = residualizer.dof
else:
dof = p.shape[1] - 2
slope_t = r_nominal_t * std_ratio_t.squeeze()
tstat_t = r_nominal_t * torch.sqrt(dof / (1 - r2_nominal_t))
slope_se_t = (slope_t.double() / tstat_t).float()
# tdist = tfp.distributions.StudentT(np.float64(dof), loc=np.float64(0.0), scale=np.float64(1.0))
# pval_t = tf.scalar_mul(2, tdist.cdf(-tf.abs(tstat)))
if return_af:
af_t, ma_samples_t, ma_count_t = get_allele_stats(genotypes_t)
return tstat_t, slope_t, slope_se_t, af_t, ma_samples_t, ma_count_t
else:
return tstat_t, slope_t, slope_se_t
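# Sketch (not part of the original module): turning the returned t-statistics into
# two-sided nominal p-values on the CPU, mirroring the commented-out TensorFlow code
# above. `dof` must match the residual degrees of freedom used in calculate_cis_nominal.
def nominal_pvalues(tstat_t, dof):
    tstat = tstat_t.cpu().numpy()
    return 2 * stats.t.cdf(-np.abs(tstat), dof)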
def calculate_cis_permutations(genotypes_t, phenotype_t, permutation_ix_t,
residualizer=None, random_tiebreak=False):
"""Calculate nominal and empirical correlations"""
permutations_t = phenotype_t[permutation_ix_t]
r_nominal_t, genotype_var_t, phenotype_var_t = calculate_corr(genotypes_t, phenotype_t.reshape(1,-1),
residualizer=residualizer, return_var=True)
std_ratio_t = torch.sqrt(phenotype_var_t.reshape(1,-1) / genotype_var_t.reshape(-1,1))
r_nominal_t = r_nominal_t.squeeze(dim=-1)
std_ratio_t = std_ratio_t.squeeze(dim=-1)
corr_t = calculate_corr(genotypes_t, permutations_t, residualizer=residualizer).pow(2) # genotypes x permutations
corr_t = corr_t[~torch.isnan(corr_t).any(1),:]
if corr_t.shape[0] == 0:
raise ValueError('All correlations resulted in NaN. Please check phenotype values.')
r2_perm_t,_ = corr_t.max(0) # maximum correlation across permutations
r2_nominal_t = r_nominal_t.pow(2)
r2_nominal_t[torch.isnan(r2_nominal_t)] = -1 # workaround for nanargmax()
if not random_tiebreak:
ix = r2_nominal_t.argmax()
else:
ix = torch.nonzero(r2_nominal_t == r2_nominal_t.max(), as_tuple=True)[0]
ix = ix[torch.randint(0, len(ix), [1])[0]]
return r_nominal_t[ix], std_ratio_t[ix], ix, r2_perm_t, genotypes_t[ix]
def calculate_association(genotype_df, phenotype_s, covariates_df=None,
interaction_s=None, maf_threshold_interaction=0.05,
window=1000000, verbose=True):
"""
Standalone helper function for computing the association between
a set of genotypes and a single phenotype.
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
assert genotype_df.columns.equals(phenotype_s.index)
# copy to GPU
phenotype_t = torch.tensor(phenotype_s.values, dtype=torch.float).to(device)
genotypes_t = torch.tensor(genotype_df.values, dtype=torch.float).to(device)
impute_mean(genotypes_t)
dof = phenotype_s.shape[0] - 2
if covariates_df is not None:
assert phenotype_s.index.equals(covariates_df.index)
residualizer = Residualizer(torch.tensor(covariates_df.values, dtype=torch.float32).to(device))
dof -= covariates_df.shape[1]
else:
residualizer = None
if interaction_s is None:
res = calculate_cis_nominal(genotypes_t, phenotype_t, residualizer)
tstat, slope, slope_se, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
df = pd.DataFrame({
'pval_nominal':2*stats.t.cdf(-np.abs(tstat), dof),
'slope':slope, 'slope_se':slope_se,
'tstat':tstat, 'af':af, 'ma_samples':ma_samples, 'ma_count':ma_count,
}, index=genotype_df.index)
else:
interaction_t = torch.tensor(interaction_s.values.reshape(1,-1), dtype=torch.float32).to(device)
if maf_threshold_interaction > 0:
mask_s = pd.Series(True, index=interaction_s.index)
mask_s[interaction_s.sort_values(kind='mergesort').index[:interaction_s.shape[0]//2]] = False
interaction_mask_t = torch.BoolTensor(mask_s).to(device)
else:
interaction_mask_t = None
genotypes_t, mask_t = filter_maf_interaction(genotypes_t, interaction_mask_t=interaction_mask_t,
maf_threshold_interaction=maf_threshold_interaction)
res = calculate_interaction_nominal(genotypes_t, phenotype_t.unsqueeze(0), interaction_t, residualizer,
return_sparse=False)
tstat, b, b_se, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
mask = mask_t.cpu().numpy()
dof -= 2
df = pd.DataFrame({
'pval_g':2*stats.t.cdf(-np.abs(tstat[:,0]), dof), 'b_g':b[:,0], 'b_g_se':b_se[:,0],
'pval_i':2*stats.t.cdf(-np.abs(tstat[:,1]), dof), 'b_i':b[:,1], 'b_i_se':b_se[:,1],
'pval_gi':2*stats.t.cdf(-np.abs(tstat[:,2]), dof), 'b_gi':b[:,2], 'b_gi_se':b_se[:,2],
'af':af, 'ma_samples':ma_samples, 'ma_count':ma_count,
}, index=genotype_df.index[mask])
if df.index.str.startswith('chr').all(): # assume chr_pos_ref_alt_build format
df['position'] = df.index.map(lambda x: int(x.split('_')[1]))
return df
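# Usage sketch (illustrative; covers the no-interaction case only): genotype_df is
# variants x samples, phenotype_s is a per-sample Series, and their sample labels
# must align, as asserted inside calculate_association.
def _example_top_variant(genotype_df, phenotype_s, covariates_df=None):
    df = calculate_association(genotype_df, phenotype_s, covariates_df=covariates_df)
    return df['pval_nominal'].idxmin(), df['pval_nominal'].min()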
def map_nominal(genotype_df, variant_df, phenotype_df, phenotype_pos_df, prefix,
covariates_df=None, maf_threshold=0, interaction_df=None, maf_threshold_interaction=0.05,
group_s=None, window=1000000, run_eigenmt=False,
output_dir='.', write_top=True, write_stats=True, logger=None, verbose=True):
"""
cis-QTL mapping: nominal associations for all variant-phenotype pairs
Association results for each chromosome are written to parquet files
in the format <output_dir>/<prefix>.cis_qtl_pairs.<chr>.parquet
If interaction_df is provided, the top association per phenotype is
written to <output_dir>/<prefix>.cis_qtl_top_assoc.txt.gz unless
write_top is set to False, in which case it is returned as a DataFrame
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if logger is None:
logger = SimpleLogger()
if group_s is not None:
group_dict = group_s.to_dict()
logger.write('cis-QTL mapping: nominal associations for all variant-phenotype pairs')
logger.write(f' * {phenotype_df.shape[1]} samples')
logger.write(f' * {phenotype_df.shape[0]} phenotypes')
if covariates_df is not None:
assert np.all(phenotype_df.columns==covariates_df.index)
logger.write(f' * {covariates_df.shape[1]} covariates')
residualizer = Residualizer(torch.tensor(covariates_df.values, dtype=torch.float32).to(device))
dof = phenotype_df.shape[1] - 2 - covariates_df.shape[1]
else:
residualizer = None
dof = phenotype_df.shape[1] - 2
logger.write(f' * {variant_df.shape[0]} variants')
if interaction_df is not None:
assert interaction_df.index.equals(phenotype_df.columns)
logger.write(f" * including {interaction_df.shape[1]} interaction term(s)")
if maf_threshold_interaction > 0:
logger.write(f' * using {maf_threshold_interaction:.2f} MAF threshold')
elif maf_threshold > 0:
logger.write(f' * applying in-sample {maf_threshold} MAF filter')
genotype_ix = np.array([genotype_df.columns.tolist().index(i) for i in phenotype_df.columns])
genotype_ix_t = torch.from_numpy(genotype_ix).to(device)
if interaction_df is not None:
ni = interaction_df.shape[1]
dof -= 2 * ni
interaction_t = torch.tensor(interaction_df.values, dtype=torch.float32).to(device)
if maf_threshold_interaction > 0 and ni == 1:
mask_s = pd.Series(True, index=interaction_df.index)
mask_s[interaction_df[interaction_df.columns[0]].sort_values(kind='mergesort').index[:interaction_df.shape[0]//2]] = False
interaction_mask_t = torch.BoolTensor(mask_s).to(device)
else:
# TODO: implement filtering for multiple interactions?
interaction_mask_t = None
if ni == 1:
col_order = ['phenotype_id', 'variant_id', 'tss_distance', 'af', 'ma_samples', 'ma_count', 'pval_g', 'b_g', 'b_g_se',
'pval_i', 'b_i', 'b_i_se', 'pval_gi', 'b_gi', 'b_gi_se']
else:
col_order = (['phenotype_id', 'variant_id', 'tss_distance', 'af', 'ma_samples', 'ma_count', 'pval_g', 'b_g', 'b_g_se'] +
[k.replace('i', f"i{i+1}") for i in range(0,ni) for k in ['pval_i', 'b_i', 'b_i_se', 'pval_gi', 'b_gi', 'b_gi_se']])
# use column names instead of numbered interaction variables in output files
var_dict = []
for i,v in enumerate(interaction_df.columns, 1):
for c in ['pval_i', 'b_i', 'b_i_se']:
var_dict.append((c.replace('_i', f'_i{i}'), c.replace('_i', f'_{v}')))
for c in ['pval_gi', 'b_gi', 'b_gi_se']:
var_dict.append((c.replace('_gi', f'_gi{i}'), c.replace('_gi', f'_g-{v}')))
var_dict = dict(var_dict)
igc = genotypeio.InputGeneratorCis(genotype_df, variant_df, phenotype_df, phenotype_pos_df, group_s=group_s, window=window)
# iterate over chromosomes
best_assoc = []
start_time = time.time()
k = 0
logger.write(' * Computing associations')
for chrom in igc.chrs:
logger.write(f' Mapping chromosome {chrom}')
# allocate arrays
n = 0 # number of pairs
if group_s is None:
for i in igc.phenotype_pos_df[igc.phenotype_pos_df['chr'] == chrom].index:
j = igc.cis_ranges[i]
n += j[1] - j[0] + 1
else:
for i in igc.group_s[igc.phenotype_pos_df['chr'] == chrom].drop_duplicates().index:
j = igc.cis_ranges[i]
n += j[1] - j[0] + 1
chr_res = OrderedDict()
chr_res['phenotype_id'] = []
chr_res['variant_id'] = []
chr_res['tss_distance'] = np.empty(n, dtype=np.int32)
chr_res['af'] = np.empty(n, dtype=np.float32)
chr_res['ma_samples'] = np.empty(n, dtype=np.int32)
chr_res['ma_count'] = np.empty(n, dtype=np.int32)
if interaction_df is None:
chr_res['pval_nominal'] = np.empty(n, dtype=np.float64)
chr_res['slope'] = np.empty(n, dtype=np.float32)
chr_res['slope_se'] = np.empty(n, dtype=np.float32)
else:
chr_res['pval_g'] = np.empty(n, dtype=np.float64)
chr_res['b_g'] = np.empty(n, dtype=np.float32)
chr_res['b_g_se'] = np.empty(n, dtype=np.float32)
chr_res['pval_i'] = np.empty([n, ni], dtype=np.float64)
chr_res['b_i'] = np.empty([n, ni], dtype=np.float32)
chr_res['b_i_se'] = np.empty([n, ni], dtype=np.float32)
chr_res['pval_gi'] = np.empty([n, ni], dtype=np.float64)
chr_res['b_gi'] = np.empty([n, ni], dtype=np.float32)
chr_res['b_gi_se'] = np.empty([n, ni], dtype=np.float32)
start = 0
if group_s is None:
for k, (phenotype, genotypes, genotype_range, phenotype_id) in enumerate(igc.generate_data(chrom=chrom, verbose=verbose), k+1):
# copy genotypes to GPU
phenotype_t = torch.tensor(phenotype, dtype=torch.float).to(device)
genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
genotypes_t = genotypes_t[:,genotype_ix_t]
impute_mean(genotypes_t)
variant_ids = variant_df.index[genotype_range[0]:genotype_range[-1]+1]
tss_distance = np.int32(variant_df['pos'].values[genotype_range[0]:genotype_range[-1]+1] - igc.phenotype_tss[phenotype_id])
if maf_threshold > 0:
maf_t = calculate_maf(genotypes_t)
mask_t = maf_t >= maf_threshold
genotypes_t = genotypes_t[mask_t]
mask = mask_t.cpu().numpy().astype(bool)
variant_ids = variant_ids[mask]
tss_distance = tss_distance[mask]
if interaction_df is None:
res = calculate_cis_nominal(genotypes_t, phenotype_t, residualizer=residualizer)
tstat, slope, slope_se, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
n = len(variant_ids)
else:
genotypes_t, mask_t = filter_maf_interaction(genotypes_t, interaction_mask_t=interaction_mask_t,
maf_threshold_interaction=maf_threshold_interaction)
if genotypes_t.shape[0] > 0:
mask = mask_t.cpu().numpy()
variant_ids = variant_ids[mask]
res = calculate_interaction_nominal(genotypes_t, phenotype_t.unsqueeze(0), interaction_t,
residualizer=residualizer, return_sparse=False,
variant_ids=variant_ids)
tstat, b, b_se, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
tss_distance = tss_distance[mask]
n = len(variant_ids)
# top association
ix = np.nanargmax(np.abs(tstat[:,1+ni:]).max(1)) # top association among all interactions tested
# index order: 0, 1, 1+ni, 2, 2+ni, 3, 3+ni, ...
order = [0] + [i if j % 2 == 0 else i+ni for i in range(1,ni+1) for j in range(2)]
top_s = [phenotype_id, variant_ids[ix], tss_distance[ix], af[ix], ma_samples[ix], ma_count[ix]]
for i in order:
top_s += [tstat[ix,i], b[ix,i], b_se[ix,i]]
top_s = pd.Series(top_s, index=col_order)
if run_eigenmt: # compute eigenMT correction
top_s['tests_emt'] = eigenmt.compute_tests(genotypes_t, var_thresh=0.99, variant_window=200)
best_assoc.append(top_s)
else: # all genotypes in window were filtered out
n = 0
if n > 0:
chr_res['phenotype_id'].extend([phenotype_id]*n)
chr_res['variant_id'].extend(variant_ids)
chr_res['tss_distance'][start:start+n] = tss_distance
chr_res['af'][start:start+n] = af
chr_res['ma_samples'][start:start+n] = ma_samples
chr_res['ma_count'][start:start+n] = ma_count
if interaction_df is None:
chr_res['pval_nominal'][start:start+n] = tstat
chr_res['slope'][start:start+n] = slope
chr_res['slope_se'][start:start+n] = slope_se
else:
# columns: [g, i_1 ... i_n, gi_1, ... gi_n] --> 0, 1:1+ni, 1+ni:1+2*ni
chr_res['pval_g'][start:start+n] = tstat[:,0]
chr_res['b_g'][start:start+n] = b[:,0]
chr_res['b_g_se'][start:start+n] = b_se[:,0]
chr_res['pval_i'][start:start+n] = tstat[:,1:1+ni]
chr_res['b_i'][start:start+n] = b[:,1:1+ni]
chr_res['b_i_se'][start:start+n] = b_se[:,1:1+ni]
chr_res['pval_gi'][start:start+n] = tstat[:,1+ni:]
chr_res['b_gi'][start:start+n] = b[:,1+ni:]
chr_res['b_gi_se'][start:start+n] = b_se[:,1+ni:]
start += n # update pointer
else: # groups
for k, (phenotypes, genotypes, genotype_range, phenotype_ids, group_id) in enumerate(igc.generate_data(chrom=chrom, verbose=verbose), k+1):
# copy genotypes to GPU
genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
genotypes_t = genotypes_t[:,genotype_ix_t]
impute_mean(genotypes_t)
variant_ids = variant_df.index[genotype_range[0]:genotype_range[-1]+1]
# assuming that the TSS for all grouped phenotypes is the same
tss_distance = np.int32(variant_df['pos'].values[genotype_range[0]:genotype_range[-1]+1] - igc.phenotype_tss[phenotype_ids[0]])
if maf_threshold > 0:
maf_t = calculate_maf(genotypes_t)
mask_t = maf_t >= maf_threshold
genotypes_t = genotypes_t[mask_t]
mask = mask_t.cpu().numpy().astype(bool)
variant_ids
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
if not external_cursor:
cursor.close()
connection.commit()
connection.close()
return engine_temp
def _check_transfer(self, connection, cursor, transfer_id, user_id):
try:
sel = cursor.execute(
'''SELECT id
FROM transfers
WHERE id=? and
user_id=? LIMIT 1''',
[transfer_id, user_id])
except Exception as e:
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
try:
six.next(sel)
except StopIteration:
six.reraise(
UnknownObjectError,
UnknownObjectError("The transfer " + repr(transfer_id)
+ " is not valid or does not belong to "
"user " + repr(user_id)),
sys.exc_info()[2])
def _check_temporary(self, connection, cursor, temp_path_id, user_id):
try:
sel = cursor.execute(
'''SELECT temp_path_id
FROM temporary_paths
WHERE temp_path_id=? and
user_id=? LIMIT 1''',
[temp_path_id, user_id])
except Exception as e:
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
try:
six.next(sel)
except StopIteration:
six.reraise(
UnknownObjectError,
UnknownObjectError("The temporary path " + repr(temp_path_id)
+ " is not valid or does not belong to "
"user " + repr(user_id)),
sys.exc_info()[2])
def remove_transfer(self, transfer_id, user_id):
'''
Set the expiration date of the transfer associated to the engine file path
to yesterday, so that it will be disposed of as soon as no job needs it.
Parameters
----------
transfer_id: int
transfer identifier record to delete.
user_id: int
user identifier
'''
self.logger.debug("=> remove_transfer")
with self._lock:
connection = self._connect()
cursor = connection.cursor()
self._check_transfer(connection, cursor, transfer_id, user_id)
yesterday = date.today() - timedelta(days=1)
try:
cursor.execute(
'UPDATE transfers SET expiration_date=? WHERE id=?',
(yesterday, transfer_id))
except Exception as e:
connection.rollback()
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
connection.commit()
cursor.close()
connection.close()
self.clean()
def remove_temporary(self, temp_path_id, user_id):
'''
Set the expiration date of the temporary_paths associated to the engine
file path to yesterday, so that it will be disposed of as soon as no
job needs it.
Parameters
----------
temp_path_id: int
identifying the temporary path record to delete.
user_id: int
user identifier
'''
self.logger.debug("=> remove_temporary")
with self._lock:
connection = self._connect()
cursor = connection.cursor()
self._check_temporary(connection, cursor, temp_path_id, user_id)
yesterday = date.today() - timedelta(days=1)
try:
cursor.execute(
'UPDATE temporary_paths SET expiration_date=? WHERE temp_path_id=?', (yesterday, temp_path_id))
except Exception as e:
connection.rollback()
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
connection.commit()
cursor.close()
connection.close()
self.clean()
def get_transfer_information(self,
transfer_id,
user_id):
'''
Returns the information related to the transfer associated to the engine file path.
The transfer_id must identify a transfer belonging to the user,
otherwise an UnknownObjectError is raised.
Parameters
----------
transfer_id: int
identifier
user_id: int
user identifier
Returns
-------
info: tuple
(transfer_id, engine_file_path, client_file_path, expiration_date, workflow_id, client_paths, transfer_type, status)
'''
self.logger.debug("=> get_transfer_information")
with self._lock:
connection = self._connect()
cursor = connection.cursor()
self._check_transfer(connection, cursor, transfer_id, user_id)
try:
(engine_file_path,
client_file_path,
expiration_date,
workflow_id,
client_paths,
transfer_type,
status) = six.next(cursor.execute(
'''SELECT
engine_file_path,
client_file_path,
expiration_date,
workflow_id,
client_paths,
transfer_type,
status
FROM transfers
WHERE id=?''',
[transfer_id]))
except Exception as e:
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
engine_file_path = self._string_conversion(engine_file_path)
client_file_path = self._string_conversion(client_file_path)
expiration_date = self._str_to_date_conversion(expiration_date)
if client_paths:
client_paths = self._string_conversion(
client_paths).split(file_separator)
else:
client_paths = None
transfer_type = self._string_conversion(transfer_type)
status = self._string_conversion(status)
cursor.close()
connection.close()
return (transfer_id,
engine_file_path,
client_file_path,
expiration_date,
workflow_id,
client_paths,
transfer_type,
status)
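# Usage sketch (kept as comments since we are inside the database class body; `db`,
# `transfer_id`, `user_id` and `new_status` are illustrative):
#   (transfer_id, engine_path, client_path, expiration_date,
#    workflow_id, client_paths, transfer_type, status) = \
#       db.get_transfer_information(transfer_id, user_id)
#   db.set_transfer_status(transfer_id, new_status)  # a constants.FILE_TRANSFER_STATUS value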
def get_temporary_information(self,
temp_path_id,
user_id):
'''
Returns the information related to the temporary path associated to the id.
The temp_path_id must identify a TemporaryPath belonging to the user,
otherwise an UnknownObjectError is raised.
Parameters
----------
temp_path_id: int
identifier
Returns
-------
info: tuple
(temp_path_id, engine_file_path, expiration_date, workflow_id, status)
'''
self.logger.debug("=> get_temporary_information")
with self._lock:
connection = self._connect()
cursor = connection.cursor()
self._check_temporary(connection, cursor, temp_path_id, user_id)
try:
(engine_file_path,
expiration_date,
workflow_id,
status) = six.next(cursor.execute(
'''SELECT
engine_file_path,
expiration_date,
workflow_id,
status
FROM temporary_paths
WHERE temp_path_id=?''',
[temp_path_id]))
except Exception as e:
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
engine_file_path = self._string_conversion(engine_file_path)
expiration_date = self._str_to_date_conversion(expiration_date)
status = self._string_conversion(status)
cursor.close()
connection.close()
return (temp_path_id,
engine_file_path,
expiration_date,
workflow_id,
status)
def get_transfer_status(self, transfer_id, user_id):
'''
Returns the transfer status stored in the database.
'''
self.logger.debug("=> get_transfer_status")
with self._lock:
connection = self._connect()
cursor = connection.cursor()
self._check_transfer(connection, cursor, transfer_id, user_id)
try:
status = six.next(cursor.execute(
'SELECT status FROM transfers WHERE id=?',
[transfer_id]))[0]
except Exception as e:
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
status = self._string_conversion(status)
cursor.close()
connection.close()
return status
def get_temporary_status(self, temp_path_id, user_id):
'''
Returns the temporary path status stored in the database.
'''
self.logger.debug("=> get_temporary_status")
with self._lock:
connection = self._connect()
cursor = connection.cursor()
self._check_temporary(connection, cursor, temp_path_id, user_id)
try:
status = six.next(cursor.execute(
'SELECT status FROM temporary_paths WHERE temp_path_id=?',
[temp_path_id]))[0]
except Exception as e:
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
status = self._string_conversion(status)
cursor.close()
connection.close()
return status
def set_transfer_status(self, transfer_id, status):
'''
Updates the transfer status in the database.
The status must be valid (i.e. a string among the transfer status
strings defined in constants.FILE_TRANSFER_STATUS).
Parameters
----------
transfer_id: int
transfer identifier
status: string
transfer status as defined in constants.FILE_TRANSFER_STATUS
'''
# if type(engine_file_path) is int:
# return self.set_temporary_status(engine_file_path, status)
self.logger.debug("=> set_transfer_status")
with self._lock:
# TBI if the status is not valid raise an exception ??
connection = self._connect()
cursor = connection.cursor()
try:
cursor.execute(
'UPDATE transfers SET status=? WHERE id=?',
(status, transfer_id))
except Exception as e:
connection.rollback()
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
connection.commit()
cursor.close()
connection.close()
def set_transfer_paths(self, transfer_id, engine_path, client_path,
client_paths):
'''
Updates the transfer paths in the database.
Parameters
----------
transfer_id: int
transfer identifier
engine_path: str
path on engine side
client_path: str
path on client side
client_paths: list
filenames on client side
'''
# if type(engine_file_path) is int:
# return self.set_temporary_status(engine_file_path, status)
self.logger.debug("=> set_transfer_paths")
with self._lock:
# TBI if the status is not valid raise an exception ??
connection = self._connect()
cursor = connection.cursor()
if client_paths:
client_paths = file_separator.join(client_paths)
try:
cursor.execute(
'''UPDATE transfers SET
engine_file_path=?,
client_file_path=?,
client_paths=?
WHERE id=?''',
(engine_path, client_path, client_paths, transfer_id))
except Exception as e:
connection.rollback()
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
connection.commit()
cursor.close()
connection.close()
def set_temporary_status(self, temp_path_id, status):
'''
Updates the temporary path status in the database.
The status must be valid (i.e. a string among the transfer status
strings defined in constants.FILE_TRANSFER_STATUS).
@type status: string
@param status: transfer status as defined in constants.FILE_TRANSFER_STATUS
'''
self.logger.debug("=> set_temporary_status")
with self._lock:
# TBI if the status is not valid raise an exception ??
connection = self._connect()
cursor = connection.cursor()
try:
cursor.execute(
'UPDATE temporary_paths SET status=? WHERE temp_path_id=?',
(status, temp_path_id))
except Exception as e:
connection.rollback()
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
connection.commit()
cursor.close()
connection.close()
def set_transfer_type(self, transfer_id, transfer_type, user_id):
self.logger.debug("=> set_transfer_type")
with self._lock:
connection = self._connect()
cursor = connection.cursor()
try:
cursor.execute(
'UPDATE transfers SET transfer_type=? WHERE id=?', (transfer_type, transfer_id))
except Exception as e:
connection.rollback()
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
connection.commit()
cursor.close()
connection.close()
def add_workflow_ended_transfer(self, workflow_id, transfer_id):
'''
To signal that a transfer belonging to a workflow finished.
'''
self.logger.debug("=> add_workflow_ended_transfer")
separator = ", "
with self._lock:
connection = self._connect()
cursor = connection.cursor()
try:
str_ended_transfers = six.next(cursor.execute(
'SELECT ended_transfers FROM workflows WHERE id=?',
[workflow_id]))[0]
if str_ended_transfers is not None:
ended_transfers = self._string_conversion(
str_ended_transfers).split(separator)
ended_transfers.append(str(transfer_id))
str_ended_transfers = separator.join(ended_transfers)
else:
str_ended_transfers = transfer_id
cursor.execute(
'UPDATE workflows SET ended_transfers=? WHERE id=?',
(str_ended_transfers, workflow_id))
except Exception as e:
connection.rollback()
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
connection.commit()
cursor.close()
connection.close()
def pop_workflow_ended_transfer(self, workflow_id):
'''
Returns the ended transfers for a workflow and clears the ended transfer list.
'''
self.logger.debug("=> pop_workflow_ended_transfer")
separator = ", "
ended_transfers = []
with self._lock:
connection = self._connect()
cursor = connection.cursor()
try:
str_ended_transfers = six.next(cursor.execute(
'SELECT ended_transfers FROM workflows WHERE id=?',
[workflow_id]))[0]
if str_ended_transfers is not None:
ended_transfers = self._string_conversion(
str_ended_transfers).split(separator)
cursor.execute(
'UPDATE workflows SET ended_transfers=? WHERE id=?',
(None, workflow_id))
except Exception as e:
connection.rollback()
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
connection.commit()
cursor.close()
connection.close()
return ended_transfers
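# Usage sketch (kept as comments; identifiers are illustrative): the engine signals
# each finished transfer with add_workflow_ended_transfer(), and a monitoring loop
# drains the list atomically with pop_workflow_ended_transfer():
#   db.add_workflow_ended_transfer(workflow_id, transfer_id)
#   ...
#   for tid in db.pop_workflow_ended_transfer(workflow_id):
#       handle_finished_transfer(tid)   # hypothetical caller-side handler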
#
# WORKFLOWS
def add_workflow(self,
user_id,
engine_workflow,
login=None):
'''
Registers a workflow in the database and returns identifiers for every
workflow element.
* user_id *string*
User identifier
* engine_workflow *EngineWorkflow*
* returns: *tuple(string, dictionary, dictionary)*
* workflow identifier
* dictionary tr_id -> EngineTransfer
* dictionary job_id -> EngineJob
'''
# get back the workflow id first
self.logger.debug("=> add_workflow")
with self._lock:
# try to allocate enough file counters before opening a new cursor
needed_files = len(engine_workflow.transfer_mapping) \
+ len(engine_workflow.job_mapping) * 2
self.ensure_file_numbers_available(needed_files)
connection = self._connect()
cursor = connection.cursor()
name = None
if engine_workflow.name is not None:
name = six.ensure_text(engine_workflow.name, 'utf8')
try:
cursor.execute('''INSERT INTO workflows
(user_id,
pickled_engine_workflow,
expiration_date,
name,
status,
last_status_update,
queue)
                    VALUES (?, ?, ?, ?,
from xapi import success, Rpc_light_failure, InternalError, UnmarshalException, TypeError, is_long, UnknownMethod
import xapi
import sys
import json
import argparse
import traceback
import logging
class Unimplemented(Rpc_light_failure):
def __init__(self, arg_0):
Rpc_light_failure.__init__(self, "Unimplemented", [ arg_0 ])
if not isinstance(arg_0, str) and not isinstance(arg_0, unicode):
raise (TypeError("string", repr(arg_0)))
self.arg_0 = arg_0
class Datapath_server_dispatcher:
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
def __init__(self, impl):
"""impl is a proxy object whose methods contain the implementation"""
self._impl = impl
def open(self, args):
"""type-check inputs, call implementation, type-check outputs and return"""
if not isinstance(args, dict):
raise (UnmarshalException('arguments', 'dict', repr(args)))
if not('dbg' in args):
raise UnmarshalException('argument missing', 'dbg', '')
dbg = args["dbg"]
if not isinstance(dbg, str) and not isinstance(dbg, unicode):
raise (TypeError("string", repr(dbg)))
if not('uri' in args):
raise UnmarshalException('argument missing', 'uri', '')
uri = args["uri"]
if not isinstance(uri, str) and not isinstance(uri, unicode):
raise (TypeError("string", repr(uri)))
if not('persistent' in args):
raise UnmarshalException('argument missing', 'persistent', '')
persistent = args["persistent"]
if not isinstance(persistent, bool):
raise (TypeError("bool", repr(persistent)))
results = self._impl.open(dbg, uri, persistent)
return results
def attach(self, args):
"""type-check inputs, call implementation, type-check outputs and return"""
if not isinstance(args, dict):
raise (UnmarshalException('arguments', 'dict', repr(args)))
if not('dbg' in args):
raise UnmarshalException('argument missing', 'dbg', '')
dbg = args["dbg"]
if not isinstance(dbg, str) and not isinstance(dbg, unicode):
raise (TypeError("string", repr(dbg)))
if not('uri' in args):
raise UnmarshalException('argument missing', 'uri', '')
uri = args["uri"]
if not isinstance(uri, str) and not isinstance(uri, unicode):
raise (TypeError("string", repr(uri)))
if not('domain' in args):
raise UnmarshalException('argument missing', 'domain', '')
domain = args["domain"]
if not isinstance(domain, str) and not isinstance(domain, unicode):
raise (TypeError("string", repr(domain)))
results = self._impl.attach(dbg, uri, domain)
if not isinstance(results['domain_uuid'], str) and not isinstance(results['domain_uuid'], unicode):
raise (TypeError("string", repr(results['domain_uuid'])))
if results['implementation'][0] == 'Blkback':
if not isinstance(results['implementation'][1], str) and not isinstance(results['implementation'][1], unicode):
raise (TypeError("string", repr(results['implementation'][1])))
elif results['implementation'][0] == 'Tapdisk3':
if not isinstance(results['implementation'][1], str) and not isinstance(results['implementation'][1], unicode):
raise (TypeError("string", repr(results['implementation'][1])))
elif results['implementation'][0] == 'Qdisk':
if not isinstance(results['implementation'][1], str) and not isinstance(results['implementation'][1], unicode):
raise (TypeError("string", repr(results['implementation'][1])))
return results
def activate(self, args):
"""type-check inputs, call implementation, type-check outputs and return"""
if not isinstance(args, dict):
raise (UnmarshalException('arguments', 'dict', repr(args)))
if not('dbg' in args):
raise UnmarshalException('argument missing', 'dbg', '')
dbg = args["dbg"]
if not isinstance(dbg, str) and not isinstance(dbg, unicode):
raise (TypeError("string", repr(dbg)))
if not('uri' in args):
raise UnmarshalException('argument missing', 'uri', '')
uri = args["uri"]
if not isinstance(uri, str) and not isinstance(uri, unicode):
raise (TypeError("string", repr(uri)))
if not('domain' in args):
raise UnmarshalException('argument missing', 'domain', '')
domain = args["domain"]
if not isinstance(domain, str) and not isinstance(domain, unicode):
raise (TypeError("string", repr(domain)))
results = self._impl.activate(dbg, uri, domain)
return results
def deactivate(self, args):
"""type-check inputs, call implementation, type-check outputs and return"""
if not isinstance(args, dict):
raise (UnmarshalException('arguments', 'dict', repr(args)))
if not('dbg' in args):
raise UnmarshalException('argument missing', 'dbg', '')
dbg = args["dbg"]
if not isinstance(dbg, str) and not isinstance(dbg, unicode):
raise (TypeError("string", repr(dbg)))
if not('uri' in args):
raise UnmarshalException('argument missing', 'uri', '')
uri = args["uri"]
if not isinstance(uri, str) and not isinstance(uri, unicode):
raise (TypeError("string", repr(uri)))
if not('domain' in args):
raise UnmarshalException('argument missing', 'domain', '')
domain = args["domain"]
if not isinstance(domain, str) and not isinstance(domain, unicode):
raise (TypeError("string", repr(domain)))
results = self._impl.deactivate(dbg, uri, domain)
return results
def detach(self, args):
"""type-check inputs, call implementation, type-check outputs and return"""
if not isinstance(args, dict):
raise (UnmarshalException('arguments', 'dict', repr(args)))
if not('dbg' in args):
raise UnmarshalException('argument missing', 'dbg', '')
dbg = args["dbg"]
if not isinstance(dbg, str) and not isinstance(dbg, unicode):
raise (TypeError("string", repr(dbg)))
if not('uri' in args):
raise UnmarshalException('argument missing', 'uri', '')
uri = args["uri"]
if not isinstance(uri, str) and not isinstance(uri, unicode):
raise (TypeError("string", repr(uri)))
if not('domain' in args):
raise UnmarshalException('argument missing', 'domain', '')
domain = args["domain"]
if not isinstance(domain, str) and not isinstance(domain, unicode):
raise (TypeError("string", repr(domain)))
results = self._impl.detach(dbg, uri, domain)
return results
def close(self, args):
"""type-check inputs, call implementation, type-check outputs and return"""
if not isinstance(args, dict):
raise (UnmarshalException('arguments', 'dict', repr(args)))
if not('dbg' in args):
raise UnmarshalException('argument missing', 'dbg', '')
dbg = args["dbg"]
if not isinstance(dbg, str) and not isinstance(dbg, unicode):
raise (TypeError("string", repr(dbg)))
if not('uri' in args):
raise UnmarshalException('argument missing', 'uri', '')
uri = args["uri"]
if not isinstance(uri, str) and not isinstance(uri, unicode):
raise (TypeError("string", repr(uri)))
results = self._impl.close(dbg, uri)
return results
def _dispatch(self, method, params):
"""type check inputs, call implementation, type check outputs and return"""
args = params[0]
if method == "Datapath.open":
return success(self.open(args))
elif method == "Datapath.attach":
return success(self.attach(args))
elif method == "Datapath.activate":
return success(self.activate(args))
elif method == "Datapath.deactivate":
return success(self.deactivate(args))
elif method == "Datapath.detach":
return success(self.detach(args))
elif method == "Datapath.close":
return success(self.close(args))
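# Minimal sketch of how the dispatcher above is typically driven; the
# implementation class, the debug tag and the URI are illustrative values,
# not part of this module:
#
#   dispatcher = Datapath_server_dispatcher(MyDatapathImpl())
#   result = dispatcher._dispatch(
#       "Datapath.open",
#       [{"dbg": "debug-id", "uri": "sr-uri", "persistent": True}])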
class Datapath_skeleton:
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
def __init__(self):
pass
def open(self, dbg, uri, persistent):
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
raise Unimplemented("Datapath.open")
def attach(self, dbg, uri, domain):
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
raise Unimplemented("Datapath.attach")
def activate(self, dbg, uri, domain):
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
raise Unimplemented("Datapath.activate")
def deactivate(self, dbg, uri, domain):
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
raise Unimplemented("Datapath.deactivate")
def detach(self, dbg, uri, domain):
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
raise Unimplemented("Datapath.detach")
def close(self, dbg, uri):
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
raise Unimplemented("Datapath.close")
class Datapath_test:
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
def __init__(self):
pass
def open(self, dbg, uri, persistent):
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
result = {}
return result
def attach(self, dbg, uri, domain):
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
result = {}
result["backend"] = { "domain_uuid": "string", "implementation": None }
return result
def activate(self, dbg, uri, domain):
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
result = {}
return result
def deactivate(self, dbg, uri, domain):
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
result = {}
return result
def detach(self, dbg, uri, domain):
"""Xapi will call the functions here on VM start/shutdown/suspend/resume/migrate. Every function is idempotent. Every function takes a domain parameter which allows the implementation to track how many domains are currently using the volume."""
result = {}
return result
def close(self, dbg, uri):
        response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def proxy_delete_namespaced_service(self, namespace, name, **kwargs):
"""
proxy DELETE requests to Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_delete_namespaced_service(namespace, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the Service (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method proxy_delete_namespaced_service" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `proxy_delete_namespaced_service`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `proxy_delete_namespaced_service`")
resource_path = '/api/v1/proxy/namespaces/{namespace}/services/{name}'.replace('{format}', 'json')
method = 'DELETE'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def proxy_options_namespaced_service(self, namespace, name, **kwargs):
"""
proxy OPTIONS requests to Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_options_namespaced_service(namespace, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the Service (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method proxy_options_namespaced_service" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `proxy_options_namespaced_service`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `proxy_options_namespaced_service`")
resource_path = '/api/v1/proxy/namespaces/{namespace}/services/{name}'.replace('{format}', 'json')
method = 'OPTIONS'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def proxy_get_namespaced_service_13(self, namespace, name, path, **kwargs):
"""
proxy GET requests to Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_get_namespaced_service_13(namespace, name, path, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the Service (required)
:param str path: path to the resource (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'name', 'path']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method proxy_get_namespaced_service_13" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `proxy_get_namespaced_service_13`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `proxy_get_namespaced_service_13`")
# verify the required parameter 'path' is set
if ('path' not in params) or (params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `proxy_get_namespaced_service_13`")
resource_path = '/api/v1/proxy/namespaces/{namespace}/services/{name}/{path}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
if 'path' in params:
path_params['path'] = params['path']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def proxy_head_namespaced_service_14(self, namespace, name, path, **kwargs):
"""
proxy HEAD requests to Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_head_namespaced_service_14(namespace, name, path, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the Service (required)
:param str path: path to the resource (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'name', 'path']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method proxy_head_namespaced_service_14" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `proxy_head_namespaced_service_14`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `proxy_head_namespaced_service_14`")
# verify the required parameter 'path' is set
if ('path' not in params) or (params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `proxy_head_namespaced_service_14`")
resource_path = '/api/v1/proxy/namespaces/{namespace}/services/{name}/{path}'.replace('{format}', 'json')
method = 'HEAD'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
if 'path' in params:
path_params['path'] = params['path']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def proxy_put_namespaced_service_15(self, namespace, name, path, **kwargs):
"""
proxy PUT requests to Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_put_namespaced_service_15(namespace, name, path, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the Service (required)
:param str path: path to the resource (required)
:return: str
If the method is called asynchronously,
returns the request thread.
# Source repository: murthyn/composer
# Copyright 2021 MosaicML. All Rights Reserved.
"""Callback to save checkpoints during training."""
from __future__ import annotations
import logging
import os
import textwrap
from typing import Callable, Optional, Union
from composer.core import Event, State
from composer.core.callback import Callback
from composer.core.time import Time, TimeUnit
from composer.loggers import Logger
from composer.utils import checkpoint, dist, run_directory
log = logging.getLogger(__name__)
__all__ = ["CheckpointSaver", "checkpoint_periodically"]
def checkpoint_periodically(interval: Union[str, int, Time]) -> Callable[[State, Event], bool]:
"""Helper function to create a checkpoint scheduler according to a specified interval.
Args:
interval (Union[str, int, Time]): The interval describing how often checkpoints should be
saved. If an integer, it will be assumed to be in :attr:`~TimeUnit.EPOCH`\\s.
Otherwise, the unit must be either :attr:`TimeUnit.EPOCH` or :attr:`TimeUnit.BATCH`.
Checkpoints will be saved every ``n`` batches or epochs (depending on the unit),
and at the end of training.
Returns:
Callable[[State, Event], bool]: A function that can be passed as the ``save_interval``
argument into the :class:`CheckpointSaver`.
"""
if isinstance(interval, str):
interval = Time.from_timestring(interval)
if isinstance(interval, int):
interval = Time(interval, TimeUnit.EPOCH)
if interval.unit == TimeUnit.EPOCH:
save_event = Event.EPOCH_CHECKPOINT
elif interval.unit == TimeUnit.BATCH:
save_event = Event.BATCH_CHECKPOINT
else:
raise NotImplementedError(
f"Unknown checkpointing interval: {interval.unit}. Must be TimeUnit.EPOCH or TimeUnit.BATCH.")
last_checkpoint_batch = None
def save_interval(state: State, event: Event):
nonlocal last_checkpoint_batch
if state.get_elapsed_duration() >= 1.0:
# if doing batch-wise checkpointing, and we saved a checkpoint at the batch_checkpoint event
# right before the epoch_checkpoint event, do not save another checkpoint at the epoch_checkpoint
# event if the batch count didn't increase.
if state.timer.batch != last_checkpoint_batch:
last_checkpoint_batch = state.timer.batch
return True
if save_event == Event.EPOCH_CHECKPOINT:
count = state.timer.epoch
elif save_event == Event.BATCH_CHECKPOINT:
count = state.timer.batch
else:
raise RuntimeError(f"Invalid save_event: {save_event}")
if event == save_event and int(count) % int(interval) == 0:
last_checkpoint_batch = state.timer.batch
return True
return False
return save_interval
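# Usage sketch: the returned predicate is meant to be passed as the
# ``save_interval`` argument of ``CheckpointSaver`` below ("2ep" is just an
# illustrative interval string):
#
#   saver = CheckpointSaver(save_folder="checkpoints",
#                           save_interval=checkpoint_periodically("2ep"))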
class CheckpointSaver(Callback):
"""Callback to save checkpoints.
.. note::
        If the ``save_folder`` argument is specified when constructing the :class:`~composer.trainer.trainer.Trainer`,
then the :class:`.CheckpointSaver` callback need not be constructed manually. However, for advanced
checkpointing use cases (such as saving a weights-only checkpoint at one interval and the full training state
at another interval), instance(s) of this :class:`.CheckpointSaver` callback can be specified in the
``callbacks`` argument of the :class:`~composer.trainer.trainer.Trainer`, as shown in the example below.
Example
.. testsetup::
from composer.callbacks.checkpoint_saver import CheckpointSaver
.. doctest::
>>> trainer = Trainer(..., callbacks=[
... CheckpointSaver(
... save_folder='checkpoints',
... name_format="ep{epoch}-ba{batch}/rank_{rank}",
... save_latest_format="latest/rank_{rank}",
... save_interval="1ep",
... weights_only=False,
... )
... ])
Args:
save_folder (str): Folder where checkpoints are saved.
If an absolute path is specified, then
that path will be used. Otherwise, the ``save_folder`` will be relative
to the folder returned by :meth:`~.run_directory.get_run_directory`.
If the ``save_folder`` does not exist, it will be created.
name_format (str, optional): A format string describing how to name checkpoints.
(default: ``'ep{epoch}-ba{batch}/rank_{rank}'``)
Checkpoints will be saved approximately to ``{save_folder}/{name_format.format(...)}``.
See :func:`.format_name` for the available format variables.
.. note::
* By default, only the rank zero process will save a checkpoint file.
* When using DeepSpeed, each rank will save a checkpoint file in tarball format. DeepSpeed
requires tarball format, as it saves model and optimizer states in separate files.
                    Ensure that ``'{rank}'`` appears within the ``name_format``. Otherwise, multiple ranks
may attempt to write to the same file(s), leading to corrupted checkpoints. If no tarball file
extension is specified, ``'.tar'`` will be used.
* To use compression (regardless of whether DeepSpeed is enabled), set the file extension
to ``'.tar.gz'``, ``'.tgz'``, ``'.tar.bzip'``, or ``'.tar.lzma'`` (depending on the desired
compression algorithm).
.. warning::
Using compression will block the training loop while checkpoints are being compressed. As such, we
recommend saving checkpoints without compression.
Consider the following scenario, where:
* The default ``save_folder='checkpoints'`` is used.
* The default ``name_format='ep{epoch}-ba{batch}/rank_{rank}'`` is used.
* The current epoch count is ``1``.
* The current batch count is ``42``.
When DeepSpeed is not being used, the rank zero process will save the checkpoint to ``"checkpoints/ep1-ba42/rank_0"``.
When DeepSpeed is being used, each rank (process) will save checkpoints to::
checkpoints/ep1-ba42/rank_0.tar
checkpoints/ep1-ba42/rank_1.tar
checkpoints/ep1-ba42/rank_2.tar
...
save_latest_format (str, optional): A format string for a symlink which points to the last saved checkpoint.
(default: ``'latest/rank_{rank}'``)
Symlinks will be created approximately at ``{save_folder}/{save_latest_format.format(...)}``.
See :func:`.format_name` for the available format variables.
To disable symlinks, set this parameter to ``None``.
Consider the following scenario, where:
* The default ``save_folder='checkpoints'`` is used.
* The default ``name_format='ep{epoch}-ba{batch}/rank_{rank}'`` is used.
* The default ``save_latest_format='latest/rank_{rank}'`` is used.
* The current epoch count is ``1``.
* The current batch count is ``42``.
When DeepSpeed is not being used, the rank zero process will save the checkpoint to ``'checkpoints/ep1-ba42/rank_0'``,
and a symlink will be created at ``'checkpoints/latest/rank_0' -> 'checkpoints/ep1-ba42/rank_0'``
When DeepSpeed is being used, each rank (process) will save checkpoints to::
checkpoints/ep1-ba42/rank_0.tar
checkpoints/ep1-ba42/rank_1.tar
checkpoints/ep1-ba42/rank_2.tar
...
Corresponding symlinks will be created at::
checkpoints/latest/rank_0.tar -> checkpoints/ep1-ba42/rank_0.tar
checkpoints/latest/rank_1.tar -> checkpoints/ep1-ba42/rank_1.tar
checkpoints/latest/rank_2.tar -> checkpoints/ep1-ba42/rank_2.tar
...
overwrite (bool, optional): Whether existing checkpoints should be overridden.
            If ``False`` (the default), then the ``checkpoint_folder`` must either not exist or be empty.
(default: ``False``)
save_interval (Time | str | int | (State, Event) -> bool): A :class:`Time`, time-string, integer (in epochs),
or a function that takes (state, event) and returns a boolean whether a checkpoint should be saved.
If an integer, checkpoints will be saved every n epochs.
If :class:`Time` or a time-string, checkpoints will be saved according to this interval.
.. seealso:: :func:`.checkpoint_periodically`
If a function, then this function should take two arguments (:class:`State`, :class:`Event`).
The first argument will be the current state of the trainer, and the second argument will be
be :attr:`.Event.BATCH_CHECKPOINT` or :attr:`.EPOCH_CHECKPOINT` (depending on the current training
progress). It should return ``True`` if a checkpoint should be saved given the current state and
event.
weights_only (bool): If ``True``, save only the model weights instead of the entire training state.
            This parameter must be ``False`` when using DeepSpeed. (default: ``False``)
Attributes:
checkpoint_folder (str): The folder in which checkpoints are stored. If an absolute path was specified for
``save_folder`` upon instantiation, then that path will be used. Otherwise, this folder is relative to
the run directory of the training run (e.g. ``{run_directory}/{save_folder}``).
If no run directory is provided, then by default, it is of the form
``runs/<timestamp>/rank_<GLOBAL_RANK>/<save_folder>`` where ``timestamp``
is the start time of the run in iso-format, ``GLOBAL_RANK`` is the global rank of the process,
and ``save_folder`` is the save_folder argument provided upon construction.
.. seealso:: :mod:`~.run_directory` for details on the format of the run directory
and how to customize it.
saved_checkpoints (Dict[Timestamp, List[str]]): A dictionary mapping a save timestamp
to a list of filepaths corresponding to the checkpoints saved at that time.
.. note:: When using DeepSpeed, the index of a filepath in each list corresponds to the
global rank of the process that wrote that file. These filepaths are valid only on
the global rank's node. Otherwise, when not using DeepSpeed, this list will contain
only one filepath since only rank zero saves checkpoints.
"""
def __init__(
self,
save_folder: str = "checkpoints",
name_format: str = "ep{epoch}-ba{batch}/rank_{rank}",
save_latest_format: Optional[str] = "latest/rank_{rank}",
overwrite: bool = False,
save_interval: Union[Time, str, int, Callable[[State, Event], bool]] = "1ep",
weights_only: bool = False,
):
if not callable(save_interval):
save_interval = checkpoint_periodically(save_interval)
self.checkpoint_folder = os.path.join(run_directory.get_run_directory(), save_folder)
self.name_format = name_format
self.save_latest_format = save_latest_format
self.overwrite = overwrite
self.save_interval = save_interval
self.saved_checkpoints = {}
self.weights_only = weights_only
def init(self, state: State, logger: Logger) -> None:
# Each rank will attempt to create the checkpoint folder.
# If the folder is not parameterized by rank, then exist_ok must be True, as the folder will be the same on all ranks.
os.makedirs(self.checkpoint_folder, mode=0o775, exist_ok=True)
if not self.overwrite:
if any(x.startswith(".") for x in os.listdir(self.checkpoint_folder)):
raise RuntimeError(
textwrap.dedent(f"""\
Checkpoint folder {self.checkpoint_folder} is not empty. When using {type(self).__name__}(overwrite=True, ...),
the checkpoint folder must not contain any existing checkpoints."""))
# Ensure no rank proceeds (and potentially attempts to write to the folder), until all ranks have validated that the folder is empty.
dist.barrier()
def fit_start(self, state: State, logger: Logger) -> None:
if state.is_model_deepspeed:
that the resource was updated. An `RFC3339`__ formatted datetime string.
__ https://tools.ietf.org/html/rfc3339
:param str created_by_id: (optional)
OCID of the user who created the resource.
:param str updated_by_id: (optional)
OCID of the user who updated the resource.
:param list[str] fields: (optional)
Specifies the fields to return in a glossary summary response.
Allowed values are: "key", "displayName", "description", "catalogId", "lifecycleState", "timeCreated", "uri", "workflowStatus"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.GlossaryCollection`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/glossaries"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"lifecycle_state",
"time_created",
"time_updated",
"created_by_id",
"updated_by_id",
"fields",
"sort_by",
"sort_order",
"limit",
"page",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_glossaries got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'fields' in kwargs:
fields_allowed_values = ["key", "displayName", "description", "catalogId", "lifecycleState", "timeCreated", "uri", "workflowStatus"]
for fields_item in kwargs['fields']:
if fields_item not in fields_allowed_values:
raise ValueError(
"Invalid value for `fields`, must be one of {0}".format(fields_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"displayName": kwargs.get("display_name", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing),
"timeCreated": kwargs.get("time_created", missing),
"timeUpdated": kwargs.get("time_updated", missing),
"createdById": kwargs.get("created_by_id", missing),
"updatedById": kwargs.get("updated_by_id", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="GlossaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="GlossaryCollection")
def list_job_definitions(self, catalog_id, **kwargs):
"""
Returns a list of job definitions within a data catalog.
:param str catalog_id: (required)
Unique catalog identifier.
:param str display_name: (optional)
A filter to return only resources that match the entire display name given. The match is not case sensitive.
:param str lifecycle_state: (optional)
A filter to return only resources that match the specified lifecycle state. The value is case insensitive.
:param str job_type: (optional)
Job type.
:param bool is_incremental: (optional)
Whether job definition is an incremental harvest (true) or a full harvest (false).
:param str data_asset_key: (optional)
Unique data asset key.
:param str connection_key: (optional)
Unique connection key.
:param datetime time_created: (optional)
Time that the resource was created. An `RFC3339`__ formatted datetime string.
__ https://tools.ietf.org/html/rfc3339
:param datetime time_updated: (optional)
Time that the resource was updated. An `RFC3339`__ formatted datetime string.
__ https://tools.ietf.org/html/rfc3339
:param str created_by_id: (optional)
OCID of the user who created the resource.
:param str updated_by_id: (optional)
OCID of the user who updated the resource.
:param str sample_data_size_in_mbs: (optional)
The sample data size in MB, specified as number of rows, for a metadata harvest.
:param list[str] fields: (optional)
Specifies the fields to return in a job definition summary response.
Allowed values are: "key", "displayName", "description", "catalogId", "jobType", "lifecycleState", "timeCreated", "isSampleDataExtracted", "uri"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.JobDefinitionCollection`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/jobDefinitions"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"lifecycle_state",
"job_type",
"is_incremental",
"data_asset_key",
"connection_key",
"time_created",
"time_updated",
"created_by_id",
"updated_by_id",
"sample_data_size_in_mbs",
"fields",
"sort_by",
"sort_order",
"limit",
"page",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_job_definitions got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'fields' in kwargs:
fields_allowed_values = ["key", "displayName", "description", "catalogId", "jobType", "lifecycleState", "timeCreated", "isSampleDataExtracted", "uri"]
for fields_item in kwargs['fields']:
if fields_item not in fields_allowed_values:
raise ValueError(
"Invalid value for `fields`, must be one of {0}".format(fields_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"displayName": kwargs.get("display_name", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing),
"jobType": kwargs.get("job_type", missing),
"isIncremental": kwargs.get("is_incremental", missing),
"dataAssetKey": kwargs.get("data_asset_key", missing),
"connectionKey": kwargs.get("connection_key", missing),
"timeCreated": kwargs.get("time_created", missing),
"timeUpdated": kwargs.get("time_updated", missing),
"createdById": kwargs.get("created_by_id", missing),
"updatedById": kwargs.get("updated_by_id", missing),
"sampleDataSizeInMBs": kwargs.get("sample_data_size_in_mbs", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="JobDefinitionCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="JobDefinitionCollection")
def list_job_executions(self, catalog_id, job_key, **kwargs):
"""
Returns a list of job executions for a job.
:param str catalog_id: (required)
Unique catalog identifier.
:param str job_key: (required)
Unique job key.
:param str lifecycle_state: (optional)
            Job
# Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main OpenHTF entry point."""
import argparse
import collections
import copy
import functools
import inspect
import itertools
import json
import logging
import signal
import socket
import sys
import textwrap
import threading
import uuid
import weakref
from types import LambdaType
import mutablerecords
from enum import Enum
from openhtf import core
from openhtf import plugs
from openhtf import util
from openhtf.core.measurements import Dimension, Measurement, measures
from openhtf.core.monitors import monitors
from openhtf.core import phase_executor
from openhtf.core import station_api
from openhtf.core import test_record
from openhtf.plugs import plug
from openhtf.util import conf
from openhtf.util import data
from openhtf.util import functions
from openhtf.util import logs
from openhtf.util import units
__version__ = util.get_version()
_LOG = logging.getLogger(__name__)
conf.declare('capture_source', description=textwrap.dedent(
'''Whether to capture the source of phases and the test module. This
defaults to False since this potentially reads many files and makes large
string copies.
Set to 'true' if you want to capture your test's source.'''),
default_value=False)
class UnrecognizedTestUidError(Exception):
"""Raised when information is requested about an unknown Test UID."""
class InvalidTestPhaseError(Exception):
"""Raised when an invalid method is decorated."""
class InvalidTestStateError(Exception):
"""Raised when an operation is attempted in an invalid state."""
class Test(object):
"""An object that represents an OpenHTF test.
Example:
def PhaseOne(test):
# Integrate more widgets
def PhaseTwo(test):
# Analyze widget integration status
Test(PhaseOne, PhaseTwo).execute()
Note that Test() objects *must* be created in the main thread, but can be
.execute()'d in a separate thread.
"""
TEST_INSTANCES = weakref.WeakValueDictionary()
def __init__(self, *phases, **metadata):
# Some sanity checks on special metadata keys we automatically fill in.
if 'config' in metadata:
raise KeyError(
'Invalid metadata key "config", it will be automatically populated.')
self.created_time_millis = util.time_millis()
self.last_run_time_millis = None
self._test_options = TestOptions()
self._lock = threading.Lock()
self._executor = None
self._test_desc = TestDescriptor(
phases, test_record.CodeInfo.uncaptured(), metadata)
if conf.capture_source:
# First, we copy the phases with the real CodeInfo for them.
phases = [
mutablerecords.CopyRecord(
phase, code_info=test_record.CodeInfo.for_function(phase.func))
for phase in self._test_desc.phases]
# Then we replace the TestDescriptor with one that stores the test
# module's CodeInfo as well as our newly copied phases.
code_info = test_record.CodeInfo.for_module_from_stack(levels_up=2)
self._test_desc = self._test_desc._replace(
code_info=code_info, phases=phases)
# Make sure configure() gets called at least once before Execute(). The
# user might call configure() again to override options, but we don't want
# to force them to if they want to use defaults. For default values, see
# the class definition of TestOptions.
if 'test_name' in metadata:
# Allow legacy metadata key for specifying test name.
self.configure(name=metadata['test_name'])
else:
self.configure()
# This is a noop if the server is already running, otherwise start it now
# that we have at least one Test instance.
station_api.start_server()
@classmethod
def from_uid(cls, test_uid):
"""Get Test by UID.
Returns: Test object, given by UID.
Raises:
UnrecognizedTestUidError: If the test_uid is not recognized.
"""
test = cls.TEST_INSTANCES.get(test_uid)
if not test:
raise UnrecognizedTestUidError('Test UID %s not recognized' % test_uid)
return test
@property
def uid(self):
if self._executor is not None:
return self._executor.uid
def make_uid(self):
"""Returns the next test execution's UID.
This identifier must be unique but trackable across invocations of
execute(). Therefore, it's made of three parts separated by ':'
* Process-specific (decided on process start up)
* Test descriptor-specific (decided on descriptor creation)
* Execution-specific (decided on test start)
"""
return ':'.join([
station_api.STATION_API.UID, self.descriptor.uid, uuid.uuid4().hex[:16]])
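    # Illustrative UID shape (the hex values are made up): the three
    # ':'-separated parts come from the station API process, the test
    # descriptor, and this particular execution, respectively:
    #
    #   "1f2e3d4c5b6a7988:0123456789abcdef:fedcba9876543210"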
@property
def descriptor(self):
"""Static data about this test, does not change across Execute() calls."""
return self._test_desc
@property
def state(self):
"""Transient state info about the currently executing test, or None."""
with self._lock:
if self._executor:
return self._executor.test_state
def get_option(self, option):
return getattr(self._test_options, option)
def add_output_callbacks(self, *callbacks):
"""Add the given function as an output module to this test."""
self._test_options.output_callbacks.extend(callbacks)
def configure(self, **kwargs):
"""Update test-wide configuration options. See TestOptions for docs."""
# These internally ensure they are safe to call multiple times with no weird
# side effects.
known_args, _ = create_arg_parser(add_help=True).parse_known_args()
if known_args.config_help:
sys.stdout.write(conf.help_text)
sys.exit(0)
logs.setup_logger()
for key, value in kwargs.items():
setattr(self._test_options, key, value)
@classmethod
def handle_sig_int(cls, *_):
if cls.TEST_INSTANCES:
_LOG.error('Received SIGINT, stopping all tests.')
for test in cls.TEST_INSTANCES.values():
test.stop_from_sig_int()
station_api.stop_server()
# The default SIGINT handler does this. If we don't, then nobody above
# us is notified of the event. This will raise this exception in the main
# thread.
raise KeyboardInterrupt()
def stop_from_sig_int(self):
"""Stop test execution as abruptly as we can, only in response to SIGINT."""
with self._lock:
_LOG.error('Stopping %s due to SIGINT', self)
if self._executor:
# TestState str()'s nicely to a descriptive string, so let's log that
# just for good measure.
_LOG.error('Test state: %s', self._executor.test_state)
self._executor.stop()
def execute(self, test_start=None):
"""Starts the framework and executes the given test.
Args:
test_start: Either a trigger phase for starting the test, or a function
that returns a DUT ID. If neither is provided, defaults to not
setting the DUT ID.
"""
# Lock this section so we don't .stop() the executor between instantiating
# it and .Start()'ing it, doing so does weird things to the executor state.
with self._lock:
# Sanity check to make sure someone isn't doing something weird like
# trying to Execute() the same test twice in two separate threads. We
# hold the lock between here and Start()'ing the executor to guarantee
# that only one thread is successfully executing the test.
if self._executor:
raise InvalidTestStateError('Test already running', self._executor)
# Snapshot some things we care about and store them.
self._test_desc.metadata['test_name'] = self._test_options.name
self._test_desc.metadata['config'] = conf._asdict()
self.last_run_time_millis = util.time_millis()
if isinstance(test_start, LambdaType):
@TestPhase()
def trigger_phase(test):
test.test_record.dut_id = test_start()
trigger = trigger_phase
else:
trigger = test_start
if conf.capture_source:
trigger.code_info = test_record.CodeInfo.for_function(trigger.func)
self._executor = core.TestExecutor(
self._test_desc, self.make_uid(), trigger,
self._test_options.teardown_function,
self._test_options.failure_exceptions)
_LOG.info('Executing test: %s', self.descriptor.code_info.name)
self.TEST_INSTANCES[self.uid] = self
self._executor.start()
try:
self._executor.wait()
finally:
try:
final_state = self._executor.finalize()
_LOG.debug('Test completed for %s, outputting now.',
final_state.test_record.metadata['test_name'])
for output_cb in self._test_options.output_callbacks:
try:
output_cb(final_state.test_record)
except Exception: # pylint: disable=broad-except
_LOG.exception(
'Output callback %s raised; continuing anyway', output_cb)
finally:
del self.TEST_INSTANCES[self.uid]
self._executor = None
return final_state.test_record.outcome == test_record.Outcome.PASS
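    # Illustrative use of ``execute`` (the phase names and the DUT id are
    # placeholders): a callable ``test_start`` is wrapped into a trigger
    # phase that sets the DUT id, as handled above.
    #
    #   test = Test(PhaseOne, PhaseTwo)
    #   passed = test.execute(test_start=lambda: "dut-serial-1234")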
class TestOptions(mutablerecords.Record('TestOptions', [], {
'name': 'OpenHTF Test',
'output_callbacks': list,
'teardown_function': None,
'failure_exceptions': list,
})):
"""Class encapsulating various tunable knobs for Tests and their defaults.
name: The name of the test to be put into the metadata.
output_callbacks: List of output callbacks to run, typically it's better to
use add_output_callbacks(), but you can pass [] here to reset them.
teardown_function: Function to run at teardown. We pass the same arguments to
it as a phase.
failure_exceptions: Exceptions to cause a test FAIL instead of ERROR. When a
test run exits early due to an exception, the run will be marked as a FAIL
if the raised exception matches one of the types in this list. Otherwise,
the run is marked as ERROR.
"""
class TestDescriptor(collections.namedtuple(
'TestDescriptor', ['phases', 'code_info', 'metadata', 'uid'])):
"""An object that represents the reusable portions of an OpenHTF test.
This object encapsulates the static test information that is set once and used
by the framework along the way.
Attributes:
phases: The phases to execute for this Test.
metadata: Any metadata that should be associated with test records.
code_info: Information about the module that created the Test.
uid: UID for this test.
"""
def __new__(cls, phases, code_info, metadata):
phases = [PhaseDescriptor.wrap_or_copy(phase) for phase in phases]
return super(TestDescriptor, cls).__new__(
cls, phases, code_info, metadata, uid=uuid.uuid4().hex[:16])
@property
def plug_types(self):
"""Returns set of plug types required by this test."""
return {plug.cls for phase in self.phases for plug in phase.plugs}
def create_arg_parser(add_help=False):
"""Creates an argparse.ArgumentParser for parsing command line flags.
If you want to add arguments, create your own with this as a parent:
>>> parser = argparse.ArgumentParser(
'My args title', parents=[openhtf.create_arg_parser()])
>>> parser.parse_args()
"""
parser = argparse.ArgumentParser('OpenHTF-based testing', parents=[
conf.ARG_PARSER, phase_executor.ARG_PARSER, logs.ARG_PARSER],
add_help=add_help)
parser.add_argument(
'--config-help', action='store_true',
help='Instead of executing the test, simply print all available config '
# Source repository: gr4viton/kivent-robotic-visualizer, file py/surface.py
from random import randint, choice, randrange
from math import radians, pi, sin, cos
from kivy.core.window import Window
import logging
from logging import info as prinf
from logging import debug as prind
from logging import warning as prinw
from logging import error as prine
import time
import random as rnd
import svgwrite as svg
import glob
import json
import os
class Map2D:
def __init__(self, root):
self.root = root
self.__svg_map_dir__ = "../assets/maps/svg/"
self.clear_maps()
self.cts = self.root.collision_types
self.draw_walls()
# self.draw_some_stuff()
self.draw_obstacles()
def clear_maps(self):
prinf("Deleting maps in " + self.__svg_map_dir__)
# shutil.rmtree(self.__svg_map_dir__)
files = glob.glob(self.__svg_map_dir__ + "*")
for f in files:
os.remove(f)
def draw_walls(self):
Ww, Wh = Wsize = Window.size
prinw(Wsize)
self.root.info(Wsize)
self.wall_id = 0
# thickness
t = 125
txu = "warning"
x0, y0 = 30, 30
# w0, h0 = Ww-2*x0, Wh-2*y0
w0, h0 = self.root.field_size
sizes = [(w0, t), (t, h0 + 2 * t), (w0, t), (t, h0 + 2 * t)]
poss = [[0, -t], [w0, -t], [0, h0], [-t, -t]]
poss = [[pos[0] + x0, pos[1] + y0] for pos in poss]
for pos, size in zip(poss, sizes):
self.create_wall(pos, size, txu)
def create_wall(self, pos_lf, size, txu, name="wall"):
w, h = size
cat = "wall"
pos = [pos_lf[i] + size[i] / 2 for i in range(2)]
model_key = cat + str(self.wall_id)
self.root.gameworld.model_manager.load_textured_rectangle(
"vertex_format_4f", w, h, txu, model_key
)
mass = 0
shape_dict = {"width": w, "height": h, "mass": mass}
col_shape = {
"shape_type": "box",
"elasticity": 1.0,
"collision_type": self.cts[cat],
"shape_info": shape_dict,
"friction": 1.0,
}
col_shapes = [col_shape]
physics_component = {
"main_shape": "box",
"velocity": (0, 0),
"position": pos,
"angle": 0,
"angular_velocity": 0,
"vel_limit": 0,
"ang_vel_limit": 0,
"mass": mass,
"col_shapes": col_shapes,
}
create_component_dict = {
"cymunk_physics": physics_component,
"rotate_renderer": {"texture": txu, "model_key": model_key},
"position": pos,
"rotate": 0,
}
name = cat + str(randint(0, 100000))
object_info = {"name": name, "category": cat}
component_order = [
"position",
"rotate",
"rotate_renderer",
"cymunk_physics",
]
self.wall_id += 1
return self.root.init_entity(
create_component_dict, component_order, object_info=object_info
)
def draw_rect_obstacles(self, count=10):
fname = self.create_rect_obstacles(count)
self.root.fl.load_svg(fname, self.root.gameworld)
def draw_obstacles(self, count=10):
self.create_obstacles(count)
# self.draw_rect_obstacles()
# def draw_stuff(self):
# self.draw_obstacles()
# self.draw_rect_obstacles()
# pass
def create_obstacles(self, count):
self.color = "#42ACDC"
self.stroke_color = "#000000"
Fw, Fh = self.root.field_size
w, h = Fw, Fh
siz = (str(w), str(h))
print(siz)
dens_interval = (900, 1000)
# mass_interval = (1000, 5000)
smaller = h if h <= w else w
siz_min, siz_max = one_siz_interval = 0.01 * smaller, 0.08 * smaller
siz_interval = [one_siz_interval, one_siz_interval]
model_manager = self.root.gameworld.model_manager
start = int(time.time())
for i in range(count):
group = start - i
print(group)
rnd.seed(time.time())
siz = [randint(*siz_interval[i]) for i in range(2)]
pos_lf_interval = ((0, w - siz[0]), (0, h - siz[1]))
pos_of_shape = [
randint(*pos_lf_interval[i]) + siz[i] / 2 for i in range(2)
]
pos = (0, 0)
# mass = randint(*mass_interval)
dens = randint(*dens_interval)
mass = siz[0] * siz[1] * dens / 1000
cat = "obstacle"
name = cat + str(i)
info_dict = {
"mass": mass,
"object_info": {"name": name, "category": cat},
}
color = (0, 128, 255, 255)
v_count = randint(3, 5) + randint(0, 5)
model_data = self.get_polyobstacle(pos, siz, v_count, color)
# self.root.pprint(model_data)
model_name = (
"poly_obstacle" + str(v_count) + "v" + str(time.time())
)
model = model_manager.load_model(
"vertex_format_2f4ub",
model_data["vertex_count"],
model_data["index_count"],
model_name,
indices=model_data["indices"],
vertices=model_data["vertices"],
)
col_shapes = []
for tri_verts in model_data["tri_list"]:
col_shape = {
"shape_type": "poly",
"elasticity": 0.6,
"collision_type": self.cts[cat],
"friction": 1.0,
"group": group,
"shape_info": {
"mass": mass,
"offset": (0, 0),
"vertices": tri_verts,
},
}
col_shapes.append(col_shape)
physics = {
"main_shape": "poly",
"velocity": (0, 0),
"position": pos_of_shape,
"angle": 0,
"angular_velocity": radians(0),
"ang_vel_limit": radians(0),
"mass": mass,
"col_shapes": col_shapes,
}
component_dict = {
"position": pos_of_shape,
"rotate_poly_renderer": {"model_key": model},
"cymunk_physics": physics,
"rotate": radians(0),
}
cat = "obstacle"
info_dict = {
"mass": mass,
"object_info": {"name": name, "category": cat},
}
info_str = json.dumps(info_dict)
object_info = info_dict["object_info"]
print("robot component creation")
component_order = [
"position",
"rotate",
"rotate_poly_renderer",
"cymunk_physics",
]
# self.root.init_entity(component_dict, component_order, object_info=object_info)
self.ent = self.root.gameworld.init_entity(
component_dict, component_order
)
self.root.add_entity(self.ent, cat)
print(">>>>>>", self.ent)
# kivy - 12_drawing_shapes
def create_rect_obstacles(self, count):
self.color = "#42ACDC"
self.stroke_color = "#000000"
self.path = self.__svg_map_dir__ + "map{}.svg"
Fw, Fh = self.root.field_size
w, h = Fw, Fh
siz = (str(w), str(h))
print(siz)
self.dwg = None
fname = self.path.format(time.time())
self.dwg = svg.Drawing(
fname, size=siz, baseProfile="full", debug=False
)
# group = self.dwg.add(self.dwg.g(id='obstacles', fill=self.color))
dens_interval = (900, 1000)
# mass_interval = (1000, 5000)
smaller = h if h <= w else w
siz_min, siz_max = one_siz_interval = 0.01 * smaller, 0.1 * smaller
siz_interval = [one_siz_interval, one_siz_interval]
for i in range(count):
rnd.seed(time.time())
# pos = siz = (100,100)
# rnd.seed(time.time())
siz = [randint(*siz_interval[i]) for i in range(2)]
pos_lf_interval = ((0, w - siz[0]), (0, h - siz[1]))
pos = [randint(*pos_lf_interval[i]) + siz[i] / 2 for i in range(2)]
# mass = randint(*mass_interval)
dens = randrange(*dens_interval)
mass = siz[0] * siz[1] * dens / 1000
cat = "obstacle_rect"
name = cat + str(i)
info_dict = {
"mass": mass,
"collision_type": self.cts[cat],
"object_info": {"name": name, "category": cat},
}
# id is a necessary attribute for the kivent svg loader! I also use it to share info about the obstacle.
info_str = json.dumps(info_dict)
desc = name
id_str = name
color = self.color
stroke_color = self.stroke_color
rect = self.dwg.rect(
id=id_str,
insert=pos,
size=siz,
fill=color,
stroke=stroke_color,
description=info_str,
)
if i == 0:
print(siz, pos, mass)
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>ww")
# print(inspect.getfullargspec())
# group.add(rect)
self.dwg.add(rect)
self.root.info("saving: " + self.dwg.filename)
self.dwg.save()
return fname
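# Hedged illustration (all values made up): each obstacle ends up in the SVG as
# a <rect> whose id names it and whose description attribute carries the JSON
# physics metadata consumed later by the KivEnt svg loader, roughly:
#   <rect id="obstacle_rect0" x="120.0" y="80.0" width="35" height="28"
#         fill="#42ACDC" stroke="#000000"
#         description='{"mass": 980.0, "collision_type": 3, ...}'/>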
# kivy - 12_drawing_shapes
def get_layered_regular_polygon(
levels, sides, middle_color, radius_color_dict, pos=(0.0, 0.0)
):
"""
radius_color_dict = {level_number: (radius, (r, g, b, a))}, i.e. one (radius, RGBA color) pair per ring level, counted from 1.
"""
x, y = pos
angle = 2 * pi / sides
all_verts = {}
all_verts[0] = {"pos": pos, "v_color": middle_color}
r_total = 0
i = 0
indices = []
vert_count = 1
ind_count = 0
ind_ext = indices.extend
for count in range(levels):
level = i + 1
r, color = radius_color_dict[level]
for s in range(sides):
new_pos = list(
(
x + (r + r_total) * sin(s * angle),
y + (r + r_total) * cos(s * angle),
)
)
all_verts[vert_count] = {"pos": new_pos, "v_color": color}
vert_count += 1
r_total += r
c = 1 # side number we are on in loop
if level == 1:
for each in range(sides):
if c < sides:
ind_ext((c, 0, c + 1))
else:
ind_ext((c, 0, 1))
ind_count += 3
c += 1
else:
for each in range(sides):
offset = sides * (i - 1)
if c < sides:
ind_ext(
(
c + sides + offset,
c + sides + 1 + offset,
c + offset,
)
)
ind_ext(
(
c + offset,
c + 1 + offset,
c + sides + 1 + offset,
)
)
else:
ind_ext(
(
c + sides + offset,
sides + 1 + offset,
sides + offset,
)
)
ind_ext(
(sides + offset, 1 + offset, sides + 1 + offset)
)
ind_count += 6
c += 1
i += 1
return {
"indices": indices,
"vertices": all_verts,
"vertex_count": vert_count,
"index_count": ind_count,
}
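# Hedged usage sketch for get_layered_regular_polygon (argument values are
# illustrative assumptions that follow the documented dict format):
#   model_data = get_layered_regular_polygon(
#       levels=2, sides=6,
#       middle_color=(255, 255, 255, 255),
#       radius_color_dict={1: (20.0, (0, 128, 255, 255)),
#                          2: (10.0, (0, 64, 128, 255))},
#       pos=(0.0, 0.0))
#   # model_data['vertices'] maps vertex index -> {'pos': ..., 'v_color': ...},
#   # and model_data['indices'] triangulates the rings fan-style.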
@staticmethod
def get_polyobstacle(pos, siz, v_count, color):
rnd.seed(time.time())
vert_count = 0
ind_count = 0
angles = [radians(randint(0, 360)) for i in range(v_count)]
angles = sorted(angles)
indices = []
vert_positions = [(0.0, 0.0)]
rad_range = sorted(siz)
all_verts = {}
tri_list = []
angles = [radians(360 * i / v_count) for i in range(v_count)]
def get_randomized_angle(j, k, k_angle_offset=0):
a_1 = angles[j]
a_2 = angles[k] + k_angle_offset
a_min = a_1
a_max = a_1 + (a_2 - a_1) * 0.8 # to not make it too convex
return randint(int(a_min * 100), int(a_max * 100)) / 100
angles = [
get_randomized_angle(i, i + 1) for i in range(len(angles) - 1)
]
angles.append(get_randomized_angle(len(angles) - 1, 0, radians(360)))
print(angles)
for ang in angles:
radius = randint(*rad_range)
rand_siz = [
randint(int(s * 1000 / 2), int(s * 1000)) / 1000 / s
for s in siz
]
pos = (
radius * cos(ang) * rand_siz[0],
radius * sin(ang) * rand_siz[1],
)
vert_positions.append(pos)
for i, v_pos in enumerate(vert_positions):
all_verts.update({i: {"pos": v_pos, "v_color": color}})
if i > 1:
inds = (0, i - 1, i)
indices.extend(inds)
tri_list.append([vert_positions[ind] for ind in inds])
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'Radius.Nodes.Node.Client' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.Client',
False,
[
_MetaInfoClassMember('authentication-nas-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' NAS-Identifier of the RADIUS authentication
client
''',
'authentication_nas_id',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('unknown-accounting-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of RADIUS accounting responses packets
received from unknown addresses
''',
'unknown_accounting_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('unknown-authentication-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of RADIUS access responses packets
received from unknown addresses
''',
'unknown_authentication_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'client',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
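# Hedged lookup sketch: the table is keyed by the generated class path, so a
# class's metadata can be fetched directly from the dict (anything beyond this
# plain lookup would be an assumption about the _MetaInfoClass API):
#   client_meta = _meta_table['Radius.Nodes.Node.Client']['meta_info']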
'Radius.Nodes.Node.DeadCriteria.Hosts.Host.Time' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.DeadCriteria.Hosts.Host.Time',
False,
[
_MetaInfoClassMember('is-computed', ATTRIBUTE, 'bool' , None, None,
[], [],
''' True if computed; false if not
''',
'is_computed',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Value for time or tries
''',
'value',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'time',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.DeadCriteria.Hosts.Host.Tries' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.DeadCriteria.Hosts.Host.Tries',
False,
[
_MetaInfoClassMember('is-computed', ATTRIBUTE, 'bool' , None, None,
[], [],
''' True if computed; false if not
''',
'is_computed',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Value for time or tries
''',
'value',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'tries',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.DeadCriteria.Hosts.Host' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.DeadCriteria.Hosts.Host',
False,
[
_MetaInfoClassMember('acct-port-number', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Accounting Port number (standard port 1646)
''',
'acct_port_number',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('auth-port-number', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Authentication Port number (standard port
1645)
''',
'auth_port_number',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('ip-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IP address of RADIUS server
''',
'ip_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False, [
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP address of RADIUS server
''',
'ip_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IP address of RADIUS server
''',
'ip_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
]),
_MetaInfoClassMember('time', REFERENCE_CLASS, 'Time' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.DeadCriteria.Hosts.Host.Time',
[], [],
''' Time in seconds
''',
'time',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('tries', REFERENCE_CLASS, 'Tries' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.DeadCriteria.Hosts.Host.Tries',
[], [],
''' Number of tries
''',
'tries',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'host',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.DeadCriteria.Hosts' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.DeadCriteria.Hosts',
False,
[
_MetaInfoClassMember('host', REFERENCE_LIST, 'Host' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.DeadCriteria.Hosts.Host',
[], [],
''' RADIUS Server
''',
'host',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'hosts',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.DeadCriteria' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.DeadCriteria',
False,
[
_MetaInfoClassMember('hosts', REFERENCE_CLASS, 'Hosts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.DeadCriteria.Hosts',
[], [],
''' RADIUS server dead criteria host table
''',
'hosts',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'dead-criteria',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.Authentication.AuthenticationGroup.Authentication_' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.Authentication.AuthenticationGroup.Authentication_',
False,
[
_MetaInfoClassMember('access-accepts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access accepts
''',
'access_accepts',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-challenges', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access challenges
''',
'access_challenges',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-rejects', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access rejects
''',
'access_rejects',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-request-retransmits', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of retransmitted access requests
''',
'access_request_retransmits',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access requests
''',
'access_requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-timeouts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access packets timed out
''',
'access_timeouts',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-incorrect-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of incorrect authentication responses
''',
'authen_incorrect_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-response-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Average response time for authentication
requests
''',
'authen_response_time',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-server-error-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of server error authentication responses
''',
'authen_server_error_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-transaction-failure', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of failed authentication transactions
''',
'authen_transaction_failure',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-transaction-successess', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of succeeded authentication transactions
''',
'authen_transaction_successess',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-unexpected-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unexpected authentication responses
''',
'authen_unexpected_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('bad-access-authenticators', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of bad access authenticators
''',
'bad_access_authenticators',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('bad-access-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of bad access responses
''',
'bad_access_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('dropped-access-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access responses dropped
''',
'dropped_access_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('pending-access-requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of pending access requests
''',
'pending_access_requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('rtt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Round trip time for authentication in
milliseconds
''',
'rtt',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('unknown-access-types', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of packets received with unknown type
from authentication server
''',
'unknown_access_types',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'authentication',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.Authentication.AuthenticationGroup' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.Authentication.AuthenticationGroup',
False,
[
_MetaInfoClassMember('authentication', REFERENCE_CLASS, 'Authentication_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.Authentication.AuthenticationGroup.Authentication_',
[], [],
''' Authentication data
''',
'authentication',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('family', ATTRIBUTE, 'str' , None, None,
[], [],
''' IP address Family
''',
'family',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' IP address buffer
''',
'ip_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('port', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Authentication port number
''',
'port',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('server-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP address of RADIUS server
''',
'server_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'authentication-group',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.Authentication' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.Authentication',
False,
[
_MetaInfoClassMember('authentication-group', REFERENCE_LIST, 'AuthenticationGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.Authentication.AuthenticationGroup',
[], [],
''' List of authentication groups
''',
'authentication_group',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'authentication',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.Accounting.AccountingGroup.Accounting_' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.Accounting.AccountingGroup.Accounting_',
False,
[
_MetaInfoClassMember('acct-incorrect-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of incorrect accounting responses
''',
'acct_incorrect_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-response-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Average response time for authentication
requests
''',
'acct_response_time',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-server-error-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of server error accounting responses
''',
'acct_server_error_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-transaction-failure', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of failed authentication transactions
''',
'acct_transaction_failure',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-transaction-successess', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of succeeded authentication transactions
''',
'acct_transaction_successess',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-unexpected-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unexpected accounting responses
''',
'acct_unexpected_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('bad-authenticators', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of bad accounting authenticators
''',
'bad_authenticators',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('bad-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of bad accounting responses
''',
'bad_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('dropped-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of accounting responses dropped
''',
'dropped_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('pending-requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of pending accounting requests
''',
'pending_requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of accounting requests
''',
'requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of accounting responses
''',
'responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('retransmits', ATTRIBUTE, 'int' , None, None,
# Source repo: ravithanneeru/azure-cli-extensions
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._source_control_configuration_client_enums import *
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class ProxyResource(Resource):
"""The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class ClusterScopeSettings(ProxyResource):
"""Extension scope settings.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param allow_multiple_instances: Describes if multiple instances of the extension are allowed.
:type allow_multiple_instances: bool
:param default_release_namespace: Default extension release namespace.
:type default_release_namespace: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'allow_multiple_instances': {'key': 'properties.allowMultipleInstances', 'type': 'bool'},
'default_release_namespace': {'key': 'properties.defaultReleaseNamespace', 'type': 'str'},
}
def __init__(
self,
*,
allow_multiple_instances: Optional[bool] = None,
default_release_namespace: Optional[str] = None,
**kwargs
):
super(ClusterScopeSettings, self).__init__(**kwargs)
self.allow_multiple_instances = allow_multiple_instances
self.default_release_namespace = default_release_namespace
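# Hedged usage sketch (values are illustrative only):
#   settings = ClusterScopeSettings(
#       allow_multiple_instances=True,
#       default_release_namespace='my-extension-ns')
#   # settings.id / .name / .type remain None until populated by the service.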
class ComplianceStatus(msrest.serialization.Model):
"""Compliance Status details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar compliance_state: The compliance state of the configuration. Possible values include:
"Pending", "Compliant", "Noncompliant", "Installed", "Failed".
:vartype compliance_state: str or
~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.ComplianceStateType
:param last_config_applied: Datetime the configuration was last applied.
:type last_config_applied: ~datetime.datetime
:param message: Message from when the configuration was applied.
:type message: str
:param message_level: Level of the message. Possible values include: "Error", "Warning",
"Information".
:type message_level: str or
~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.MessageLevelType
"""
_validation = {
'compliance_state': {'readonly': True},
}
_attribute_map = {
'compliance_state': {'key': 'complianceState', 'type': 'str'},
'last_config_applied': {'key': 'lastConfigApplied', 'type': 'iso-8601'},
'message': {'key': 'message', 'type': 'str'},
'message_level': {'key': 'messageLevel', 'type': 'str'},
}
def __init__(
self,
*,
last_config_applied: Optional[datetime.datetime] = None,
message: Optional[str] = None,
message_level: Optional[Union[str, "MessageLevelType"]] = None,
**kwargs
):
super(ComplianceStatus, self).__init__(**kwargs)
self.compliance_state = None
self.last_config_applied = last_config_applied
self.message = message
self.message_level = message_level
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details:
list[~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
*,
error: Optional["ErrorDetail"] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class Extension(ProxyResource):
"""The Extension object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param identity: Identity of the Extension resource.
:type identity: ~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.Identity
:ivar system_data: Top level metadata
https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources.
:vartype system_data: ~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.SystemData
:param extension_type: Type of the Extension, of which this resource is an instance of. It
must be one of the Extension Types registered with Microsoft.KubernetesConfiguration by the
Extension publisher.
:type extension_type: str
:param auto_upgrade_minor_version: Flag to note if this extension participates in auto upgrade
of minor version, or not.
:type auto_upgrade_minor_version: bool
:param release_train: ReleaseTrain this extension participates in for auto-upgrade (e.g.
Stable, Preview, etc.) - only if autoUpgradeMinorVersion is 'true'.
:type release_train: str
:param version: Version of the extension for this extension, if it is 'pinned' to a specific
version. autoUpgradeMinorVersion must be 'false'.
:type version: str
:param scope: Scope at which the extension is installed.
:type scope: ~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.Scope
:param configuration_settings: Configuration settings, as name-value pairs for configuring this
extension.
:type configuration_settings: dict[str, str]
:param configuration_protected_settings: Configuration settings that are sensitive, as
name-value pairs for configuring this extension.
:type configuration_protected_settings: dict[str, str]
:ivar provisioning_state: Status of installation of this extension. Possible values include:
"Succeeded", "Failed", "Canceled", "Creating", "Updating", "Deleting".
:vartype provisioning_state: str or
~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.ProvisioningState
:param statuses: Status from this extension.
:type statuses:
list[~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.ExtensionStatus]
:ivar error_info: Error information from the Agent - e.g. errors during installation.
:vartype error_info: ~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.ErrorDetail
:ivar custom_location_settings: Custom Location settings properties.
:vartype custom_location_settings: dict[str, str]
:ivar package_uri: Uri of the Helm package.
:vartype package_uri: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
'error_info': {'readonly': True},
'custom_location_settings': {'readonly': True},
'package_uri': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'extension_type': {'key': 'properties.extensionType', 'type': 'str'},
'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
'release_train': {'key': 'properties.releaseTrain', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'Scope'},
'configuration_settings': {'key': 'properties.configurationSettings', 'type': '{str}'},
'configuration_protected_settings': {'key': 'properties.configurationProtectedSettings', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'statuses': {'key': 'properties.statuses', 'type': '[ExtensionStatus]'},
'error_info': {'key': 'properties.errorInfo', 'type': 'ErrorDetail'},
'custom_location_settings': {'key': 'properties.customLocationSettings', 'type': '{str}'},
'package_uri': {'key': 'properties.packageUri', 'type': 'str'},
}
def __init__(
self,
*,
identity: Optional["Identity"] = None,
extension_type: Optional[str] = None,
auto_upgrade_minor_version: Optional[bool] = True,
release_train: Optional[str] = "Stable",
version: Optional[str] = None,
scope: Optional["Scope"] | |
lineage_probability_color=lineage_probability_color,
abs_prob_cmap=abs_prob_cmap,
lineage_probability=show_prob,
ylabel=ylabel,
**kwargs,
)
if sharey in ("row", "all", True) and not ylabel_shown:
plt.setp(ax.get_yticklabels(), visible=True)
if show_xticks_and_label[i]:
plt.setp(ax.get_xticklabels(), visible=True)
else:
ax.set_xlabel(None)
last_ax = ax
ylabel_shown = True
cells_shown = True
key, color, typp, mapper = model._get_colors(cell_color, same_plot=same_plot)
if typp == ColorType.CAT:
if not hide_cells:
model._maybe_add_legend(
fig, ax, mapper=mapper, title=key, loc=obs_legend_loc, is_line=False
)
elif typp == ColorType.CONT:
if same_perc and show_cbar and not hide_cells:
if isinstance(color, np.ndarray):
# plotting cont. observation other than lin. probs as a color
vmin = np.min(color)
vmax = np.max(color)
else:
vmin = np.min([model.w_all for model in successful_models.values()])
vmax = np.max([model.w_all for model in successful_models.values()])
norm = mcolors.Normalize(vmin=vmin, vmax=vmax)
for ax in axes:
children = [
c
for c in ax.get_children()
if isinstance(c, mpl.collections.PathCollection)
]
if len(children):
children[0].set_norm(norm)
divider = make_axes_locatable(last_ax)
cax = divider.append_axes("right", size="2%", pad=0.1)
_ = mpl.colorbar.ColorbarBase(
cax,
norm=norm,
cmap=abs_prob_cmap,
label=key,
ticks=np.linspace(norm.vmin, norm.vmax, 5),
)
if same_plot and lineage_names != [None]:
model._maybe_add_legend(
fig,
ax,
mapper={ln: lineage_color_mapper[ln] for ln in successful_models.keys()},
loc=legend_loc,
)
def _position_legend(ax: mpl.axes.Axes, legend_loc: str, **kwargs) -> mpl.legend.Legend:
"""
Position legend in- or outside the figure.
Parameters
----------
ax
Ax where to position the legend.
legend_loc
Position of legend.
kwargs
Keyword arguments for :func:`matplotlib.pyplot.legend`.
Returns
-------
The created legend.
"""
if legend_loc == "center center out":
raise ValueError("Invalid option: `'center center out'`.")
if legend_loc == "best":
return ax.legend(loc="best", **kwargs)
tmp, loc = legend_loc.split(" "), ""
if len(tmp) == 1:
height, rest = tmp[0], []
width = "right" if height in ("upper", "top", "center") else "left"
else:
height, width, *rest = legend_loc.split(" ")
if rest:
if len(rest) != 1:
raise ValueError(
f"Expected only 1 additional modifier ('in' or 'out'), found `{list(rest)}`."
)
elif rest[0] not in ("in", "out"):
raise ValueError(
f"Invalid modifier `{rest[0]!r}`. Valid options are: `'in', 'out'`."
)
if rest[0] == "in": # ignore in, it's default
rest = []
if height in ("upper", "top"):
y = 1.55 if width == "center" else 1.025
loc += "upper"
elif height == "center":
y = 0.5
loc += "center"
elif height in ("lower", "bottom"):
y = -0.55 if width == "center" else -0.025
loc += "lower"
else:
raise ValueError(
f"Invalid legend position on y-axis: `{height!r}`. "
f"Valid options are: `'upper', 'top', 'center', 'lower', 'bottom'`."
)
if width == "left":
x = -0.05
loc += " right" if rest else " left"
elif width == "center":
x = 0.5
if height != "center": # causes to be like top center
loc += " center"
elif width == "right":
x = 1.05
loc += " left" if rest else " right"
else:
raise ValueError(
f"Invalid legend position on x-axis: `{width!r}`. "
f"Valid options are: `'left', 'center', 'right'`."
)
if rest:
kwargs["bbox_to_anchor"] = (x, y)
return ax.legend(loc=loc, **kwargs)
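# Hedged usage sketch: a `legend_loc` string combines a vertical and a
# horizontal position plus an optional 'in'/'out' modifier (assumes `ax`
# already holds labeled artists):
#   _position_legend(ax, "upper right out", frameon=False)  # anchored outside, top-right
#   _position_legend(ax, "lower center", ncol=2)             # inside, bottom-center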
def _get_backend(model, backend: str) -> str:
return _DEFAULT_BACKEND if _is_any_gam_mgcv(model) else backend
@d.dedent
def _create_callbacks(
adata: AnnData,
callback: Optional[Callable],
obs: Sequence[str],
lineages: Sequence[Optional[str]],
perform_sanity_check: Optional[bool] = None,
**kwargs,
) -> Dict[str, Dict[str, Callable]]:
"""
Create models for each gene and lineage.
Parameters
----------
%(adata)s
callback
Gene and lineage specific prepare callbacks.
obs
Sequence of observations, such as genes.
lineages
Sequence of genes.
perform_sanity_check
Whether to check if all callbacks have the correct signature. This is done by instantiating
a dummy model and running each callback, assuming the callbacks are not expensive to run.
If `None`, it is only performed for non-default callbacks.
kwargs
Keyword arguments for ``callback`` when performing the sanity check.
Returns
-------
The created callbacks.
"""
def process_lineages(
obs_name: str, lin_names: Optional[Union[Callable, Dict[Optional[str], Any]]]
) -> None:
if lin_names is None:
lin_names = _default_model_callback
if callable(lin_names):
# sharing the same models for all lineages
for lin_name in lineages:
callbacks[obs_name][lin_name] = lin_names
return
elif not isinstance(lin_names, dict):
raise TypeError(
f"Expected the lineage callback to be either `callable` or a dictionary of callables, "
f"found `{type(lin_names).__name__!r}`."
)
lin_rest_callback = (
lin_names.get("*", _default_model_callback) or _default_model_callback
) # do not pop
if not callable(lin_rest_callback):
raise TypeError(
f"Expected the lineage fallback callback for gene `{obs_name!r}` to be `callable`, "
f"found `{type(lin_rest_callback).__name__!r}`."
)
for lin_name, cb in lin_names.items():
if lin_name == "*":
continue
if not callable(cb):
raise TypeError(
f"Expected the callback for gene `{obs_name!r}` and lineage `{lin_name!r}` "
f"to be `callable`, found `{type(cb).__name__!r}`."
)
callbacks[obs_name][lin_name] = cb
for lin_name in lineages - set(callbacks[obs_name].keys()):
callbacks[obs_name][lin_name] = lin_rest_callback
def maybe_sanity_check(callbacks: Dict[str, Dict[str, Callable]]) -> None:
if not perform_sanity_check:
return
from sklearn.svm import SVR
logg.debug("Performing callback sanity checks")
for gene in callbacks.keys():
for lineage, cb in callbacks[gene].items():
# create the model here because the callback can search the attribute
dummy_model = SKLearnModel(adata, model=SVR())
try:
model = cb(dummy_model, gene=gene, lineage=lineage, **kwargs)
assert model is dummy_model, (
"Creation of new models is not allowed. "
"Ensure that callback returns the same model."
)
assert (
model.prepared
), "Model is not prepared. Ensure that callback calls `.prepare()`."
assert (
model._gene == gene
), f"Callback modified the gene from `{gene!r}` to `{model._gene!r}`."
assert (
model._lineage == lineage
), f"Callback modified the lineage from `{lineage!r}` to `{model._lineage!r}`."
if isinstance(model, FailedModel):
model.reraise()
except Exception as e: # noqa: B902
raise RuntimeError(
f"Callback validation failed for gene `{gene!r}` and lineage `{lineage!r}`."
) from e
def all_callbacks_are_default(cbs: dict) -> bool:
# this correctly implicitly handles '*': None
for vs in cbs.values():
if isinstance(vs, dict):
for cb in vs.values():
if callable(cb) and cb is not _default_model_callback:
return False
elif callable(vs) and vs is not _default_model_callback:
return False
return True
if not len(lineages):
raise ValueError("No lineages have been selected.")
if not len(obs):
raise ValueError("No genes have been selected.")
if callback is None:
callback = _default_model_callback
if perform_sanity_check is None:
perform_sanity_check = (
not all_callbacks_are_default(callback)
if isinstance(callback, dict)
else callback is not _default_model_callback
)
if callable(callback):
callbacks = {o: {lin: callback for lin in lineages} for o in obs}
maybe_sanity_check(callbacks)
return callbacks
lineages, obs = (
set(_unique_order_preserving(lineages)),
set(_unique_order_preserving(obs)),
)
callbacks = defaultdict(dict)
if isinstance(callback, dict):
# can be specified as None
obs_rest_callback = (
callback.pop("*", _default_model_callback) or _default_model_callback
)
for obs_name, lin_names in callback.items():
process_lineages(obs_name, lin_names)
if callable(obs_rest_callback):
for obs_name in obs - set(callback.keys()):
process_lineages(obs_name, callback.get(obs_name, obs_rest_callback))
else:
raise TypeError(
f"Expected the gene fallback callback to be `callable`, "
f"found `{type(obs_rest_callback).__name__!r}`."
)
else:
raise TypeError(
f"Class `{type(callback).__name__!r}` must be `callable` or "
f"a gene and lineage specific `dict` of `callables`."
)
if set(callbacks.keys()) & obs != obs:
raise ValueError(
f"Missing gene callbacks for the following genes: `{list(obs - set(callbacks.keys()))}`."
)
for gene, vs in callbacks.items():
if set(vs.keys()) & lineages != lineages:
raise ValueError(
f"Missing lineage callbacks for gene `{gene!r}`: `{list(lineages - set(vs.keys()))}`."
)
maybe_sanity_check(callbacks)
return callbacks
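# Hedged usage sketch (`adata` and the gene/lineage names are assumptions):
#   callbacks = _create_callbacks(
#       adata, callback=None,
#       obs=["GeneA", "GeneB"], lineages=["Alpha", "Beta"])
#   # callbacks["GeneA"]["Alpha"] is _default_model_callback here, i.e. it
#   # simply calls model.prepare(...) for that gene/lineage pair.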
def _default_model_callback(model: BaseModel, **kwargs) -> BaseModel:
# we could filter kwargs, but it's better not to - this will detect if we pass useless stuff
return model.prepare(**kwargs)
@d.dedent
def composition(
adata: AnnData,
key: str,
fontsize: Optional[str] = None,
figsize: Optional[Tuple[float, float]] = None,
dpi: Optional[float] = None,
save: Optional[Union[str, Path]] = None,
**kwargs: Any,
) -> None:
"""
Plot a pie chart for categorical annotation.
Parameters
----------
%(adata)s
key
Key in :attr:`anndata.AnnData.obs` containing categorical observation.
fontsize
Font size for the pie chart labels.
%(plotting)s
kwargs
Keyword arguments for :func:`matplotlib.pyplot.pie`.
Returns
-------
%(just_plots)s
"""
if key not in adata.obs:
raise KeyError(f"Data not found in `adata.obs[{key!r}]`.")
if not is_categorical_dtype(adata.obs[key]):
raise TypeError(
f"Expected `adata.obs[{key!r}]` is not `categorical`, "
f"found `{infer_dtype(adata.obs[key])}`."
)
colors = adata.uns.get(f"{key}_colors", None)
x = adata.obs[key].value_counts()
# plot these fractions in a pie plot
fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
ax.pie(
x=x,
labels=x.index,
colors=colors,
textprops={"fontsize": fontsize},
**kwargs,
)
ax.set_title(f"composition by {key}")
if save is not None:
save_fig(fig, save)
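# Hedged usage sketch (assumes `adata.obs["clusters"]` is a categorical column):
#   composition(adata, key="clusters", fontsize=10)
#   composition(adata, key="clusters", figsize=(4, 4), dpi=150, save="composition.png")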
# modified from: https://github.com/CarlEkerot/held-karp
def _held_karp(dists: np.ndarray) -> Tuple[float, np.ndarray]:
"""
Held-Karp algorithm solves the Traveling Salesman Problem.
This algorithm uses dynamic programming with memoization.
Parameters
----------
dists
Distance matrix.
Returns
-------
The cost and the path.
"""
n = len(dists)
# Maps each subset of the nodes to the cost to reach that subset, as well
# as what node it passed before reaching this subset.
# Node subsets are represented as set bits.
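# The body of _held_karp is truncated in this dump. For reference only, a
# minimal self-contained sketch of the same Held-Karp bit-mask DP follows.
# It is NOT the original body (the original returns an np.ndarray path and
# may lay out its memo differently); treat it as an illustrative assumption.
from itertools import combinations as _combinations

def _held_karp_sketch(dists):
    """Exact TSP cost and tour via DP over bit-mask subsets, O(2^n * n^2)."""
    n = len(dists)
    # C[(mask, k)]: (cheapest cost to start at node 0, visit exactly the
    # nodes set in `mask`, and end at node k; predecessor of k on that path)
    C = {(1 << k, k): (dists[0][k], 0) for k in range(1, n)}
    for size in range(2, n):
        for subset in _combinations(range(1, n), size):
            mask = 0
            for bit in subset:
                mask |= 1 << bit
            for k in subset:
                prev = mask & ~(1 << k)
                C[(mask, k)] = min((C[(prev, m)][0] + dists[m][k], m)
                                   for m in subset if m != k)
    full = (2 ** n - 1) - 1  # every node except the start node 0
    opt, parent = min((C[(full, k)][0] + dists[k][0], k) for k in range(1, n))
    path, mask = [], full
    for _ in range(n - 1):  # walk predecessors backwards to recover the tour
        path.append(parent)
        next_mask = mask & ~(1 << parent)
        _, parent = C[(mask, parent)]
        mask = next_mask
    path.append(0)
    return opt, list(reversed(path))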
elif (IIiiIiIIiI1.type == LISP_ECM):
    lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
    lprint("Invalid LISP control packet type {}".format(IIiiIiIIiI1.type))
return (oooOOOOO0Oo)


def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
                                    ttl):
    o0O0o = bold("RLOC-probe", False)

    if (lisp_i_am_etr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(o0O0o))
        lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
                                     ttl)
        return

    if (lisp_i_am_rtr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(o0O0o))
        lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
                                     ttl)
        return

    lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(o0O0o))
    return


def lisp_process_smr(map_request):
    lprint("Received SMR-based Map-Request")
    return


def lisp_process_smr_invoked_request(map_request):
    lprint("Received SMR-invoked Map-Request")
    return


def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, rloc_probe,
                         keys, enc, auth, mr_ttl=-1):
    OOO0iI1 = lisp_map_reply()
    OOO0iI1.rloc_probe = rloc_probe
    OOO0iI1.echo_nonce_capable = enc
    OOO0iI1.hop_count = 0 if (mr_ttl == -1) else mr_ttl
    OOO0iI1.record_count = 1
    OOO0iI1.nonce = nonce
    iI1IIII1ii1 = OOO0iI1.encode()
    OOO0iI1.print_map_reply()

    I111IoOo0oOOO0o = lisp_eid_record()
    I111IoOo0oOOO0o.rloc_count = len(rloc_set)
    I111IoOo0oOOO0o.authoritative = auth
    I111IoOo0oOOO0o.record_ttl = ttl
    I111IoOo0oOOO0o.action = action
    I111IoOo0oOOO0o.eid = eid
    I111IoOo0oOOO0o.group = group

    iI1IIII1ii1 += I111IoOo0oOOO0o.encode()
    I111IoOo0oOOO0o.print_record(" ", False)

    Iii1i = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()

    for ii1I1i11 in rloc_set:
        i1iIiII = lisp_rloc_record()
        OoOOoooO000 = ii1I1i11.rloc.print_address_no_iid()
        if (OoOOoooO000 in Iii1i):
            i1iIiII.local_bit = True
            i1iIiII.probe_bit = rloc_probe
            i1iIiII.keys = keys
            if (ii1I1i11.priority == 254 and lisp_i_am_rtr):
                i1iIiII.rloc_name = "RTR"

        i1iIiII.store_rloc_entry(ii1I1i11)
        i1iIiII.reach_bit = True
        i1iIiII.print_record(" ")
        iI1IIII1ii1 += i1iIiII.encode()

    return (iI1IIII1ii1)


def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
    Oo0oo = lisp_map_referral()
    Oo0oo.record_count = 1
    Oo0oo.nonce = nonce
    iI1IIII1ii1 = Oo0oo.encode()
    Oo0oo.print_map_referral()

    I111IoOo0oOOO0o = lisp_eid_record()

    OOOoo0ooooo0 = 0
    if (ddt_entry == None):
        I111IoOo0oOOO0o.eid = eid
        I111IoOo0oOOO0o.group = group
    else:
        OOOoo0ooooo0 = len(ddt_entry.delegation_set)
        I111IoOo0oOOO0o.eid = ddt_entry.eid
        I111IoOo0oOOO0o.group = ddt_entry.group
        ddt_entry.map_referrals_sent += 1

    I111IoOo0oOOO0o.rloc_count = OOOoo0ooooo0
    I111IoOo0oOOO0o.authoritative = True

    O0oOo00O = False
    if (action == LISP_DDT_ACTION_NULL):
        if (OOOoo0ooooo0 == 0):
            action = LISP_DDT_ACTION_NODE_REFERRAL
        else:
            ooOiiI1Ii11 = ddt_entry.delegation_set[0]
            if (ooOiiI1Ii11.is_ddt_child()):
                action = LISP_DDT_ACTION_NODE_REFERRAL
            if (ooOiiI1Ii11.is_ms_child()):
                action = LISP_DDT_ACTION_MS_REFERRAL

    if (action == LISP_DDT_ACTION_NOT_AUTH): O0oOo00O = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        O0oOo00O = (lisp_i_am_ms and ooOiiI1Ii11.is_ms_peer() == False)

    I111IoOo0oOOO0o.action = action
    I111IoOo0oOOO0o.ddt_incomplete = O0oOo00O
    I111IoOo0oOOO0o.record_ttl = ttl
| |
# Source file: CartPole.py
#!/usr/bin/env python3
import numpy as np
from scipy.stats.mstats import gmean
import theano.tensor as T
from ilqr import iLQR
from ilqr.cost import QRCost
from ilqr.dynamics import constrain
from ilqr.dynamics import AutoDiffDynamics
'''
Author: Sameer
Date: May 2019
'''
class CartPole(object):
'''
__init__: in this method we store all the variables required for formulating the Cart Pole iLQR problem.
'''
def __init__(self, delta_t, traj_steps, state_dynamics, initial_state, goal, cost_state, cost_action, terminal_cost):
self.dt = delta_t
self.N = traj_steps
self.xd = state_dynamics
self.x0 = initial_state
self.x_goal = goal
self.Q = cost_state
self.R = cost_action
self.Q_terminal = terminal_cost
self.J_hist = []
'''
state_inputs: in this method we generate the state and control vectors. The states for the given system are
1 - Position == x
2 - Linear velocity (of the cart) == x_dot
3 - Sine component of the angular position == sin_theta
4 - Cosine component of the angular position == cos_theta
5 - Angular velocity == theta_dot.
We use the sine and cosine components of the angular position instead of the raw angle because
the raw angle can cause angular wrap-around issues when solving the optimization problem.
Control for the system is given by
1 - Horizontal force on the cart == F_x
'''
def state_inputs(self):
x_input = [
T.dscalar("x"),
T.dscalar("x_dot"),
T.dscalar("sin_theta"),
T.dscalar("cos_theta"),
T.dscalar("theta_dot")
]
u_input = [
T.dscalar("F_x")
]
return x_input, u_input
'''
augment_state: in this method we change the state vector which is of the form [pos, vel, angular_pos, angular_vel]
to [pos, vel, sin(angular_pos), cos(angular_pos), angular_vel].
'''
def augment_state(self, X):
if X.ndim == 1:
x, x_dot, theta, theta_dot = X
else:
x = X[..., 0].reshape(-1, 1)
x_dot = X[..., 1].reshape(-1, 1)
theta = X[..., 2].reshape(-1, 1)
theta_dot = X[..., 3].reshape(-1, 1)
return np.hstack([x, x_dot, np.sin(theta), np.cos(theta), theta_dot])
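# Hedged example of the augmentation (illustrative values):
#   augment_state(np.array([0.0, 0.0, np.pi / 2, 0.0]))
#   # -> array([0., 0., 1., 0., 0.])   (sin(pi/2)=1, cos(pi/2)=0 up to float error)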
'''
deaugment_state: in this method we change the state vector which is of the form
[pos, vel, sin(angular_pos), cos(angular_pos), angular_vel] to [pos, vel, angular_pos, angular_vel].
'''
def deaugment_state(self, X):
if X.ndim == 1:
x, x_dot, sin_theta, cos_theta, theta_dot = X
else:
x = X[..., 0].reshape(-1, 1)
x_dot = X[..., 1].reshape(-1, 1)
sin_theta = X[..., 2].reshape(-1, 1)
cos_theta = X[..., 3].reshape(-1, 1)
theta_dot = X[..., 4].reshape(-1, 1)
theta = np.arctan2(sin_theta, cos_theta)
return np.hstack([x, x_dot, theta, theta_dot])
'''
accel: this method is used for generating the linear acceleration, angular acceleration and next angle. All these values
are used for calculating the next states. Accelerations will be used to calculate the next time step velocities,
linear velocity will be used to calculate the next time step positions and angular velocity will be used to
calculate the next theta.
For calculating the accelerations we follow the non-linear system dynamics of the cart-pole problem.
Equations are given here:
https://pdfs.semanticscholar.org/f665/cfa1143e6414cdcb459c5d84796908af4727.pdf?_ga=2.177173158.203278840.1548098392562587694.1548098392
'''
def accel(self, X, u, Xd):
temp = (u[0] + Xd[0] * Xd[2] * X[4]**2 * X[2])/(Xd[1] + Xd[0])
num = Xd[3] * X[2] - X[3] * temp
den = Xd[2] * (4/3 - (Xd[0] * X[3]**2)/(Xd[1] + Xd[0]))
ang_acc = num/den
lin_acc = temp - (Xd[0] * Xd[2] * ang_acc * X[3])/(Xd[1] + Xd[0])
theta = T.arctan2(X[2], X[3])
next_theta = theta + X[4] * self.dt
return lin_acc, ang_acc, next_theta
'''
next_states: this method calculates the next time step states based on accelerations as described above.
'''
def next_states(self, X, lin_acc, ang_acc, next_theta):
f = T.stack([
X[0] + X[1] * self.dt,
X[1] + lin_acc * self.dt,
T.sin(next_theta),
T.cos(next_theta),
X[4] + ang_acc * self.dt,
]).T
return f
'''
on_iteration: this method prints useful information on each iteration so that we can keep track
of the optimization progress.
'''
def on_iteration(self, iteration_count, xs, us, J_opt, accepted, converged):
self.J_hist.append(J_opt)
info = "converged" if converged else ("accepted" if accepted else "failed")
final_state = self.deaugment_state(xs[-1])
print("iteration", iteration_count, info, J_opt, final_state)
'''
run_IterLinQuadReg: this method is the main function which runs iLQR on the Cart Pole problem. The steps are:
1 - Calculate the state vectors (Symbolic vector)
2 - Calculate the accelerations (Symbolic vector)
3 - Generate the next state (Symbolic vector)
4 - Differentiate the states and generate the dynamics
5 - Set the goal
6 - Set the cost based on whether the terminal cost is provided
7 - Set the initial state
8 - Guess some control action
9 - Run iLQR
'''
def run_IterLinQuadReg(self, us_init=None):
x_input, u_input = self.state_inputs()
x_dot_dot, theta_dot_dot, theta_prime = self.accel(x_input, u_input, self.xd)
f = self.next_states(x_input, x_dot_dot, theta_dot_dot, theta_prime)
dynamics = AutoDiffDynamics(f, x_input, u_input,hessians=False)
x_goal = self.augment_state(self.x_goal)
if self.Q_terminal is None:
cost = QRCost(self.Q, self.R)
else:
cost = QRCost(self.Q, self.R, Q_terminal=self.Q_terminal, x_goal=x_goal)
x0 = self.augment_state(self.x0)
if us_init is None:
us_init = np.random.uniform(-1, 1, (self.N, dynamics.action_size))
ilqr = iLQR(dynamics, cost, self.N,hessians=False)
xs, us = ilqr.fit(x0, us_init, on_iteration=self.on_iteration, n_iterations=1000)
# print(ilqr._K,'this is capital K')
return xs, us, ilqr._k, ilqr._K
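# Hypothetical usage sketch (not part of the original file). The layout of
# `state_dynamics` is inferred from accel(): [pole mass, cart mass, pole length, gravity].
#   xd = np.array([0.1, 1.0, 0.5, 9.81])
#   cp = CartPole(delta_t=0.02, traj_steps=300, state_dynamics=xd,
#                 initial_state=np.zeros(4), goal=np.array([0.0, 0.0, np.pi, 0.0]),
#                 cost_state=np.eye(5), cost_action=0.1 * np.eye(1),
#                 terminal_cost=100.0 * np.eye(5))
#   xs, us, k, K = cp.run_IterLinQuadReg()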
'''
next_states_matrix: this method works the same as next_states except that it takes the A, B state-space matrices as input
'''
def next_states_matrix(self, X, U, A, B, C):
theta = T.arctan2(X[2], X[3])
next_theta = theta + X[4] * self.dt
f = T.stack([
X[0] * A[0][0] + X[1] * A[0][1] + X[2] * A[0][2] + X[3] * A[0][3] + X[4] * A[0][4] + U[0] * A[0][5] + B[0][0],
X[0] * A[1][0] + X[1] * A[1][1] + X[2] * A[1][2] + X[3] * A[1][3] + X[4] * A[1][4] + U[0] * A[1][5] + B[1][0],
T.sin(next_theta),
T.cos(next_theta),
X[0] * A[4][0] + X[1] * A[4][1] + X[2] * A[4][2] + X[3] * A[4][3] + X[4] * A[4][4] + U[0] * A[4][5] + B[4][0]
])
return f
'''
run_IterLinQuadReg_matrix: this method will run iLQR when we are given A, B, C state-space matrices
X(k+1) = A * [X(k) U(k)].T + B ---- the evolution of the state over time is governed by this equation
'''
def run_IterLinQuadReg_matrix(self, A, B, C, dist_info_sharing='AM', us_init=None):
x_input, u_input = self.state_inputs()
if np.ndim(A) != 2:
if dist_info_sharing == 'GM':
A = gmean(A, axis=0)
B = gmean(B, axis=0)
C = gmean(C, axis=0)
print(A.shape,'A', B.shape,'B',C.shape,'C')
elif dist_info_sharing == 'AM':
A = np.sum(A, axis=0)/A.shape[0]
B = np.sum(B, axis=0, keepdims=True)/B.shape[0]
B = B.T
C = np.sum(C, axis=0)/C.shape[0]
else:
pass
f = self.next_states_matrix(x_input, u_input, A, B, C)
dynamics = AutoDiffDynamics(f, x_input, u_input)
x_goal = self.augment_state(self.x_goal)
if self.Q_terminal is None:
cost = QRCost(self.Q, self.R)
else:
cost = QRCost(self.Q, self.R, Q_terminal=self.Q_terminal, x_goal=x_goal)
x0 = self.augment_state(self.x0)
if us_init is None:
us_init = np.random.uniform(-1, 1, (self.N, dynamics.action_size))
ilqr = iLQR(dynamics, cost, self.N)
xs, us = ilqr.fit(x0, us_init, on_iteration=self.on_iteration)
return xs, us
'''
control_pattern: this method modifies the control actions based on the user input.
1 - Normal: based on the correction/mixing parameter gamma generate control
(gamma controls how much noise we want).
2 - MissingValue: based on the given percentage, set that many values to zero
(implicitly, the "Normal" generated control is used).
3 - Shuffle: shuffles the entire "Normal" generated control sequence.
4 - TimeDelay: takes the "Normal" generated control and shifts it by 1 index i.e. one unit time delay.
5 - Extreme: sets gamma to zero and generates the control based only on noise.
'''
def control_pattern(self, u, pattern, mean, var, gamma, percent):
if pattern == 'Normal':
u = gamma * u + (1 - gamma) * np.random.normal(mean, var, u.shape)
elif pattern == 'MissingValue':
n = int(u.shape[0] * percent * 0.01)
index = np.random.randint(0, u.shape[0], n)
u = gamma * u + (1 - gamma) * np.random.normal(mean, var, u.shape)
u[index, :] = 0
elif pattern == 'Shuffle':
u = gamma * u + (1 - gamma) * np.random.normal(mean, var, u.shape)
np.random.shuffle(u)
elif pattern == 'TimeDelay':
u = gamma * u + (1 - gamma) * np.random.normal(mean, var, u.shape)
u = np.roll(u, 1, axis=0)
u[0, :] = 0
elif pattern == 'Extreme':
u = np.random.normal(mean, var, u.shape)
return u
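# Illustrative call (not from the original file): mix 20% Gaussian noise into a
# control sequence `us` and delay it by one time step:
#   us_noisy = cp.control_pattern(us, pattern='TimeDelay', mean=0.0, var=0.1,
#                                 gamma=0.8, percent=0)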
'''
noise_traj_generator: In this method we generate trajectories based on some initial condition and noisy
bits, static_row + 1, bcol)
header_offset = static_row + 1
__writeAnchorListData(sheet, seq_ref[seq], bits, row + header_offset, bcol,
data_func = lambda data, test: data[test][_KB],
data_format = __S_BIT_FORMAT,
abs_format = __S_BIT_ABS_FORMAT)
# Write sequence
sheet.cell(row = row + header_offset, column = bcol - 1).value = __AL_SEQ_FORMAT.format(seq)
# write psnr tests
if psnr:
if 'pcol' not in locals():
pcol = sheet.max_column + 3
sheet.cell(row = static_row, column = pcol - 1).value = __AL_PSNR_HEADER
sheet.cell(row = static_row + 1, column = pcol - 1).value = __AL_TEST
sheet.cell(row = static_row + 2, column = pcol - 1).value = __AL_ANCHOR
#sheet.cell(row = static_row + 3, column = pcol - 1).value = __AL_SEQ
pcol_end = __writeAnchorListHeader(sheet, psnr, static_row + 1, pcol)
header_offset = static_row + 1
__writeAnchorListData(sheet, seq_ref[seq], psnr, row + header_offset, pcol,
data_func = lambda data, test: data[test][_PSNR],
data_format = __S_PSNR_FORMAT,
abs_format = __S_PSNR_ABS_FORMAT)
# Write sequence
sheet.cell(row = row + header_offset, column = pcol - 1).value = __AL_SEQ_FORMAT.format(seq)
# write time matrix
if time:
if 'tcol' not in locals():
tcol = sheet.max_column + 3
sheet.cell(row = static_row, column = tcol - 1).value = __AL_TIME_HEADER
sheet.cell(row = static_row + 1, column = tcol - 1).value = __AL_TEST
sheet.cell(row = static_row + 2, column = tcol - 1).value = __AL_ANCHOR
#sheet.cell(row = static_row + 3, column = tcol - 1).value = __AL_SEQ
tcol_end = __writeAnchorListHeader(sheet, time, static_row + 1, tcol)
header_offset = static_row + 1
__writeAnchorListData(sheet, seq_ref[seq], time, row + header_offset, tcol,
data_func = lambda data, test: data[test][_TIME],
data_format = __S_TIME_FORMAT,
abs_format = __S_TIME_ABS_FORMAT)
# Write sequence
sheet.cell(row = row + header_offset, column = tcol - 1).value = __AL_SEQ_FORMAT.format(seq)
# Make columns wider
for column in range(sheet.max_column):
sheet.column_dimensions[get_column_letter(column+1)].width = getMaxLength(list(data_refs.keys()) + list(order))
#Add conditional formatting
form_ranges = []
color_rules = []
#BDRATE
if bdbr:
form_ranges.append("{}:{}".format(get_column_letter(bdcol)+str(first_row),get_column_letter(bdcol_end)+str(row)))
color_rules.append(ColorScaleRule(start_type='percentile', start_value=90, start_color='63BE7B',
mid_type='num', mid_value=0, mid_color='FFFFFF',
end_type='percentile', end_value=10, end_color='F8696B' ))
if bits:
form_ranges.append("{}:{}".format(get_column_letter(bcol)+str(first_row),get_column_letter(bcol_end)+str(row)))
color_rules.append(ColorScaleRule(start_type='min', start_color='4F81BD',
mid_type='num', mid_value=1, mid_color='FFFFFF',
end_type='percentile', end_value=80, end_color='F8696B' ))
if psnr:
form_ranges.append("{}:{}".format(get_column_letter(pcol)+str(first_row),get_column_letter(pcol_end)+str(row)))
color_rules.append(ColorScaleRule(start_type='percentile', start_value=90, start_color='63BE7B',
mid_type='num', mid_value=0, mid_color='FFFFFF',
end_type='percentile', end_value=10, end_color='F8696B' ))
if time:
form_ranges.append("{}:{}".format(get_column_letter(tcol)+str(first_row),get_column_letter(tcol_end)+str(row)))
color_rules.append(ColorScaleRule(start_type='min', start_color='9BDE55',#'63BE7B',
mid_type='num', mid_value=1, mid_color='FFFFFF',
end_type='percentile', end_value=80, end_color='00BBEF'))
for (f_range, c_rule) in zip(form_ranges, color_rules):
sheet.conditional_formatting.add(f_range, c_rule)
def __writeAnchorListHeader(sheet: Worksheet, sub_def: AnchorSubType, row: int, col: int, allow_none: bool = True) -> int:
#Write horizontal headers/test names
tmp_col = col
for (test,anchors) in sub_def.items():
for anchor in anchors:
if not allow_none and not anchor:
continue
sheet.cell(row = row, column = tmp_col).value = test
sheet.cell(row = row + 1, column = tmp_col).value = anchor
tmp_col += 1
return tmp_col - 1
def __writeAnchorListData(sheet: Worksheet, ref: Dict[str, dict], sub_def: AnchorSubType, row: int, col: int, *, data_func: Callable[[Dict[str, dict], str], Union[float,int]], data_format: str, number_format: Union[None, str] = None, number_style: str = 'Percent', abs_format: Union[None, str] = None, abs_style: str = 'Comma') -> None:
from .TestSuite import parseSheetLayer
#final_r = row+len(data.keys())
#final_c = col+len(data.keys())
c = col
for (test, anchors) in sub_def.items():
for anchor in anchors:
value_format = data_format
value_style = number_style
anchor_res = []
if not anchor:
if abs_format:
value_format = abs_format
if abs_style:
value_style = abs_style
else:
continue
else:
anchor_res =[__SR_FORMAT.format(sheet=parseSheetLayer(anchor)[0],cell=cl) for cl in data_func(ref, anchor)]
test_res =[__SR_FORMAT.format(sheet=parseSheetLayer(test)[0],cell=cl) for cl in data_func(ref, test)]
sheet.cell(row = row, column = c).value = value_format.format(*(anchor_res + test_res))
sheet.cell(row = row, column = c).style = value_style
if number_format:
sheet.cell(row = row, column = c).number_format = number_format
sheet.cell(row= row, column= c).alignment = xl.styles.Alignment(horizontal='center')
c += 1
# Set conditional coloring
#form_range = "{}:{}".format(get_column_letter(col)+str(row),get_column_letter(final_c)+str(final_r))
#sheet.conditional_formatting.add(form_range, color_scale_rule)
#######################################
# BDBRMatrix summary type definitions #
#######################################
__S_SEQ_HEADER = "Sequence {} results"
__S_BIT_HEADER = r"Bit comparisons"
__S_PSNR_HEADER = r"PSNR comparisons (dB)"
__S_TIME_HEADER = r"Encoding time comparisons"
__S_HEADER = "Result summary matrix (bdrate, bit, PSNR, Time comparisons)"
"""
Handle writing the BDBRMatrix summary sheet
"""
def __writeBDBRMatrix(sheet: Worksheet, data_refs: SummaryRefType, order: List[str] = None, *, write_bdbr: bool, write_bits:bool, write_psnr: bool, write_time: bool, **other: dict):
from .TestSuite import _PSNR, _KBS, _KB, _TIME
seq_ref = __flip_dict(data_refs) # transform data_refs to seq_ref[<seq>][<test_name>] order
order = order if order else list(seq_ref.keys())
#print(seq_ref)
# For each sequence generate the comparison matrix
sheet.cell(row = 1, column = 1).value = __S_HEADER
#for (seq,ref) in sorted(seq_ref.items()):
for seq in order:
ref = seq_ref[seq]
tests = sorted(ref.keys())
row = sheet.max_row + 2
brow = row
prow = row
trow = row
col = 1 #sheet.max_column + 1
sheet.cell(row = row, column = col).value = __S_SEQ_HEADER.format(seq) #Write sequence header
# write bdrate matrix
if write_bdbr:
sheet.merge_cells(start_column=col,start_row=row,end_column=col+len(tests),end_row=row)
(row, col) = __writeSummaryMatrixHeader(sheet, tests, row+1, col)
__writeSummaryDataMatrix(sheet, ref, row, col,
data_func = lambda data, test: data[test][_KBS] + data[test][_PSNR],
data_format = __S_BDRATE_FORMAT,
number_format = '0.00%',
color_scale_rule = ColorScaleRule(start_type='percentile', start_value=90, start_color='63BE7B',
mid_type='num', mid_value=0, mid_color='FFFFFF',
end_type='percentile', end_value=10, end_color='F8696B' ))
# write bit matrix
if write_bits:
if 'bcol' not in locals():
bcol = sheet.max_column + 2
sheet.cell(row = brow, column = bcol).value = __S_BIT_HEADER
sheet.merge_cells(start_column=bcol,start_row=brow,end_column=bcol+len(tests),end_row=brow)
(brow, col) = __writeSummaryMatrixHeader(sheet, tests, brow+1, bcol)
__writeSummaryDataMatrix(sheet, ref, brow, col,
data_func = lambda data, test: data[test][_KB],
data_format = __S_BIT_FORMAT,
color_scale_rule = ColorScaleRule(start_type='min', start_color='4F81BD',
mid_type='num', mid_value=1, mid_color='FFFFFF',
end_type='percentile', end_value=80, end_color='F8696B' ))
# write psnr matrix
if write_psnr:
if 'pcol' not in locals():
pcol = sheet.max_column + 2
sheet.cell(row = prow, column = pcol).value = __S_PSNR_HEADER
sheet.merge_cells(start_column=pcol,start_row=prow,end_column=pcol+len(tests),end_row=prow)
(prow, col) = __writeSummaryMatrixHeader(sheet, tests, prow+1, pcol)
__writeSummaryDataMatrix(sheet, ref, prow, col,
data_func = lambda data, test: data[test][_PSNR],
data_format = __S_PSNR_FORMAT,
number_style = 'Comma',
def_val = 0,
color_scale_rule = ColorScaleRule(start_type='percentile', start_value=90, start_color='63BE7B',
mid_type='num', mid_value=0, mid_color='FFFFFF',
end_type='percentile', end_value=10, end_color='F8696B' ))
# write time matrix
if write_time:
if 'tcol' not in locals():
tcol = sheet.max_column + 2
sheet.cell(row = trow, column = tcol).value = __S_TIME_HEADER
sheet.merge_cells(start_column=tcol,start_row=trow,end_column=tcol+len(tests),end_row=trow)
(trow, col) = __writeSummaryMatrixHeader(sheet, tests, trow+1, tcol)
__writeSummaryDataMatrix(sheet, ref, trow, col,
data_func = lambda data, test: data[test][_TIME],
data_format = __S_TIME_FORMAT,
color_scale_rule = ColorScaleRule(start_type='min', start_color='9BDE55',#'63BE7B',
mid_type='num', mid_value=1, mid_color='FFFFFF',
end_type='percentile', end_value=80, end_color='00BBEF'))
# Make columns wider
for col in range(sheet.max_column):
sheet.column_dimensions[get_column_letter(col+1)].width = getMaxLength(list(data_refs.keys()))
"""
Write summary base header
@return row and col of start of data field
"""
def __writeSummaryMatrixHeader(sheet: Worksheet, tests: List[str], row: int, col: int) -> Tuple[int,int]:
d_row = row + 1
d_col = col + 1
#Write horizontal headers/test names
tmp_col = col + 1
for test in tests:
sheet.cell(row = row, column = tmp_col).value = test
tmp_col += 1
#Write vertical
tmp_row = row + 1
for test in tests:
sheet.cell(row = tmp_row, column = col).value = test
tmp_row += 1
return d_row, d_col
"""
Write summary matrix data array
"""
def __writeSummaryDataMatrix(sheet: Worksheet, data: Dict[str, dict], row: int, col: int, *, data_func: Callable[[Dict[str, dict], str], Union[float,int]], data_format: str, number_format: Union[str, None] = None, number_style: str = 'Percent', def_val: str = '-', color_scale_rule: Rule):
from .TestSuite import parseSheetLayer
test_col = col - 1
test_row = row - 1
final_r = row+len(data.keys())
final_c = col+len(data.keys())
for r in range(row,row+len(data.keys())):
for c in range(col,col+len(data.keys())):
t2 = sheet.cell(row = r, column = test_col).value
t1 = sheet.cell(row = test_row, column = c).value
if t1 == t2:
sheet.cell(row = r, column = c).value = def_val
else:
r1 =[__SR_FORMAT.format(sheet=parseSheetLayer(t1)[0],cell=cl) for cl in data_func(data, t1)]
r2 =[__SR_FORMAT.format(sheet=parseSheetLayer(t2)[0],cell=cl) for cl in data_func(data, t2)]
sheet.cell(row = r, column = c).value = data_format.format(*(r1+r2))
sheet.cell(row = r, column = c).style = number_style
if number_format:
sheet.cell(row = r, column = c).number_format = number_format
sheet.cell(row=r,column=c).alignment = xl.styles.Alignment(horizontal='center')
# Set conditional coloring
form_range = "{}:{}".format(get_column_letter(col)+str(row),get_column_letter(final_c)+str(final_r))
sheet.conditional_formatting.add(form_range, color_scale_rule)
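# Resulting layout (illustrative sketch only): the data block is a tests-by-tests
# matrix with the vertical test names in the column to the left, the horizontal
# test names in the row above, cross-sheet formulas off the diagonal and
# `def_val` ('-') on the diagonal, e.g. for tests T1..T3:
#
#        T1     T2     T3
#   T1   -      =...   =...
#   T2   =...   -      =...
#   T3   =...   =...   -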
#######################################
# curve_chart summary type definitions #
#######################################
__DATA = "data"
def __writeCurveChart(sheet: Worksheet, data_refs: SummaryRefType, order: List[str] = None, *, tests: Iterable[Union[str]], charts: Iterable[Tuple[str,str]], **other: dict) -> None:
refs = __writeCurveChartData(sheet, tests, order, data_refs)
__writeCharts(sheet, tests, order, charts, refs)
# Write data used by charts and return per test Reference ranges
def __writeCurveChartData(sheet: Worksheet, tests: Iterable[str], order: Iterable[str], data: Dict[str, dict]) -> dict:
from .TestSuite import _PSNR, _KBS, _TIME, parseSheetLayer
to_write_data = []
out_ref = {}
col = 2
ref_len = 4
def toRefFunc(sheet, cells):
return ["=" + __SR_FORMAT.format(sheet=parseSheetLayer(sheet)[0], cell = cell) for cell in cells]
for seq in order:
to_write_data.append([seq,])
out_ref[seq] = {}
to_write_data.append(["Data Type", "Test", "Data Point 1", "Data Point 2", "Data Point 3", "Data Point 4"])
for test in tests:
out_ref[seq][test] = {}
to_write_data.append([_KBS, test, *toRefFunc(test, data[test][seq][_KBS])])
to_write_data.append([_PSNR, test, *toRefFunc(test, data[test][seq][_PSNR])])
to_write_data.append([_TIME, | |
<reponame>agentdavidjoseph/slycat
# Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract
# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government
# retains certain rights in this software.
"""Functions for managing cached remote ssh sessions.
Slycat makes extensive use of ssh and the `Slycat Agent` to access remote
resources located on the high performance computing platforms used to generate
ensembles. This module provides functionality to create cached remote ssh /
agent sessions that can be used to retrieve data from remote hosts. This
functionality is used in a variety of ways:
* Web clients can browse the filesystem of a remote host.
* Web clients can create a Slycat model using data stored on a remote host.
* Web clients can retrieve images on a remote host (an essential part of the parameter-image-model).
* Web clients can retrieve video compressed from still images on a remote host.
When a remote session is created, a connection to the remote host over ssh is
created, an agent is started (only if the required configuration is present),
and a unique session identifier is returned. Callers use the session id to
retrieve the cached session and communicate with the remote host / agent. A
"last access" time for each session is maintained and updated whenever the
cached session is accessed. If a session times-out (a threshold amount of time
has elapsed since the last access) it is automatically deleted, and subsequent
use of the expired session id will fail.
Each session is bound to the IP address of the client that created it - only
the same client IP address is allowed to access the session.
"""
import datetime
import json
import os
import base64
import stat
import sys
import threading
import time
import uuid
import cherrypy
import paramiko
import socket
import slycat.mime_type
import slycat.web.server.authentication
import slycat.web.server.database
import slycat.web.server.streaming
import slycat.web.server
def cache_object(pid, key, content_type, content):
cherrypy.log.error("cache_object %s %s %s" % (pid, key, content_type))
database = slycat.web.server.database.couchdb.connect()
project = database.get("project", pid)
slycat.web.server.authentication.require_project_reader(project)
lookup = pid + "-" + key
for cache_object in database.scan("slycat/project-key-cache-objects", startkey=lookup, endkey=lookup):
database.put_attachment(cache_object, filename="content", content_type=content_type, content=content)
return
cache_object = {
"_id": uuid.uuid4().hex,
"type": "cache-object",
"project": pid,
"key": key,
"created": datetime.datetime.utcnow().isoformat(),
"creator": cherrypy.request.login,
}
database.save(cache_object)
database.put_attachment(cache_object, filename="content", content_type=content_type, content=content)
session_cache = {}
session_cache_lock = threading.Lock()
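# A minimal sketch (not the actual Slycat implementation) of the timeout rule
# described in the module docstring, assuming a hypothetical `timeout_seconds`
# configuration value:
#
#   def _is_expired(session, timeout_seconds):
#       age = (datetime.datetime.utcnow() - session.accessed).total_seconds()
#       return age > timeout_seconds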
class Session(object):
"""Encapsulates an open session connected to a remote host.
Examples
--------
Calling threads must serialize access to the Session object. To facilitate this,
a Session is a context manager - callers should always use a `with statement` when
accessing a session:
>>> with slycat.web.server.remote.get_session(sid) as session:
... print(session.username)
"""
def __init__(self, sid, client, username, hostname, ssh, sftp, agent=None):
now = datetime.datetime.utcnow()
self._client = client
self._sid = sid
self._username = username
self._hostname = hostname
self._ssh = ssh
self._sftp = sftp
self._agent = agent
self._created = now
self._accessed = now
self._lock = threading.Lock()
def __enter__(self):
self._lock.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
return self._lock.__exit__(exc_type, exc_value, traceback)
@property
def client(self):
"""Return the IP address of the client that created the session."""
return self._client
@property
def username(self):
"""Return the username used to create the session."""
return self._username
@property
def hostname(self):
"""Return the remote hostname accessed by the session."""
return self._hostname
@property
def sftp(self):
return self._sftp
@property
def accessed(self):
"""Return the time the session was last accessed."""
return self._accessed
def close(self):
if self._agent is not None:
cherrypy.log.error(
"Instructing remote agent for %s@%s from %s to shutdown." % (self.username, self.hostname, self.client))
stdin, stdout, stderr = self._agent
command = {"action": "exit"}
stdin.write("%s\n" % json.dumps(command))
stdin.flush()
self._sftp.close()
self._ssh.close()
def submit_batch(self, filename):
"""
Submits a command to the slycat-agent to start an input batch file on a cluster running SLURM.
Parameters
----------
filename : string
Name of the batch file
Returns
-------
response : dict
A dictionary with the following keys: filename, jid, errors
"""
if self._agent is not None:
stdin, stdout, stderr = self._agent
payload = {"action": "submit-batch", "command": filename}
stdin.write("%s\n" % json.dumps(payload))
stdin.flush()
response = json.loads(stdout.readline())
if not response["ok"]:
cherrypy.response.headers["x-slycat-message"] = response["message"]
cherrypy.log.error("slycat.web.server.remote.py submit_batch",
"cherrypy.HTTPError 400 %s" % response["message"])
raise cherrypy.HTTPError(400)
# parses out the job ID
jid = [int(s) for s in response["output"].split() if s.isdigit()][0]
return {"filename": response["filename"], "jid": jid, "errors": response["errors"]}
else:
cherrypy.response.headers["x-slycat-message"] = "No Slycat agent present on remote host."
cherrypy.log.error("slycat.web.server.remote.py submit_batch",
"cherrypy.HTTPError 500 no Slycat agent present on remote host.")
raise cherrypy.HTTPError(500)
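# Shape of the agent exchange used above (illustrative only, based on the payloads
# handled in this class -- exact fields may differ per agent version):
#   request : {"action": "submit-batch", "command": "slurm_batch.sh"}
#   response: {"ok": true, "filename": "...", "output": "Submitted batch job 12345", "errors": ""}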
def checkjob(self, jid):
"""
Submits a command to the slycat-agent to check the status of a submitted job to a cluster running SLURM.
Parameters
----------
jid : int
Job ID
Returns
-------
response : dict
A dictionary with the following keys: jid, status, errors
"""
cherrypy.log.error('calling checkjob in remote')
if self._agent is not None:
stdin, stdout, stderr = self._agent
payload = {"action": "checkjob", "command": jid}
try:
stdin.write("%s\n" % json.dumps(payload))
stdin.flush()
except socket.error as e:
delete_session(self._sid)
raise socket.error('Socket is closed')
response = json.loads(stdout.readline())
if not response["ok"]:
cherrypy.response.headers["x-slycat-message"] = response["message"]
cherrypy.log.error("slycat.web.server.remote.py checkjob",
"cherrypy.HTTPError 400 %s" % response["message"])
raise cherrypy.HTTPError(400)
# parses the useful information from job status
#cherrypy.log.error("response state:%s" % response["output"])
status = {
"state": response["output"]
}
return {"jid": response["jid"], "status": status, "errors": response["errors"], "logFile":response["logFile"]}
else:
cherrypy.response.headers["x-slycat-message"] = "No Slycat agent present on remote host."
cherrypy.log.error("slycat.web.server.remote.py checkjob",
"cherrypy.HTTPError 500 no Slycat agent present on remote host.")
raise cherrypy.HTTPError(500)
def cancel_job(self, jid):
"""
Submits a command to the slycat-agent to cancel a running job on a cluster running SLURM.
Parameters
----------
jid : int
Job ID
Returns
-------
response : dict
A dictionary with the following keys: jid, output, errors
"""
if self._agent is not None:
stdin, stdout, stderr = self._agent
payload = {"action": "cancel-job", "command": jid}
stdin.write("%s\n" % json.dumps(payload))
stdin.flush()
response = json.loads(stdout.readline())
if not response["ok"]:
cherrypy.response.headers["x-slycat-message"] = response["message"]
cherrypy.log.error("slycat.web.server.remote.py cancel_job",
"cherrypy.HTTPError 400 %s" % response["message"])
raise cherrypy.HTTPError(400)
return {"jid": response["jid"], "output": response["output"], "errors": response["errors"]}
else:
cherrypy.response.headers["x-slycat-message"] = "No Slycat agent present on remote host."
cherrypy.log.error("slycat.web.server.remote.py cancel_job",
"cherrypy.HTTPError 500 no Slycat agent present on remote host.")
raise cherrypy.HTTPError(500)
def get_job_output(self, jid, path):
"""
Submits a command to the slycat-agent to fetch the content of the a job's output file from a cluster running SLURM.
Note that the expected format for the output file is slurm-[jid].out.
Parameters
----------
jid : int
Job ID
Returns
-------
response : dict
A dictionary with the following keys: jid, output, errors
"""
if self._agent is not None:
stdin, stdout, stderr = self._agent
payload = {"action": "get-job-output", "command": {"jid": jid, "path": path}}
stdin.write("%s\n" % json.dumps(payload))
stdin.flush()
response = json.loads(stdout.readline())
if not response["ok"]:
cherrypy.response.headers["x-slycat-message"] = response["message"]
cherrypy.log.error("slycat.web.server.remote.py get_job_output",
"cherrypy.HTTPError 400 %s" % response["message"])
raise cherrypy.HTTPError(400)
return {"jid": response["jid"], "output": response["output"], "errors": response["errors"]}
else:
cherrypy.response.headers["x-slycat-message"] = "No Slycat agent present on remote host."
cherrypy.log.error("slycat.web.server.remote.py get_job_output",
"cherrypy.HTTPError 500 no Slycat agent present on remote host.")
raise cherrypy.HTTPError(500)
def get_user_config(self):
"""
Submits a command to the slycat-agent to fetch the content of a user's .slycatrc file in their home directory.
Returns
-------
response : dict
A dictionary with the configuration values
"""
if self._agent is not None:
stdin, stdout, stderr = self._agent
payload = {"action": "get-user-config"}
stdin.write("%s\n" % json.dumps(payload))
stdin.flush()
response = json.loads(stdout.readline())
if not response["ok"]:
cherrypy.response.headers["x-slycat-message"] = response["message"]
cherrypy.log.error("slycat.web.server.remote.py get_user_config",
"cherrypy.HTTPError 400 %s" % response["message"])
raise cherrypy.HTTPError(400)
return {"config": response["config"], "errors": response["errors"]}
else:
cherrypy.response.headers["x-slycat-message"] = "No Slycat agent present on remote host."
cherrypy.log.error("slycat.web.server.remote.py get_user_config",
"cherrypy.HTTPError 500 no Slycat agent present on remote host.")
raise cherrypy.HTTPError(500)
def set_user_config(self, config):
"""
Submits a command to the slycat-agent to set the content of a user's .slycatrc file in their home directory.
Returns
-------
response : dict
"""
if self._agent is not None:
stdin, stdout, stderr = self._agent
payload = {"action": "set-user-config", "command": {"config": config}}
stdin.write("%s\n" % json.dumps(payload))
stdin.flush()
response = json.loads(stdout.readline())
if not response["ok"]:
cherrypy.response.headers["x-slycat-message"] = response["message"]
cherrypy.log.error("slycat.web.server.remote.py set_user_config",
"cherrypy.HTTPError 400 %s" % response["message"])
raise cherrypy.HTTPError(400)
return {"errors": response["errors"]}
else:
cherrypy.response.headers["x-slycat-message"] = "No Slycat agent present on remote host."
cherrypy.log.error("slycat.web.server.remote.py set_user_config",
"cherrypy.HTTPError 500 no Slycat agent present on remote host.")
raise cherrypy.HTTPError(500)
def run_remote_command(self, command):
"""
run a remote command from an HPC source running a Slycat
agent. The command could be things such as starting an HPC
script | |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Example: Hilbert space approximation for Gaussian processes.
============================================================
This example replicates a few of the models in the excellent case
study by <NAME> [1] (originally written using R and Stan).
The case study uses approximate Gaussian processes [2] to model the
relative number of births per day in the US from 1969 to 1988.
The Hilbert space approximation is way faster than the exact Gaussian
processes because it circumvents the need for inverting the
covariance matrix.
The original case study presented by Aki also emphasizes the iterative
process of building a Bayesian model, which is excellent as a pedagogical
resource. Here, however, we replicate only 4 out of all the models available in [1].
There are a few minor differences in the mathematical details of our models,
which we had to make in order for the chains to mix properly. We have clearly
commented on the places where our models are different.
**References:**
1. <NAME>, Simpson, et al (2020), `"Bayesian workflow book - Birthdays"
<https://avehtari.github.io/casestudies/Birthdays/birthdays.html>`.
2. <NAME>, Bürkner PC, Andersen MR, et al (2020),
"Practical hilbert space approximate bayesian gaussian processes for probabilistic programming".
.. image:: ../_static/img/examples/hsgp.png
:align: center
"""
import argparse
import functools
import operator
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
import numpyro
from numpyro import deterministic, plate, sample
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
# --- utility functions
def load_data():
URL = "https://raw.githubusercontent.com/avehtari/casestudies/master/Birthdays/data/births_usa_1969.csv"
data = pd.read_csv(URL, sep=",")
day0 = pd.to_datetime("31-Dec-1968")
dates = [day0 + pd.Timedelta(f"{i}d") for i in data["id"]]
data["date"] = dates
data["births_relative"] = data["births"] / data["births"].mean()
return data
def save_samples(out_path, samples):
"""
Save a dictionary of arrays using numpy's compressed binary format.
Fast reading and writing and efficient storage.
"""
np.savez_compressed(out_path, **samples)
class UnivariateScaler:
"""
Standardizes the data to have mean 0 and unit standard deviation.
"""
def __init__(self):
self._mean = None
self._std = None
def fit(self, x):
self._mean = np.mean(x)
self._std = np.std(x)
return self
def transform(self, x):
return (x - self._mean) / self._std
def inverse_transform(self, x):
return x * self._std + self._mean
def _agg(*args, scaler=None):
"""
Custom function for aggregating the samples
and transforming back to the desired scale.
"""
total = functools.reduce(operator.add, args)
return (100 * scaler.inverse_transform(total)).mean(axis=0)
# --- modelling functions
def modified_bessel_first_kind(v, z):
v = jnp.asarray(v, dtype=float)
return jnp.exp(jnp.abs(z)) * tfp.math.bessel_ive(v, z)
def spectral_density(w, alpha, length):
c = alpha * jnp.sqrt(2 * jnp.pi) * length
e = jnp.exp(-0.5 * (length ** 2) * (w ** 2))
return c * e
def diag_spectral_density(alpha, length, L, M):
"""spd for squared exponential kernel"""
sqrt_eigenvalues = jnp.arange(1, 1 + M) * jnp.pi / 2 / L
return spectral_density(sqrt_eigenvalues, alpha, length)
def phi(x, L, M):
"""
The first `M` eigenfunctions of the laplacian operator in `[-L, L]`
evaluated at `x`. These are used for the approximation of the
squared exponential kernel.
"""
m1 = (jnp.pi / (2 * L)) * jnp.tile(L + x[:, None], M)
m2 = jnp.diag(jnp.linspace(1, M, num=M))
num = jnp.sin(m1 @ m2)
den = jnp.sqrt(L)
return num / den
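# Minimal sketch (not part of the original script) of how these pieces combine into
# the Hilbert-space GP approximation f(x) ~= phi(x) @ (sqrt(S(lambda)) * beta),
# mirroring the use inside the model classes below (toy values; beta stands in for
# the sampled coefficients):
#
#   x = jnp.linspace(-1.0, 1.0, 50)
#   L, M = 1.5, 10
#   spd = jnp.sqrt(diag_spectral_density(alpha=1.0, length=0.5, L=L, M=M))
#   beta = jnp.ones(M)
#   f_approx = phi(x, L, M) @ (spd * beta)   # shape (50,)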
def diag_spectral_density_periodic(alpha, length, M):
"""
Not actually a spectral density but these are used in the same
way. These are simply the first `M` coefficients of the Taylor
expansion approximation for the periodic kernel.
"""
a = length ** (-2)
J = jnp.arange(1, M + 1)
q2 = (2 * alpha ** 2 / jnp.exp(a)) * modified_bessel_first_kind(J, a)
return q2
def phi_periodic(x, w0, M):
"""
Basis functions for the approximation of the periodic kernel.
"""
m1 = jnp.tile(w0 * x[:, None], M)
m2 = jnp.diag(jnp.linspace(1, M, num=M))
mw0x = m1 @ m2
return jnp.cos(mw0x), jnp.sin(mw0x)
# --- models
class GP1:
"""
Long term trend Gaussian process
"""
def __init__(self):
self.x_scaler = UnivariateScaler()
self.y_scaler = UnivariateScaler()
def model(self, x, L, M, y=None):
# intercept
intercept = sample("intercept", dist.Normal(0, 1))
# long term trend
ρ = sample("ρ", dist.LogNormal(-1.0, 1.0))
α = sample("α", dist.HalfNormal(1.0))
eigenfunctions = phi(x, L, M)
spd = jnp.sqrt(diag_spectral_density(α, ρ, L, M))
with plate("basis1", M):
β1 = sample("β1", dist.Normal(0, 1))
f1 = deterministic("f1", eigenfunctions @ (spd * β1))
μ = deterministic("μ", intercept + f1)
σ = sample("σ", dist.HalfNormal(0.5))
with plate("n_obs", x.shape[0]):
sample("y", dist.Normal(μ, σ), obs=y)
def get_data(self):
data = load_data()
x = data["id"].values
y = data["births_relative"].values
self.x_scaler.fit(x)
self.y_scaler.fit(y)
xsd = jnp.array(self.x_scaler.transform(x))
ysd = jnp.array(self.y_scaler.transform(y))
return dict(
x=xsd,
y=ysd,
L=1.5 * max(xsd),
M=10,
)
def make_figure(self, samples):
data = load_data()
dates = data["date"]
y = 100 * data["births_relative"]
μ = 100 * self.y_scaler.inverse_transform(samples["μ"]).mean(axis=0)
f = plt.figure(figsize=(15, 5))
plt.axhline(100, color="k", lw=1, alpha=0.8)
plt.plot(dates, y, marker=".", lw=0, alpha=0.3)
plt.plot(dates, μ, color="r", lw=2)
plt.ylabel("Relative number of births")
plt.xlabel("")
return f
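# Hypothetical end-to-end sketch (not from the original script, which is truncated
# here before its driver code); shown only to illustrate how a model class is
# intended to be used with NUTS/MCMC:
#
#   gp1 = GP1()
#   inputs = gp1.get_data()
#   mcmc = MCMC(NUTS(gp1.model), num_warmup=500, num_samples=500)
#   mcmc.run(jax.random.PRNGKey(0), **inputs)
#   fig = gp1.make_figure(mcmc.get_samples())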
class GP2:
"""
Long term trend with year seasonality component.
"""
def __init__(self):
self.x_scaler = UnivariateScaler()
self.y_scaler = UnivariateScaler()
def model(self, x, w0, J, L, M, y=None):
intercept = sample("intercept", dist.Normal(0, 1))
# long term trend
ρ1 = sample("ρ1", dist.LogNormal(-1.0, 1.0))
α1 = sample("α1", dist.HalfNormal(1.0))
eigenfunctions = phi(x, L, M)
spd = jnp.sqrt(diag_spectral_density(α1, ρ1, L, M))
with plate("basis", M):
β1 = sample("β1", dist.Normal(0, 1))
# year-periodic component
ρ2 = sample("ρ2", dist.HalfNormal(0.1))
α2 = sample("α2", dist.HalfNormal(1.0))
cosines, sines = phi_periodic(x, w0, J)
spd_periodic = jnp.sqrt(diag_spectral_density_periodic(α2, ρ2, J))
with plate("periodic_basis", J):
β2_cos = sample("β2_cos", dist.Normal(0, 1))
β2_sin = sample("β2_sin", dist.Normal(0, 1))
f1 = deterministic("f1", eigenfunctions @ (spd * β1))
f2 = deterministic(
"f2", cosines @ (spd_periodic * β2_cos) + sines @ (spd_periodic * β2_sin)
)
μ = deterministic("μ", intercept + f1 + f2)
σ = sample("σ", dist.HalfNormal(0.5))
with plate("n_obs", x.shape[0]):
sample("y", dist.Normal(μ, σ), obs=y)
def get_data(self):
data = load_data()
x = data["id"].values
y = data["births_relative"].values
self.x_scaler.fit(x)
self.y_scaler.fit(y)
xsd = jnp.array(self.x_scaler.transform(x))
ysd = jnp.array(self.y_scaler.transform(y))
w0 = 2 * jnp.pi / (365.25 / self.x_scaler._std)
return dict(
x=xsd,
y=ysd,
w0=w0,
J=20,
L=1.5 * max(xsd),
M=10,
)
def make_figure(self, samples):
data = load_data()
dates = data["date"]
y = 100 * data["births_relative"]
y_by_day_of_year = 100 * data.groupby("day_of_year2")["births_relative"].mean()
μ = 100 * self.y_scaler.inverse_transform(samples["μ"]).mean(axis=0)
f1 = 100 * self.y_scaler.inverse_transform(samples["f1"]).mean(axis=0)
f2 = 100 * self.y_scaler.inverse_transform(samples["f2"]).mean(axis=0)
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
axes[0].plot(dates, y, marker=".", lw=0, alpha=0.3)
axes[0].plot(dates, μ, color="r", lw=2, alpha=1, label="Total")
axes[0].plot(dates, f1, color="C2", lw=3, alpha=1, label="Trend")
axes[0].set_ylabel("Relative number of births")
axes[0].set_title("All time")
axes[1].plot(
y_by_day_of_year.index, y_by_day_of_year, marker=".", lw=0, alpha=0.5
)
axes[1].plot(
y_by_day_of_year.index, f2[:366], color="r", lw=2, label="Year seasonality"
)
axes[1].set_ylabel("Relative number of births")
axes[1].set_xlabel("Day of year")
for ax in axes:
ax.axhline(100, color="k", lw=1, alpha=0.8)
ax.legend()
return fig
class GP3:
"""
Long term trend with yearly seasonality and slowly varying day-of-week effect.
"""
def __init__(self):
self.x_scaler = UnivariateScaler()
self.y_scaler = UnivariateScaler()
def model(self, x, day_of_week, w0, J, L, M, L3, M3, y=None):
intercept = sample("intercept", dist.Normal(0, 1))
# long term trend
ρ1 = sample("ρ1", dist.LogNormal(-1.0, 1.0))
α1 = sample("α1", dist.HalfNormal(1.0))
eigenfunctions = phi(x, L, M)
spd = jnp.sqrt(diag_spectral_density(α1, ρ1, L, M))
with plate("basis", M):
β1 = sample("β1", dist.Normal(0, 1))
# year-periodic component
ρ2 = sample("ρ2", dist.HalfNormal(0.1))
α2 = sample("α2", dist.HalfNormal(1.0))
cosines, sines = phi_periodic(x, w0, J)
spd_periodic = jnp.sqrt(diag_spectral_density_periodic(α2, ρ2, J))
with plate("periodic_basis", J):
β2_cos = sample("β2_cos", dist.Normal(0, 1))
β2_sin = sample("β2_sin", dist.Normal(0, 1))
# day of week effect
with plate("plate_day_of_week", 6):
β_week = sample("β_week", dist.Normal(0, 1))
# next enforce sum-to-zero -- this is slightly different from Aki's model,
# which instead imposes Monday's effect to be zero.
β_week = jnp.concatenate([jnp.array([-jnp.sum(β_week)]), β_week])
# long term variation of week effect
α3 = sample("α3", dist.HalfNormal(0.1))
ρ3 = sample("ρ3", dist.LogNormal(1.0, 1.0)) # prior: very long-term effect
eigenfunctions_3 = phi(x, L3, M3)
spd_3 = jnp.sqrt(diag_spectral_density(α3, ρ3, L3, M3))
with plate("week_trend", M3):
β3 = sample("β3", dist.Normal(0, 1))
# combine
f1 = deterministic("f1", eigenfunctions @ (spd * β1))
f2 = deterministic(
"f2", cosines @ (spd_periodic * β2_cos) + sines @ (spd_periodic * β2_sin)
)
g3 = deterministic("g3", eigenfunctions_3 @ (spd_3 * β3))
μ = deterministic("μ", intercept + f1 + f2 + jnp.exp(g3) * β_week[day_of_week])
σ = sample("σ", dist.HalfNormal(0.5))
with plate("n_obs", x.shape[0]):
sample("y", dist.Normal(μ, σ), obs=y)
def get_data(self):
data = load_data()
x = data["id"].values
y = data["births_relative"].values
self.x_scaler.fit(x)
self.y_scaler.fit(y)
xsd = jnp.array(self.x_scaler.transform(x))
ysd = jnp.array(self.y_scaler.transform(y))
w0 = 2 * jnp.pi / (365.25 / self.x_scaler._std)
dow = jnp.array(data["day_of_week"].values) - 1
return dict(
x=xsd,
day_of_week=dow,
w0=w0,
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
#Global mathematical model of SAC-A with 31 nodes and according to design
# status of March 3, 1997. Orbit of 51.6 degrees of inclination in winter
# solstice (beta=15), at 205 mn (380 km). Attitude: panels always deployed
# and directed towards the sun, satellite main axis perpendicular to the
# ecliptic plane.
# NOMINAL HOT CASE WITH 23W (INTERNAL)
#----------------------------------------------------------------------------
import tkinter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from solver import Node, ThermalModel, FDMExplicit
from matplotlib.figure import Figure
import datetime
inicio = datetime.datetime.now() # Compilation (wall-clock start) time
#HEADER CONTROL DATA
EMLI = 0.01 # MLI effective emissivity
PINF = 9.0 # Internal power dissipated at lower platform
PSUP = 14.00 # Internal power dissipated at upper platform
F_AREA = 0.31 # Area correction for radiator
# Simulation time
t_start = 0.0
t_end = 16579.8 #12000 #55266.0
# Times at which the powers change
power_times1 = [ 0.0, 16580.0, 16580.0, 17300.0, 17300.0, 55266.0,]
power_Low= [1.0, 1.0, 1.0, 1.0, 1.0, 1.0,]
power_Upp= [1.0, 1.0, 4.429, 4.429, 1.0, 1.0,]
for index in range(len(power_Low)):
power_Low[index] = power_Low[index]*PINF
for index in range(len(power_Upp)):
power_Upp[index]=power_Upp[index]*PSUP
def generate_power_function(time, power):
from bisect import bisect
power_array = power
power_times1 = time
def get_power_X(time):
return power_array[bisect(power_times1, time)-1]
return get_power_X
power_L= generate_power_function(power_times1, power_Low)
power_U = generate_power_function(power_times1, power_Upp)
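# Quick check of the lookup behaviour (illustrative): with the tables above,
# power_U(1000.0) returns 1.0*PSUP = 14.0 W, while power_U(17000.0) falls in the
# [16580, 17300) interval and returns 4.429*PSUP ~= 62.0 W; power_L(t) stays at
# 1.0*PINF = 9.0 W throughout.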
power_times = [ 0.0, 230.4, 460.8, 690.6, 921.0,
1151.4, 1381.8, 1611.6, 1842.0, 2072.4,
2302.8, 2532.6, 2763.0, 2993.4, 3223.8,
3454.2, 3684.0, 3914.4, 4144.8, 4375.2,
4605.0, 4835.0, 5065.8, 5296.2, 5526.6,
5526.6, 5757.0, 5987.4, 6217.2, 6447.6, # 2nd orbit
6678.0, 6908.4, 7138.2, 7368.6, 7599.0,
7829.4, 8059.2, 8289.6, 8520.0, 8750.4,
8980.8, 9210.6, 9441.0, 9671.4, 9901.8,
10131.6,10361.6,10592.4,10822.8,11053.2,
11053.2,11283.6,11514.0,11743.8,11974.2, # 3rd orbit
12204.6,12435.0,12664.8,12895.2,13125.6,
13356.0,13585.8,13816.2,14046.6,14277.0,
14507.4,14737.2,14967.6,15198.0,15428.4,
15658.2,15888.2,16119.0,16349.4,16579.8,]
power_node_103 = [ 7.64, 7.30, 6.92, 6.52, 6.17,
5.96, 5.89, 5.96, 6.17, 6.52,
6.92, 7.30, 7.64, 8.03, 8.46,
8.88, 9.25, 9.51, 9.60, 9.51,
9.25, 8.88, 8.46, 8.03, 7.64,]
power_node_103 = power_node_103*3
power_node_104 = [14.16, 13.95, 13.68, 13.41, 13.17,
13.02, 12.97, 13.02, 13.17, 13.41,
13.68, 13.95, 14.16, 14.34, 14.49,
14.60, 14.69, 14.75, 14.77, 14.75,
14.69, 14.60, 14.48, 14.34, 14.16,]
power_node_104 = power_node_104*3
power_node_105 = [73.27, 73.22, 73.11, 72.96, 72.81,
72.70, 72.67, 72.70, 72.81, 72.96,
73.11, 73.22, 73.27, 73.36, 73.64,
73.96, 74.25, 74.43, 74.49, 74.43,
74.25, 73.96, 73.64, 73.36, 73.27,]
power_node_105 = power_node_105*3
power_node_106 = [ 4.31, 4.87, 5.69, 6.55, 7.29,
7.78, 7.95, 7.78, 7.28, 6.54,
5.69, 4.87, 4.31, 3.88, 3.49,
3.17, 2.93, 2.78, 2.73, 2.78,
2.93, 3.17, 3.50, 3.88, 4.31,]
power_node_106 = power_node_106*3
power_node_107 = [74.76, 75.19, 75.83, 76.65, 77.60,
78.59, 79.50, 80.20, 80.57, 80.55,
80.19, 79.62, 79.05, 78.59, 78.35,
78.19, 78.01, 77.73, 77.34, 76.82,
76.21, 75.54, 74.96, 74.64, 74.76,]
power_node_107 = power_node_107*3
power_node_108 = [79.05, 79.63, 80.20, 80.55, 80.57,
80.20, 79.50, 78.58, 77.60, 76.65,
75.83, 75.18, 74.76, 74.64, 74.96,
75.54, 76.21, 76.82, 77.34, 77.73,
78.01, 78.19, 78.35, 78.59, 79.05,]
power_node_108 = power_node_108*3
power_node_109 = [ 6.82, 6.81, 6.73, 6.60, 6.42,
6.23, 6.04, 5.87, 5.74, 5.64,
5.58, 5.56, 5.57, 5.60, 5.68,
5.79, 5.94, 6.10, 6.27, 6.43,
6.58, 6.69, 6.76, 6.80, 6.82,]
power_node_109 = power_node_109*3
power_node_110 = [ 5.57, 5.56, 5.58, 5.64, 5.74,
5.87, 6.04, 6.23, 6.42, 6.60,
6.73, 6.81, 6.82, 6.80, 6.76,
6.69, 6.58, 6.43, 6.27, 6.10,
5.94, 5.79, 5.68, 5.60, 5.57,]
power_node_110 = power_node_110*3
#
power_node_111= [ 1.58, 1.71, 1.83, 1.91, 1.95,
1.96, 1.96, 1.96, 1.95, 1.91,
1.83, 1.71, 1.58, 1.46, 1.34,
1.20, 1.06, 0.95, 0.90, 0.95,
1.06, 1.20, 1.34, 1.46, 1.58,]
power_node_111 = power_node_111*3
#
power_node_112= [2.47, 2.61, 2.75, 2.85, 2.92,
2.96, 2.98, 2.96, 2.92, 2.85,
2.75, 2.61, 2.47, 2.34, 2.21,
2.10, 1.99, 1.91, 1.88, 1.91,
1.99, 2.10, 2.21, 2.34, 2.47,]
power_node_112 = power_node_112*3
power_node_113= [0.21, 0.15, 0.09, 0.04, 0.01,
0.00, 0.00, 0.00, 0.01, 0.04,
0.09, 0.15, 0.21, 0.29, 0.37,
0.45, 0.52, 0.58, 0.60, 0.58,
0.52, 0.45, 0.37, 0.29, 0.21, ]
power_node_113 = power_node_113*3
#
power_node_114= [ 7.43, 10.01, 13.15, 16.40, 19.29,
21.34, 22.10, 21.33, 19.29, 16.40,
13.15, 10.00, 7.43, 5.56, 4.16,
3.18, 2.63, 2.39, 2.32, 2.39,
2.63, 3.18, 4.16, 5.56, 7.43,]
power_node_114 = power_node_114*3
#
power_node_115= [ 7.43, 10.01, 13.15, 16.40, 19.29,
21.34, 22.10, 21.33, 19.29, 16.40,
13.15, 10.00, 7.43, 5.56, 4.16,
3.18, 2.63, 2.39, 2.32, 2.39,
2.63, 3.18, 4.16, 5.56, 7.43,]
power_node_115 = power_node_115*3
#
power_node_116= [ 8.64, 6.56, 4.74, 3.36, 2.55,
2.27, 2.24, 2.27, 2.55, 3.36,
4.74, 6.56, 8.64, 11.08, 13.72,
16.27, 18.47, 20.02, 20.60, 20.02,
18.47, 16.27, 13.72, 11.08, 8.64,]
power_node_116 = power_node_116*3
#
power_node_117= [ 5.76, 5.95, 5.85, 5.47, 4.82,
3.94, 2.94, 1.95, 1.09, 0.47,
0.14, 0.07, 0.05, 0.05, 0.08,
0.27, 0.63, 1.12, 1.74, 2.48,
3.27, 4.07, 4.82, 5.43, 5.76,]
power_node_117 = power_node_117*3
#
power_node_118= [ 0.05, 0.07, 0.14, 0.47, 1.10,
1.95, 2.94, 3.94, 4.82, 5.47,
5.85, 5.95, 5.76, 5.43, 4.82,
4.07, 3.27, 2.48, 1.74, 1.12,
0.63, 0.27, 0.08, 0.05, 0.05,]
power_node_118 = power_node_118*3
#
power_node_119= [ 0.04, 0.05, 0.13, 0.51, 1.23,
2.27, 3.51, 4.81, 5.92, 6.73,
7.17, 7.22, 6.93, 6.55, 5.86,
4.98, 4.01, 3.06, 2.17, 1.40,
0.77, 0.33, 0.08, 0.03, 0.04,]
power_node_119 = power_node_119*3
#
power_node_120= [ 1.13, 1.55, 2.03, 2.54, 3.02,
3.40, 3.60, 3.60, 3.45, 3.15,
2.74, 2.29, 1.87, 1.50, 1.16,
0.87, 0.62, 0.43, 0.29, 0.21,
0.23, 0.36, 0.55, 0.80, 1.13,]
power_node_120 = power_node_120*3
#
power_node_121= [1.27, 1.68, 2.15, 2.60, 2.94,
3.13, 3.17, 3.02, 2.66, 2.22,
1.74, 1.29, 0.89, 0.59, 0.37,
0.20, 0.07, 0.04, 0.07, 0.12,
0.22, 0.39, 0.63, 0.93, 1.27,]
power_node_121 = power_node_121*3
#
power_node_122= [ 1.87, 2.30, 2.75, 3.15, 3.45,
3.60, 3.60, 3.40, 3.02, 2.54,
2.03, 1.55, 1.13, 0.80, 0.55,
0.36, 0.23, 0.21, 0.29, 0.43,
0.62, 0.87, 1.16, 1.50, 1.87,]
power_node_122 = power_node_122*3
#
power_node_123= [ 0.89, 1.29, 1.74, 2.22, 2.66,
3.02, 3.17, 3.13, 2.94, 2.60,
2.15, 1.68, 1.27, 0.93, 0.63,
0.39, 0.22, 0.12, 0.07, 0.04,
0.07, 0.20, 0.37, 0.59, 0.89,]
power_node_123 = power_node_123*3
#
power_node_126= [ 3.33, 3.67, 3.97, 4.04, 3.83,
3.37, 2.74, 2.05, 1.40, 0.86,
0.49, 0.28, 0.18, 0.16, 0.19,
0.29, 0.47, 0.74, 1.08, 1.48,
1.91, 2.35, 2.75, 3.08, 3.33,]
power_node_126 = power_node_126*3
for index in range(len(power_node_126)):
power_node_126[index] = power_node_126[index]*F_AREA
#
power_node_127= [ 0.18, 0.28, 0.49, 0.86, 1.40,
2.05, 2.74, 3.37, 3.83, 4.04,
3.97, 3.67, 3.33, 3.08, 2.75,
2.35, 1.91, 1.48, 1.08, 0.74,
0.47, 0.29, 0.19, 0.16, 0.18,]
power_node_127 = power_node_127*3
for index in range(len(power_node_127)):
power_node_127[index] = power_node_127[index]*F_AREA
#
power_node_129= [ 1.86, 2.14, 2.42, 2.67, 2.87,
2.99, 3.04, 2.99, 2.87, 2.67,
2.42, 2.14, 1.86, 1.61, 1.38,
1.15, 0.93, 0.78, 0.73, 0.78,
0.93, 1.15, 1.38, 1.61, 1.86,]
power_node_129 = power_node_129*3
def generate_power_function(time, power):
from bisect import bisect
power_array = power
power_times = time
def get_power_X(time):
return power_array[bisect(power_times, time)-1]
return get_power_X
power_103 = generate_power_function(power_times, power_node_103)
power_104 = generate_power_function(power_times, power_node_104)
power_105 = generate_power_function(power_times, power_node_105)
power_106 = generate_power_function(power_times, power_node_106)
power_107 = generate_power_function(power_times, power_node_107)
power_108 = generate_power_function(power_times, power_node_108)
power_109 = generate_power_function(power_times, power_node_109)
power_110 = generate_power_function(power_times, power_node_110)
power_111 = generate_power_function(power_times, power_node_111)
power_112 = generate_power_function(power_times, power_node_112)
power_113 = generate_power_function(power_times, power_node_113)
power_114 = generate_power_function(power_times, power_node_114)
power_115 = generate_power_function(power_times, power_node_115)
power_116 = generate_power_function(power_times, power_node_116)
power_117 = generate_power_function(power_times, power_node_117)
power_118 = generate_power_function(power_times, power_node_118)
power_119 = generate_power_function(power_times, power_node_119)
power_120 = generate_power_function(power_times, power_node_120)
power_121 = generate_power_function(power_times, power_node_121)
power_122 = generate_power_function(power_times, power_node_122)
power_123 = generate_power_function(power_times, power_node_123)
power_126 = generate_power_function(power_times, power_node_126)
power_127 = generate_power_function(power_times, power_node_127)
power_129 = generate_power_function(power_times, power_node_129)
def power_mtr(time): # Power variation in the MTR
if time == 0.0:
return 0.0
elif time >= 100.0 and time <= 600.0:
return 200.0
#elif time >= 700.0 and time =< 899.0:
# return 0.0
elif time >= 900.0 and time <= 1400.0:
return 200.0
else:
return 0.0
model = ThermalModel()
#HEADER NODE DATA
model.addNode(Node(1, 14400.0, 297.98277018, power_L, 'Lower platform'))
model.addNode(Node(2, 22400.0, 298.24525503, power_U, 'Upper platform'))
model.addNode(Node(3, 1600.0, 296.06883956, power_103, 'Interface ring'))
model.addNode(Node(4, 400.0,329.76981036, power_104, 'Front panel radiator'))
model.addNode(Node(5, 450.0,352.92355844, power_105, 'Front solar panel'))
model.addNode(Node(6, 450.0,287.98149354, power_106, 'Rear solar panel'))
model.addNode(Node(7, 450.0,326.16023767, power_107, 'Lateral solar panel _1'))
model.addNode(Node(8, 450.0,326.16113637, power_108, 'Lateral solar panel _2'))
model.addNode(Node(9, 150.0,334.52013919, power_109, 'Silicon cell SiCELL_2'))
model.addNode(Node(10, 150.0,334.28346766, power_110, 'Silicon cell SiCELL_1'))
model.addNode(Node(11, 200.0,272.24511844, power_111, 'RF antena'))
model.addNode(Node(12, 300.0,278.89073804, power_112, 'Upper microSwitch'))
model.addNode(Node(13, 300.0, 289.79374615, power_113, 'Lower microSwitch'))
model.addNode(Node(14, 0.1,230.57181926, power_114, 'MLI-Upper platform'))
model.addNode(Node(15, 0.1,231.29593541, power_115, 'MLI-lateral_2'))
model.addNode(Node(16, 0.1, 230.27827859, power_116, 'MLI-Lower platform'))
model.addNode(Node(17, 0.1,246.37801687,power_117, 'Shunt_2'))
model.addNode(Node(18, 0.1,246.37762173, power_118, 'Shunt_1'))
model.addNode(Node(19, 0.1,231.29564763, power_119, 'MLI-lateral_1'))
model.addNode(Node(20, 100.0, 238.06403638, power_120, 'GPS_1 Antenna'))
model.addNode(Node(21, 100.0,240.55841625, power_121, 'GPS_2 Antenna'))
model.addNode(Node(22, 100.0, 238.06404797, power_122, 'GPS_3 Antenna'))
model.addNode(Node(23, 100.0,240.5584162, power_123, 'GPS_4 Antenna'))
model.addNode(Node(24, 250.0, 299.55312394, lambda x: 0.0, 'Structure - lateral_1'))
model.addNode(Node(25, 250.0, 296.43180947, lambda x: 0.0, 'Structure - rear'))
model.addNode(Node(26, 450.0,295.62728453, power_126, 'Radiator_2'))
model.addNode(Node(27, 450.0,295.62727054, power_127, 'Radiator_1'))
model.addNode(Node(28, 250.0, 299.55318916, lambda x: 0.0, 'Structure - lateral_2'))
model.addNode(Node(29, 3.0, 231.75567726, power_129, 'MLI - magnetometer'))
model.addNode(Node(30, 300.0, 287.14924544, lambda x: 0.0,'Magnetometer'))
model.addNode(Node(31, 1100.0,301.97360573, lambda x: 0.0, 'Structure - front'))
model.addNode(Node(32, 3.0,339.90875354, lambda x: 0.0, 'Mathematical node'))
model.addNode(Node(-99, 0.10, 0.0, lambda x: 0.0, 'Space'))
#HEADER CONDUCTOR DATA
# CONDUCTANCES
model.addConductance(1, 31, 1.10) | |
import torch
import warnings
from torch.optim.optimizer import Optimizer, required
import math
import itertools as it
import torch.optim as optim
warnings.filterwarnings("once")
def get_optimizer(optimizer: str = 'Adam',
lookahead: bool = False,
model=None,
separate_decoder: bool = True,
lr: float = 1e-3,
lr_e: float = 1e-3):
"""
# https://github.com/lonePatient/lookahead_pytorch/blob/master/run.py
:param optimizer:
:param lookahead:
:param model:
:param separate_decoder:
:param lr:
:param lr_e:
:return:
"""
if separate_decoder:
params = [
{'params': model.cls_head.parameters(), 'lr': lr
},
{'params': model.encoder.parameters(), 'lr': lr_e},
]
else:
params = [{'params': model.parameters(), 'lr': lr}]
if optimizer == 'Adam':
optimizer = optim.Adam(params, lr=lr)
elif optimizer == 'RAdam':
optimizer = RAdam(params, lr=lr)
elif optimizer == 'Ralamb':
optimizer = Ralamb(params, lr=lr)
elif optimizer == 'AdamW':
optimizer = AdamW(params, lr=lr)
elif optimizer == 'diffGrad':
optimizer = diffGrad(params, lr=lr)
elif optimizer == 'diffRGrad':
optimizer = diffRGrad(params, lr=lr)
else:
raise ValueError('unknown base optimizer type')
if lookahead:
optimizer = Lookahead(base_optimizer=optimizer, k=5, alpha=0.5)
return optimizer
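# Hypothetical usage sketch (not from the original file); assumes `model` exposes
# the `cls_head` and `encoder` sub-modules that get_optimizer expects:
#
#   optimizer = get_optimizer('RAdam', lookahead=True, model=model,
#                             separate_decoder=True, lr=1e-3, lr_e=1e-4)
#   loss.backward()
#   optimizer.step()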
class diffGrad(Optimizer):
r"""Implements diffGrad algorithm. It is modified from the pytorch implementation of Adam.
It has been proposed in `diffGrad: An Optimization Method for Convolutional Neural Networks`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _diffGrad: An Optimization Method for Convolutional Neural Networks:
https://arxiv.org/abs/1909.11015
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, version=0, weight_decay=0):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults)
#save version
self.version = version
def __setstate__(self, state):
super().__setstate__(state)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('diffGrad does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
# Previous gradient
state['previous_grad'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq, previous_grad = state['exp_avg'], state['exp_avg_sq'], state['previous_grad']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad.add_(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# compute diffgrad coefficient (dfc)
if self.version==0:
diff = abs(previous_grad - grad)
elif self.version ==1:
diff = previous_grad-grad
elif self.version ==2:
diff = .5*abs(previous_grad - grad)
if self.version==0 or self.version==1:
dfc = 1. / (1. + torch.exp(-diff))
elif self.version==2:
dfc = 9. / (1. + torch.exp(-diff)) - 4  # DFC2 = 9/(1 + exp(-0.5*|g_prev - g|)) - 4, range (0.5, 5)
state['previous_grad'] = grad
# update momentum with dfc
exp_avg1 = exp_avg * dfc
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(exp_avg1, denom, value=-step_size)
return loss
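# --- Illustration sketch (not part of the original class) ---
# Shows, in isolation, how the diffGrad friction coefficient (dfc) computed in
# step() behaves for the three ``version`` variants. The gradients are toy
# values chosen only to illustrate the sigmoid gating.
import torch

prev_g = torch.tensor([0.0, 0.5, 2.0])
g = torch.tensor([0.0, -0.5, -2.0])

diff_v0 = (prev_g - g).abs()           # version 0: magnitude of the change
diff_v1 = prev_g - g                   # version 1: signed change
diff_v2 = 0.5 * (prev_g - g).abs()     # version 2: rescaled magnitude

dfc_v0 = 1.0 / (1.0 + torch.exp(-diff_v0))       # in [0.5, 1): only damps the step
dfc_v1 = 1.0 / (1.0 + torch.exp(-diff_v1))
dfc_v2 = 9.0 / (1.0 + torch.exp(-diff_v2)) - 4   # in [0.5, 5): can also amplify the step
print(dfc_v0, dfc_v1, dfc_v2)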
class diffRGrad(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
version=0,
weight_decay=0, degenerated_to_sgd=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
self.degenerated_to_sgd = degenerated_to_sgd
self.version = version
if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
for param in params:
if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
param['buffer'] = [[None, None, None] for _ in range(10)]
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)])
super(diffRGrad, self).__init__(params, defaults)
def __setstate__(self, state):
super(diffRGrad, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('diffRGrad does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
# Previous gradient
state['previous_grad'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
state['previous_grad'] = state['previous_grad'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
previous_grad = state['previous_grad']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
state['step'] += 1
# compute diffgrad coefficient (dfc)
#print("grad = ",grad.size())
#print("prev_grad = ",previous_grad.size())
if self.version==0:
diff = abs(previous_grad - grad)
elif self.version ==1:
diff = previous_grad-grad
elif self.version ==2:
diff = .5*abs(previous_grad - grad)
if self.version==0 or self.version==1:
dfc = 1. / (1. + torch.exp(-diff))
elif self.version==2:
dfc = 9. / (1. + torch.exp(-diff)) - 4  # DFC2 = 9/(1 + exp(-0.5*|g_prev - g|)) - 4, range (0.5, 5)
state['previous_grad'] = grad
buffered = group['buffer'][int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
elif self.degenerated_to_sgd:
step_size = 1.0 / (1 - beta1 ** state['step'])
else:
step_size = -1
buffered[2] = step_size
# more conservative since it's an approximated value
if N_sma >= 5:
if group['weight_decay'] != 0:
p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
denom = exp_avg_sq.sqrt().add_(group['eps'])
# update momentum with dfc
#print("dfc ",dfc.size())
#print("exp_avg ",exp_avg.size())
exp_avg1 = exp_avg * dfc.float()
p_data_fp32.addcdiv_(exp_avg1, denom, value=-step_size * group['lr'])
p.data.copy_(p_data_fp32)
elif step_size > 0:
#print("exp_avg in elif",exp_avg.size())
if group['weight_decay'] != 0:
p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
p.data.copy_(p_data_fp32)
return loss
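# --- Illustration sketch (not part of the original class) ---
# Computes the rectification quantities used in diffRGrad.step() above (and in
# RAdam below) for a given iteration count: the approximated SMA length (N_sma)
# and the step-size multiplier applied once N_sma >= 5. Early iterations fall
# back to an un-rectified, SGD-like step (the degenerated_to_sgd branch).
import math

def rectification_terms(step, beta1=0.9, beta2=0.999):
    beta2_t = beta2 ** step
    n_sma_max = 2 / (1 - beta2) - 1
    n_sma = n_sma_max - 2 * step * beta2_t / (1 - beta2_t)
    if n_sma >= 5:
        step_size = math.sqrt(
            (1 - beta2_t) * (n_sma - 4) / (n_sma_max - 4)
            * (n_sma - 2) / n_sma * n_sma_max / (n_sma_max - 2)
        ) / (1 - beta1 ** step)
    else:
        step_size = 1.0 / (1 - beta1 ** step)
    return n_sma, step_size

for t in (1, 5, 100, 1000):
    print(t, rectification_terms(t))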
class RAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
"full".')
return None
if df is None:
self._logger.warning('Return df format not provided. Default is "pred".')
return None
if svd == True:
# update SVD for dimensionality reduction
self.svd()
data = self.data
else:
data = self.UI
gmm = GaussianMixture(n_components=n, n_init=10, covariance_type=covariance_type, tol=1e-3, max_iter=500)
self.gmm_pred = gmm.fit_predict(data)
self.gmm_pred = pd.DataFrame(self.gmm_pred, columns = ['cluster'])
# Return new dataframe with clusters, and probability of belonging to a cluster
if df == 'pred':
self.gmm_pred.index += 1
return self.gmm_pred
elif df == 'proba':
cols = ['proba_C' + str(i) for i in range(n)]
proba = self.gmm_pred.join(pd.DataFrame(gmm.predict_proba(data), columns = cols))
proba.index += 1 # adjust index to match user
return proba
elif df == 'all':
cols = ['proba_C' + str(i) for i in range(n)]
proba = self.gmm_pred.join(pd.DataFrame(gmm.predict_proba(data), columns = cols))
proba.index += 1 # adjust index to match user
return [data, proba]
else:
self._logger.error("Invalid input. Enter 'all', 'pred' or 'proba'.")
return None
# Plot graphs to evaluate Gaussian Mixture Model clustering from 2 to n clusters using AIC, BIC and log-likelihood scores
def gmm_eval(self, n, covariance_type="full"):
if n is None:
self._logger.error('Number of maximum clusters not provided')
return None
if covariance_type is None:
self._logger.warning('Covariance Type for Gaussian Mixture Model not provided. Default is "full"')
return None
# update SVD for dimensionality reduction
self.svd()
# variable scope limited to function
gmm_aic = []
gmm_bic = []
gmm_scores = []
# calculate scores
for i in range(2,n+1):
gmm = GaussianMixture(n_components=i,n_init=10, covariance_type = covariance_type, tol=1e-3,max_iter=500).fit(self.data)
# Akaike Information Criterion
gmm_aic.append(gmm.aic(self.data))
# Bayesian Information Criterion
gmm_bic.append(gmm.bic(self.data))
gmm_scores.append(gmm.score(self.data))
# Plot the scores
plt.figure(figsize=(14,21))
plt.subplot(3,1,1)
#plt.title("The Gaussian Mixture model AIC for determining number of clusters, CT = "+covariance_type,fontsize=16)
plt.scatter(x=[i for i in range(2,n+1)],y=np.log(gmm_aic),s=150,edgecolor='k')
plt.grid(True)
plt.xlabel("Number of clusters",fontsize=14)
plt.ylabel("Log of Gaussian mixture AIC score",fontsize=15)
plt.xticks([i for i in range(2,n+1)],fontsize=14)
plt.yticks(fontsize=15)
plt.subplot(3,1,2)
#plt.title("The Gaussian Mixture model BIC for determining number of clusters, CT = "+covariance_type,fontsize=16)
plt.scatter(x=[i for i in range(2,n+1)],y=np.log(gmm_bic),s=150,edgecolor='k')
plt.grid(True)
plt.xlabel("Number of clusters",fontsize=14)
plt.ylabel("Log of Gaussian mixture BIC score",fontsize=15)
plt.xticks([i for i in range(2,n+1)],fontsize=14)
plt.yticks(fontsize=15)
plt.subplot(3,1,3)
#plt.title("The Gaussian Mixture model scores for determining number of clusters, CT = "+covariance_type,fontsize=16)
plt.scatter(x=[i for i in range(2,n+1)],y=gmm_scores,s=150,edgecolor='k')
plt.grid(True)
plt.xlabel("Number of clusters",fontsize=14)
plt.ylabel("Gaussian mixture score",fontsize=15)
plt.xticks([i for i in range(2,n+1)],fontsize=14)
plt.yticks(fontsize=15)
plt.show()
return None
def plot_scatter(self, show_cluster, model):
# logger warning if no clusters to plot/colour
if show_cluster:
if model == 'gmm':
if self.gmm_pred is None:
self._logger.error("Gaussian Mixture Model not trained. Use data.gmm(n, covariance_type, df) to train before plotting")
return None
clusters = self.gmm_pred
elif model == 'kmeans':
if self.km_pred is None:
self._logger.error("K-Means Model not trained. Use data.kmeans(n) to train before plotting")
return None
clusters = self.km_pred
marker = {'size': 3,'opacity': 0.8,'color':clusters['cluster'],'colorscale':'Viridis'}
else:
marker = {'size': 3,'opacity': 0.8,'colorscale':'Viridis'}
# check input dataset to plot
if len(self.data.columns) >= 3:
if len(self.data.columns) > 3:
self._logger.warning("Input dataset contains more than 3 features. 3D scatter plot will only plot first 3 features.")
# plot 3D scatter plot
# Configure Plotly to be rendered inline in the notebook.
py.init_notebook_mode()
# Configure the trace.
trace = go.Scatter3d(
x=self.data[0],   # first three feature columns of the dataset
y=self.data[1],
z=self.data[2],
mode='markers',
marker=marker
)
# Configure the layout.
layout = go.Layout(
margin={'l': 0, 'r': 0, 'b': 0, 't': 0}
)
data = [trace]
plot_figure = go.Figure(data=data, layout=layout)
# Render the plot.
py.iplot(plot_figure)
return None
elif len(self.data.columns) == 2:
self._logger.warning("Input dataset contains only 2 features. 2D scatter plot will be created.")
# plot 2D scatter plot
fig = go.Figure(data=go.Scatter(
x=self.data[0],
y=self.data[1],
mode='markers',
marker=marker))
fig.show()
return None
else:
self._logger.error("Input dataset contains less than 2 features. Insufficient data to plot.")
return None
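# --- Illustration sketch (not part of the original class) ---
# The AIC/BIC model-selection idea used in gmm_eval() above, shown standalone
# on toy two-cluster data with scikit-learn; the lowest BIC is a common choice
# for the number of components.
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
toy = np.vstack([rng.normal(-3, 1, (100, 2)), rng.normal(3, 1, (100, 2))])
for k in range(2, 6):
    gm = GaussianMixture(n_components=k, n_init=5, covariance_type="full").fit(toy)
    print(k, round(gm.aic(toy), 1), round(gm.bic(toy), 1))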
#CLASS ANALYSIS TO PROCESS POST-SIMULATION DATA
class analysis:
# constructor taking in dataset (UI matrix), number of maximum clusters
def __init__(self, probas):
# assign list of proba dataframes
self.probas = probas
# Enable logging
self._logger = logging.getLogger(__name__)
self.clusters = self.probas[0]['cluster'].unique().tolist()
self.cluster_pop = pd.DataFrame()
def __str__(self):
return 'Analysis Object'
def rename_cluster(self):
# the first and last ~100 users in the synthetic dataset are assumed to be the extreme left and extreme right seeds
# for each iteration i
for i in range(len(self.probas)):
# identify cluster names
groupA = int(self.probas[i][0:99].mode().cluster[0])
#print(groupA)
groupB = int(self.probas[i][900:999].mode().cluster[0])
#print(groupB)
#groupA = self.probas[i].loc[left_id,'cluster']
#groupB = self.probas[i].loc[right_id,'cluster']
if len(self.probas[i].cluster.unique()) == 3:
if groupA == groupB:
self._logger.warning("Left and Right Users are in the same cluster. They are both in cluster '1'. Cluster 0 and -1 are both random neutrals now")
#groupA = self.probas[i].loc[left_id,'cluster']
if (3-groupA) == 3:
#groupA is 0
groupB = 1
groupC = 2
else:
#groupA is 1 or 2
groupB = 3-groupA
groupC = 0
else:
groupC = 3-(groupA+groupB)
#check if it is just predictions or predictions and probabilities
if len(self.probas[i].columns) > 2:
# rename columns
self.probas[i].rename(columns={'proba_C'+str(groupA):1,'proba_C'+str(groupB):-1, 'proba_C'+str(groupC):0},inplace = True)
# rename clusters
self.probas[i]['cluster'] = self.probas[i]['cluster'].replace([groupA,groupB,groupC],[1,-1,0])
self.clusters = [-1,0,1]
elif len(self.probas[i].cluster.unique()) == 2:
if groupA == groupB:
self._logger.warning("Left and Right Users are in the same cluster. They are both in cluster '1'. Cluster 0 is random neutrals now")
#groupA = self.probas[i].loc[left_id,'cluster']
groupB = 1-groupA
#check if it is just predictions or predictions and probabilities
if len(self.probas[i].columns) > 2:
# rename columns
self.probas[i].rename(columns={'proba_C'+str(groupA):1,'proba_C'+str(groupB):0},inplace = True)
# rename clusters
self.probas[i]['cluster'] = self.probas[i]['cluster'].replace([groupA,groupB],[1,0])
self.clusters = [0,1]
return self.probas
# Function to calculate cluster composition
def cluster_populations(self):
if self.probas is None:
self._logger.error("List of probabilities is empty.")
return None
else:
self.cluster_pop = pd.DataFrame(index=range(1,len(self.probas)+1), columns=(self.clusters + ['total']))
for t in range(1,len(self.probas)+1):
for c in self.clusters:
self.cluster_pop.loc[t,c] = len(self.probas[t-1].loc[self.probas[t-1]['cluster']==c])
self.cluster_pop.loc[t,'total'] = len(self.probas[t-1])
return self.cluster_pop
def plot_counts(self, show=True, loc=None):
if self.cluster_pop.empty:
self.cluster_populations()
plt.clf()
for i in self.cluster_pop.columns:
plt.plot(self.cluster_pop.index,self.cluster_pop[i], label = i)
plt.xlabel('iteration')
# Set the y axis label of the current axis.
plt.ylabel('#users')
# Set a title of the current axes.
plt.title('Number of Users in each cluster over the simulation')
# show a legend on the plot
plt.legend()
if show:
# Display a figure.
plt.show(block=True)
else:
#save plt to loc
plt.savefig(loc)
def plot_percent(self, show=True, loc=None):
if self.cluster_pop.empty:
self.cluster_populations()
plt.clf()
for i in self.cluster_pop.columns[:-1]:
plt.plot(self.cluster_pop.index,(self.cluster_pop[i]/self.cluster_pop['total']), label = i)
plt.gca
plt.xlabel('iteration')
# Set the y axis label of the current axis.
plt.ylabel('fraction of users')
# Set a title of the current axes.
plt.title('Fraction of Users in each cluster over the simulation')
# show a legend on the plot
plt.legend()
if show:
# Display a figure.
plt.show(block=True)
else:
#save plt to loc
plt.savefig(loc)
'''
# Function to calculate adjacency matrix of weighted graph of users. Default similarity algorithm is Jaccard
def adj_matrix(self, sim = "cosin"):
# calculate similarity matrix
if sim == "cosin":
print("cosin")
else:
self._logger.error("Invalid input for sim. Enter 'cosin' or 'euclidean'.")
return None
'''
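# --- Illustration sketch (not part of the original class) ---
# The relabelling idea used in rename_cluster() above, shown on a toy
# prediction frame: raw GMM labels {0, 1, 2} are mapped onto {1, -1, 0} so that
# 1 is the left-seeded group, -1 the right-seeded group and 0 the neutrals.
import pandas as pd

toy = pd.DataFrame({'cluster': [2, 2, 0, 1, 0, 1]})
group_a, group_b = 2, 0              # labels observed for the seeded extremes
group_c = 3 - (group_a + group_b)    # the remaining (neutral) label
toy['cluster'] = toy['cluster'].replace([group_a, group_b, group_c], [1, -1, 0])
print(toy['cluster'].tolist())       # -> [1, 1, -1, 0, -1, 0]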
class post_process():
# constructor taking in dataset (UI matrix), number of maximum clusters
def __init__(self, latent_list, results_list, UI):
assert len(latent_list) == len(results_list), "Length of latents and results does not match"
assert len(results_list[len(results_list)-1]) == len(UI[len(UI)-1]), "Number of users in final cluster results does not match the number of users in the final user-item interaction matrix"
# assign input list of DFs
self.latents = latent_list
self.results = results_list
self.UI = UI
# Enable logging
self._logger = logging.getLogger(__name__)
# Create Analysis Object for Clustering Probabilities
self.analysis_obj = analysis(self.results)
def __str__(self):
return 'Post Processing Object'
def rename_cluster(self,l,r):
# l and r are indexes of extreme left and extreme right users in synthetic dataset
return self.analysis_obj.rename_cluster()
# Function to calculate cluster composition
def cluster_populations(self):
return self.analysis_obj.cluster_populations()
def plot_counts(self, show=True, loc=None):
self.analysis_obj.plot_counts(show=show, loc=loc)
def plot_percent(self, show=True, loc=None):
self.analysis_obj.plot_percent(show=show, loc=loc)
def examine(self, i, algo):
# i is the index of the simulation iteration to examine; algo selects the clustering model
# ensure i is within the bounds of the stored latent matrices
if i >= len(self.latents):
self._logger.error("Argument 'i' is out of bounds. It needs to be between 0 and %d" % (len(self.latents)-1))
return None
if not((algo == 'gmm') or (algo == 'kmeans')):
self._logger.error("Argument | |
:return: dataframe with corporate actions information
**Usage**
Retrieve corporate actions for a basket across a date range
**Examples**
Retrieve historical acquisition corporate actions for a basket
>>> from gs_quant.markets.baskets import Basket
>>> from gs_quant.markets.indices_utils import CorporateActionType
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_corporate_actions(ca_type=[CorporateActionType.ACQUISITION])
**See also**
:func:`get_fundamentals`
"""
where = dict(assetId=self.id, corporateActionType=ca_type)
query = DataQuery(where=where, start_date=start, end_date=end)
response = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.CORPORATE_ACTIONS.value)
return pd.DataFrame(response)
@_validate(ErrorMessage.UNINITIALIZED)
def get_fundamentals(self,
start: dt.date = DateLimit.LOW_LIMIT.value,
end: dt.date = dt.date.today(),
period: DataMeasure = DataMeasure.ONE_YEAR.value,
direction: DataMeasure = DataMeasure.FORWARD.value,
metrics: List[DataMeasure] = DataMeasure.list_fundamentals()) -> pd.DataFrame:
"""
Retrieve fundamentals data for a basket across a date range
:param start: start date (default minimum date value)
:param end: end date (default is today)
:param period: period for the relevant metric (default is 1y)
:param direction: direction of the outlook period (default is forward)
:param metrics: list of fundamentals metrics (default is all)
:return: dataframe with fundamentals information
**Usage**
Retrieve fundamentals data for a basket across a date range
**Examples**
Retrieve historical dividend yield data for a basket
>>> from gs_quant.data.fields import DataMeasure
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_fundamentals(metrics=[DataMeasure.DIVIDEND_YIELD])
**See also**
:func:`get_corporate_actions`
"""
where = dict(assetId=self.id, period=period, periodDirection=direction, metric=metrics)
query = DataQuery(where=where, start_date=start, end_date=end)
response = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.BASKET_FUNDAMENTALS.value)
return pd.DataFrame(response)
@_validate(ErrorMessage.UNINITIALIZED)
def get_live_date(self) -> Optional[dt.date]:
"""
Retrieve basket's live date
**Usage**
Retrieve basket's live date
**Examples**
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_live_date()
"""
return self.__live_date
def get_type(self) -> Optional[SecAssetType]:
"""
Retrieve basket type
**Usage**
Retrieve basket type
**Examples**
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_type()
"""
if self.__gs_asset_type:
return SecAssetType[self.__gs_asset_type.name.upper()]
@_validate(ErrorMessage.UNINITIALIZED)
def get_url(self) -> str:
"""
Retrieve url to basket's product page in Marquee
**Usage**
Retrieve url to basket's product page in Marquee
**Examples**
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_url()
"""
env = '-dev' if 'dev' in get(GsSession, 'current.domain', '') else ''
env = '-qa' if 'qa' in get(GsSession, 'current.domain', '') else env
return f'https://marquee{env}.gs.com/s/products/{self.id}/summary'
@_validate(ErrorMessage.UNINITIALIZED, ErrorMessage.NON_ADMIN)
def add_factor_risk_report(self, risk_model_id: str, fx_hedged: bool):
"""
Create and schedule a new factor risk report for your basket
:param risk_model_id: risk model identifier
:param fx_hedged: Assume basket is FX hedged
**Usage**
Create and schedule a new factor risk report for your basket
**Examples**
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.add_factor_risk_report('AXUS4M', True)
**See also**
:func:`delete_factor_risk_report`
"""
payload = CustomBasketRiskParams(risk_model=risk_model_id, fx_hedged=fx_hedged)
return GsIndexApi.update_risk_reports(payload)
@_validate(ErrorMessage.UNINITIALIZED, ErrorMessage.NON_ADMIN)
def delete_factor_risk_report(self, risk_model_id: str):
"""
Delete an existing factor risk report for your basket
:param risk_model_id: risk model identifier for the report you'd like to delete
**Usage**
Delete an existing factor risk report for your basket
**Examples**
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.delete_factor_risk_report('AXUS4M')
**See also**
:func:`add_factor_risk_report`
"""
payload = CustomBasketRiskParams(risk_model=risk_model_id, delete=True)
return GsIndexApi.update_risk_reports(payload)
@property
def allow_ca_restricted_assets(self) -> Optional[bool]:
""" Allow basket to have constituents that will not be corporate action adjusted in the future """
return self.__allow_ca_restricted_assets
@allow_ca_restricted_assets.setter
@_validate(ErrorMessage.NON_ADMIN)
def allow_ca_restricted_assets(self, value: bool):
self.__allow_ca_restricted_assets = value
@property
def allow_limited_access_assets(self) -> Optional[bool]:
""" Allow basket to have constituents that GS has limited access to """
return self.__allow_limited_access_assets
@allow_limited_access_assets.setter
@_validate(ErrorMessage.NON_ADMIN)
def allow_limited_access_assets(self, value: bool):
self.__allow_limited_access_assets = value
@property
def clone_parent_id(self) -> Optional[str]:
""" Marquee Id of the source basket, in case basket composition is sourced from another marquee basket """
return self.__clone_parent_id
@property
def currency(self) -> Optional[IndicesCurrency]:
""" Denomination of the basket """
return self.__currency
@currency.setter
@_validate(ErrorMessage.UNMODIFIABLE)
def currency(self, value: IndicesCurrency):
self.__currency = value
@property
def default_backcast(self) -> Optional[bool]:
""" If basket should be backcasted using the current composition """
return self.__default_backcast
@default_backcast.setter
@_validate(ErrorMessage.UNMODIFIABLE)
def default_backcast(self, value: bool):
self.__default_backcast = value
@property
def description(self) -> Optional[str]:
""" Free text description of basket """
return self.__description
@description.setter
@_validate(ErrorMessage.NON_ADMIN)
def description(self, value: str):
self.__description = value
@property
@_validate()
def divisor(self) -> Optional[float]:
""" Divisor to be applied to the overall position set """
return self.__divisor
@divisor.setter
@_validate(ErrorMessage.NON_ADMIN)
def divisor(self, value: float):
self.__initial_price = None
self.__divisor = value
@property
@_validate()
def entitlements(self) -> Optional[BasketEntitlements]:
""" Basket entitlements """
return self.__entitlements
@entitlements.setter
@_validate(ErrorMessage.NON_ADMIN)
def entitlements(self, value: BasketEntitlements):
self.__entitlements = value
@property
@_validate(ErrorMessage.NON_INTERNAL)
def flagship(self) -> Optional[bool]:
""" If the basket is flagship (internal only) """
return self.__flagship
@flagship.setter
@_validate(ErrorMessage.NON_INTERNAL)
def flagship(self, value: bool):
self.__flagship = value
@property
def hedge_id(self) -> Optional[str]:
""" Marquee Id of the source hedge, in case current basket composition is sourced from marquee hedge """
return self.__hedge_id
@property
def include_price_history(self) -> Optional[bool]:
""" Include full price history when publishing to Bloomberg """
return self.__include_price_history
@include_price_history.setter
@_validate(ErrorMessage.NON_ADMIN)
def include_price_history(self, value: bool):
self.__include_price_history = value
@property
@_validate()
def initial_price(self) -> Optional[float]:
""" Initial price the basket it should start ticking at """
return self.__initial_price
@initial_price.setter
@_validate(ErrorMessage.NON_ADMIN)
def initial_price(self, value: float):
self.__divisor = None
self.__initial_price = value
@property
def name(self) -> Optional[str]:
""" Display name of the basket (must be <= 24 characters)"""
return self.__name
@name.setter
@_validate(ErrorMessage.NON_ADMIN)
def name(self, value: str):
if len(value) > 24:
raise MqValueError(f'Basket name of {len(value)} characters is too long (must be <= 24 characters).')
self.__name = value
@property
def parent_basket(self) -> Optional[str]:
""" Ticker of the source basket, in case current basket composition is sourced from another marquee basket """
if has(self, '__clone_parent_id') and not has(self, '__parent_basket'):
self.__parent_basket = get(GsAssetApi.get_asset(self.__clone_parent_id), 'id')
return self.__parent_basket
@parent_basket.setter
@_validate(ErrorMessage.UNMODIFIABLE)
def parent_basket(self, value: str):
self.__clone_parent_id = get(__get_gs_asset(value), 'id')
self.__parent_basket = value
@property
@_validate()
def position_set(self) -> Optional[PositionSet]:
""" Information of constituents associated with the basket """
return self.__position_set
@position_set.setter
@_validate(ErrorMessage.NON_ADMIN)
def position_set(self, value: PositionSet):
self.__position_set = value
@property
@_validate()
def publish_to_bloomberg(self) -> Optional[bool]:
""" If the basket should be published to Bloomberg """
return self.__publish_to_bloomberg
@publish_to_bloomberg.setter
@_validate(ErrorMessage.NON_ADMIN)
def publish_to_bloomberg(self, value: bool):
self.__publish_to_bloomberg = value
@property
@_validate()
def publish_to_factset(self) -> Optional[bool]:
""" If the basket should be published to Factset """
return self.__publish_to_factset
@publish_to_factset.setter
@_validate(ErrorMessage.NON_ADMIN)
def publish_to_factset(self, value: bool):
self.__publish_to_factset = value
@property
def publish_to_reuters(self) -> Optional[bool]:
""" If the basket should be published to Reuters """
return self.__publish_to_reuters
@publish_to_reuters.setter
@_validate(ErrorMessage.NON_ADMIN)
def publish_to_reuters(self, value: bool):
self.__publish_to_reuters = value
@property
def return_type(self) -> Optional[ReturnType]:
""" Determines the index calculation methodology with respect to dividend reinvestment """
return self.__return_type
@return_type.setter
@_validate(ErrorMessage.NON_ADMIN)
def return_type(self, value: ReturnType):
self.__return_type = value
@property
def reweight(self) -> Optional[bool]:
""" To reweight positions if input weights don't add up to 1 """
return self.__reweight
@reweight.setter
@_validate(ErrorMessage.NON_ADMIN)
def reweight(self, value: bool):
self.__reweight = value
@property
def target_notional(self) -> Optional[float]:
""" Target notional for the position set """
return self.__target_notional
@target_notional.setter
@_validate(ErrorMessage.NON_ADMIN)
def target_notional(self, value: float):
self.__target_notional = value
@property
def ticker(self) -> Optional[str]:
""" Associated 8-character basket identifier """
return self.__ticker
@ticker.setter
@_validate(ErrorMessage.UNMODIFIABLE)
def ticker(self, value: str):
self.__validate_ticker(value)
self.__ticker = value
@property
def weighting_strategy(self) -> Optional[WeightingStrategy]:
""" Strategy used to price the position set """
return self.__weighting_strategy
@weighting_strategy.setter
@_validate(ErrorMessage.NON_ADMIN)
def weighting_strategy(self, value: WeightingStrategy):
self.__weighting_strategy = value
def __edit_and_rebalance(self, edit_inputs: CustomBasketsEditInputs,
rebal_inputs: CustomBasketsRebalanceInputs) -> CustomBasketsResponse:
""" If updates require edit and rebalance, rebal will not be scheduled until/if edit report succeeds """
_logger.info('Current update request requires multiple reports. Your rebalance request will be submitted \
once the edit report has completed. Submitting basket edits now...')
response = GsIndexApi.edit(self.id, edit_inputs)
report_id = response.report_id
self.__latest_create_report = GsReportApi.get_report(response.report_id)
report_status = self.poll_report(report_id, timeout=600, step=15)
if report_status != ReportStatus.done:
raise MqError(f'The basket edit report\'s status is {report_status}. The current rebalance request will \
not be submitted in the meantime.')
_logger.info('Your basket edits have completed successfully. Submitting rebalance request now...')
response = GsIndexApi.rebalance(self.id, rebal_inputs)
return response
def __finish_initialization(self):
""" Fetches remaining data not retrieved during basket initialization """
if has(self, 'id'):
if not has(self, '__initial_positions'):
position_set = GsAssetApi.get_latest_positions(self.id, PositionType.ANY)
position_set = PositionSet.from_target(position_set)
self.__position_set = position_set
self.__divisor = get(position_set, 'divisor')
self.__initial_positions = set(deepcopy(self.__position_set.positions))
set_(self.__initial_state, 'divisor', self.__divisor)
set_(self.__initial_state, 'position_set', self.__position_set)
if not has(self.__initial_state, 'initial_price'):
initial_price = GsIndexApi.initial_price(self.id, dt.date.today())
self.__initial_price = get(initial_price, 'price')
set_(self.__initial_state, 'initial_price', self.__initial_price)
if not has(self.__initial_state, 'publish_to_bloomberg'):
report = get(self, '__latest_create_report', self.__get_latest_create_report())
self.__publish_to_bloomberg = get(report, 'parameters.publish_to_bloomberg')
self.__publish_to_factset = get(report, 'parameters.publish_to_factset')
self.__publish_to_reuters = get(report, 'parameters.publish_to_reuters')
"""Supporting functions for the 'fix' command."""
import logging
import numpy as np
import pandas as pd
from . import descriptives, params, smoothing
def do_fix(target_raw, antitarget_raw, reference,
do_gc=True, do_edge=True, do_rmask=True, do_cluster=False):
"""Combine target and antitarget coverages and correct for biases."""
# Load, recenter and GC-correct target & antitarget probes separately
logging.info("Processing target: %s", target_raw.sample_id)
cnarr, ref_matched = load_adjust_coverages(target_raw, reference,
True, do_gc, do_edge, False)
logging.info("Processing antitarget: %s", antitarget_raw.sample_id)
anti_cnarr, ref_anti = load_adjust_coverages(antitarget_raw, reference,
False, do_gc, False, do_rmask)
if len(anti_cnarr):
# Combine target and antitarget bins
cnarr.add(anti_cnarr)
ref_matched.add(ref_anti)
# Find reference clusters, if requested
log2_key = 'log2'
spread_key = 'spread'
if do_cluster:
ref_log2_cols = [col for col in ref_matched.data.columns
if col == "log2"
or col.startswith("log2")]
if len(ref_log2_cols) == 1:
logging.info("Reference does not contain any sub-clusters; "
"using %s", log2_key)
else:
# Get correlations between test sample and each reference cluster
corr_coefs = np.array([cnarr.log2.corr(ref_matched[ref_col])
for ref_col in ref_log2_cols])
ordered = [(k, r) for r, k in sorted(zip(corr_coefs, ref_log2_cols),
reverse=True)]
logging.info("Correlations with each cluster:\n\t%s",
"\n\t".join(["{}\t: {}".format(k, r)
for k, r in ordered]))
log2_key = ordered[0][0]
if log2_key.startswith('log2_'):
suffix = log2_key.split('_', 1)[1]
spread_key = 'spread_' + suffix
logging.info(" -> Choosing columns %r and %r", log2_key, spread_key)
# Normalize coverages according to the reference
# (Subtract the reference log2 copy number to get the log2 ratio)
cnarr.data['log2'] -= ref_matched[log2_key]
cnarr = apply_weights(cnarr, ref_matched, log2_key, spread_key)
cnarr.center_all(skip_low=True)
return cnarr
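# --- Illustration sketch (not part of the original module) ---
# The core normalization in do_fix(): subtracting the reference log2 copy
# number from the bias-corrected sample log2 coverage yields the log2 ratio,
# so a bin that matches the reference ends up near 0. Toy numbers only.
import numpy as np

sample_log2 = np.array([-0.05, 0.95, -1.10])    # corrected sample coverage
reference_log2 = np.array([0.00, 0.02, -0.03])  # pooled-reference expectation
log2_ratio = sample_log2 - reference_log2
print(log2_ratio)  # ~[-0.05, 0.93, -1.07]: neutral, gain, loss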
def load_adjust_coverages(cnarr, ref_cnarr, skip_low,
fix_gc, fix_edge, fix_rmask):
"""Load and filter probe coverages; correct using reference and GC."""
if 'gc' in cnarr:
# Don't choke on Picard-derived files that have the GC column
cnarr = cnarr.keep_columns(cnarr._required_columns + ('depth',))
# No corrections needed if there are no data rows (e.g. no antitargets)
if not len(cnarr):
return cnarr, ref_cnarr[:0]
ref_matched = match_ref_to_sample(ref_cnarr, cnarr)
# Drop bins that had poor coverage in the pooled reference
ok_cvg_indices = ~mask_bad_bins(ref_matched)
logging.info("Keeping %d of %d bins", sum(ok_cvg_indices), len(ref_matched))
cnarr = cnarr[ok_cvg_indices]
ref_matched = ref_matched[ok_cvg_indices]
# Apply corrections for known systematic biases in coverage
cnarr.center_all(skip_low=skip_low)
# Skip bias corrections if most bins have no coverage (e.g. user error)
if (cnarr['log2'] > params.NULL_LOG2_COVERAGE - params.MIN_REF_COVERAGE
).sum() <= len(cnarr) // 2:
logging.warning("WARNING: most bins have no or very low coverage; "
"check that the right BED file was used")
else:
cnarr_index_reset = False
if fix_gc:
if 'gc' in ref_matched:
logging.info("Correcting for GC bias...")
cnarr = center_by_window(cnarr, .1, ref_matched['gc'])
cnarr_index_reset = True
else:
logging.warning("WARNING: Skipping correction for GC bias")
if fix_edge:
logging.info("Correcting for density bias...")
edge_bias = get_edge_bias(cnarr, params.INSERT_SIZE)
cnarr = center_by_window(cnarr, .1, edge_bias)
cnarr_index_reset = True
if fix_rmask:
if 'rmask' in ref_matched:
logging.info("Correcting for RepeatMasker bias...")
cnarr = center_by_window(cnarr, .1, ref_matched['rmask'])
cnarr_index_reset = True
else:
logging.warning("WARNING: Skipping correction for "
"RepeatMasker bias")
if cnarr_index_reset:
ref_matched.data.reset_index(drop=True, inplace=True)
return cnarr, ref_matched
def mask_bad_bins(cnarr):
"""Flag the bins with excessively low or inconsistent coverage.
Returns
-------
np.array
A boolean array where True indicates bins that failed the checks.
"""
mask = ((cnarr['log2'] < params.MIN_REF_COVERAGE) |
(cnarr['log2'] > -params.MIN_REF_COVERAGE) |
(cnarr['spread'] > params.MAX_REF_SPREAD))
if 'depth' in cnarr:
mask |= cnarr['depth'] == 0
if 'gc' in cnarr:
mask |= (cnarr['gc'] > .7) | (cnarr['gc'] < .3)
return mask
def match_ref_to_sample(ref_cnarr, samp_cnarr):
"""Filter the reference bins to match the sample (target or antitarget)."""
# Assign each bin a unique string ID based on genomic coordinates
samp_labeled = samp_cnarr.data.set_index(pd.Index(samp_cnarr.coords()))
ref_labeled = ref_cnarr.data.set_index(pd.Index(ref_cnarr.coords()))
for dset, name in ((samp_labeled, "sample"),
(ref_labeled, "reference")):
dupes = dset.index.duplicated()
if dupes.any():
raise ValueError(("Duplicated genomic coordinates in {} set. Total duplicated regions: {}, starting with:\n"
"{}.").format(name, len(dset.index[dupes]), "\n".join(map(str, dset.index[dupes][:10]))))
# Take the reference bins with IDs identical to those in the sample
ref_matched = ref_labeled.reindex(index=samp_labeled.index)
# Check for signs that the wrong reference was used
num_missing = pd.isnull(ref_matched.start).sum()
if num_missing > 0:
raise ValueError("Reference is missing %d bins found in %s"
% (num_missing, samp_cnarr.sample_id))
x = ref_cnarr.as_dataframe(ref_matched.reset_index(drop=True)
.set_index(samp_cnarr.data.index))
return x
def center_by_window(cnarr, fraction, sort_key):
"""Smooth out biases according to the trait specified by sort_key.
E.g. correct GC-biased bins by windowed averaging across similar-GC
bins; or for similar interval sizes.
"""
# Separate neighboring bins that could have the same key
# (to avoid re-centering actual CNV regions -- only want an independently
# sampled subset of presumably overall-CN-neutral bins)
df = cnarr.data.reset_index(drop=True)
np.random.seed(0xA5EED)
shuffle_order = np.random.permutation(df.index)
#df = df.reindex(shuffle_order)
df = df.iloc[shuffle_order]
# Apply the same shuffling to the key array as to the target probe set
if isinstance(sort_key, pd.Series):
# XXX why
sort_key = sort_key.values
sort_key = sort_key[shuffle_order]
# Sort the data according to the specified parameter
order = np.argsort(sort_key, kind='mergesort')
df = df.iloc[order]
biases = smoothing.rolling_median(df['log2'], fraction)
# biases = smoothing.savgol(df['log2'], fraction)
df['log2'] -= biases
fixarr = cnarr.as_dataframe(df)
fixarr.sort()
return fixarr
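# --- Illustration sketch (not part of the original module) ---
# The windowed re-centering idea in center_by_window(), shown with a plain
# pandas rolling median instead of the module's smoothing.rolling_median
# helper: sort bins by the bias covariate (here GC), estimate the local trend,
# and subtract it.
import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
gc = rng.uniform(0.3, 0.7, 200)
log2 = 0.8 * (gc - 0.5) + rng.normal(0, 0.1, 200)   # synthetic GC-dependent bias

order = np.argsort(gc, kind="mergesort")
sorted_log2 = pd.Series(log2[order])
trend = sorted_log2.rolling(window=21, center=True, min_periods=1).median()
corrected = sorted_log2 - trend
print("mean |log2| before:", round(float(sorted_log2.abs().mean()), 3),
      "after:", round(float(corrected.abs().mean()), 3))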
def get_edge_bias(cnarr, margin):
"""Quantify the "edge effect" of the target tile and its neighbors.
The result is proportional to the change in the target's coverage due to
these edge effects, i.e. the expected loss of coverage near the target
edges and, if there are close neighboring tiles, gain of coverage due
to "spill over" reads from the neighbor tiles.
(This is not the actual change in coverage. This is just a tribute.)
"""
output_by_chrom = []
for _chrom, subarr in cnarr.by_chromosome():
tile_starts = subarr['start'].values
tile_ends = subarr['end'].values
tgt_sizes = tile_ends - tile_starts
# Calculate coverage loss at (both edges of) each tile
losses = edge_losses(tgt_sizes, margin)
# Find tiled intervals within a margin (+/- bp) of the given probe
# (excluding the probe itself), then calculate the relative coverage
# "gain" due to the neighbors, if any
gap_sizes = tile_starts[1:] - tile_ends[:-1]
ok_gaps_mask = (gap_sizes < margin)
ok_gaps = gap_sizes[ok_gaps_mask]
left_gains = edge_gains(tgt_sizes[1:][ok_gaps_mask], ok_gaps, margin)
right_gains = edge_gains(tgt_sizes[:-1][ok_gaps_mask], ok_gaps, margin)
gains = np.zeros(len(subarr))
gains[np.concatenate([[False], ok_gaps_mask])] += left_gains
gains[np.concatenate([ok_gaps_mask, [False]])] += right_gains
output_by_chrom.append(gains - losses)
return pd.Series(np.concatenate(output_by_chrom), index=cnarr.data.index)
def edge_losses(target_sizes, insert_size):
"""Calculate coverage losses at the edges of baited regions.
Letting i = insert size and t = target size, the proportional loss of
coverage near the two edges of the baited region (combined) is:
.. math :: i/2t
If the "shoulders" extend outside the bait $(t < i), reduce by:
.. math :: (i-t)^2 / 4it
on each side, or (i-t)^2 / 2it total.
"""
losses = insert_size / (2 * target_sizes)
# Drop the shoulder part that would extend past the bait
small_mask = (target_sizes < insert_size)
t_small = target_sizes[small_mask]
losses[small_mask] -= ((insert_size - t_small)**2
/ (2 * insert_size * t_small))
return losses
def edge_gains(target_sizes, gap_sizes, insert_size):
"""Calculate coverage gain from neighboring baits' flanking reads.
Letting i = insert size, t = target size, g = gap to neighboring bait,
the gain of coverage due to a nearby bait, if g < i, is::
.. math :: (i-g)^2 / 4it
If the neighbor flank extends beyond the target (t+g < i), reduce by::
.. math :: (i-t-g)^2 / 4it
If a neighbor overlaps the target, treat it as adjacent (gap size 0).
"""
if not (gap_sizes <= insert_size).all():
raise ValueError("Gaps greater than insert size:\n" +
gap_sizes[gap_sizes > insert_size].head())
gap_sizes = np.maximum(0, gap_sizes)
gains = ((insert_size - gap_sizes)**2
/ (4 * insert_size * target_sizes))
# Drop the flank part that extends past this baited region
past_other_side_mask = (target_sizes + gap_sizes < insert_size)
g_past = gap_sizes[past_other_side_mask]
t_past = target_sizes[past_other_side_mask]
gains[past_other_side_mask] -= ((insert_size - t_past - g_past)**2
/ (4 * insert_size * t_past))
return gains
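# --- Quick numeric check (not part of the original module) ---
# For insert size i = 250 and an isolated target of t = 500, the combined edge
# loss is i/(2t) = 0.25; a neighbouring bait at gap g = 50 adds back
# (i-g)^2 / (4*i*t) = 0.08. Uses the two functions defined above.
import numpy as np

i, t, g = 250.0, 500.0, 50.0
loss = edge_losses(np.array([t]), i)[0]
gain = edge_gains(np.array([t]), np.array([g]), i)[0]
print(loss, gain)  # 0.25 0.08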
def apply_weights(cnarr, ref_matched, log2_key, spread_key, epsilon=1e-4):
"""Calculate weights for each bin.
Bin weight is an estimate of (1 - variance) and within the range
``(0, 1]``.
Weights are derived from:
- Each bin's size
- Sample's genome-wide average (on/off-target) coverage depth
- Sample's genome-wide observed (on/off-target) bin variances
And with a pooled reference:
- Each bin's coverage depth in the reference
- The "spread" column of the reference (approx. stdev)
These estimates of variance assume the number of aligned reads per bin
follows a Poisson distribution, approximately log-normal.
Parameters
----------
cnarr : CopyNumArray
Sample bins.
ref_matched : CopyNumArray
Reference bins.
log2_key : string
The 'log2' column name in the reference to use.
import time
import logging
import json
import copy
import math
import base64
import grpc
from grpc._cython import cygrpc
from ..grpc_gen import milvus_pb2_grpc
from ..grpc_gen import milvus_pb2 as milvus_types
from ..grpc_gen import common_pb2
from .abstract import CollectionSchema, ChunkedQueryResult, MutationResult
from .check import (
is_legal_host,
is_legal_port,
check_pass_param,
is_legal_index_metric_type,
is_legal_binary_index_metric_type,
)
from .prepare import Prepare
from .types import (
Status,
ErrorCode,
IndexState,
DataType,
CompactionState,
State,
CompactionPlans,
Plan,
get_consistency_level,
Replica, Shard, Group,
BulkLoadState,
)
from .utils import (
valid_index_types,
valid_binary_index_types,
valid_index_params_keys,
check_invalid_binary_vector,
len_of
)
from ..settings import DefaultConfig as config
from .configs import DefaultConfigs
from . import ts_utils
from . import interceptor
from .asynch import (
SearchFuture,
MutationFuture,
CreateIndexFuture,
CreateFlatIndexFuture,
FlushFuture,
LoadPartitionsFuture,
ChunkedSearchFuture
)
from ..exceptions import (
ParamError,
CollectionNotExistException,
DescribeCollectionException,
MilvusException,
)
from ..decorators import retry_on_rpc_failure, error_handler
LOGGER = logging.getLogger(__name__)
class GrpcHandler:
def __init__(self, uri=config.GRPC_ADDRESS, host=None, port=None, channel=None, **kwargs):
self._stub = None
self._channel = channel
if host is not None and port is not None \
and is_legal_host(host) and is_legal_port(port):
self._uri = f"{host}:{port}"
else:
self._uri = uri
self._max_retry = kwargs.get("max_retry", 5)
self._secure = kwargs.get("secure", False)
self._client_pem_path = kwargs.get("client_pem_path", "")
self._client_key_path = kwargs.get("client_key_path", "")
self._ca_pem_path = kwargs.get("ca_pem_path", "")
self._server_name = kwargs.get("server_name", "")
self._authorization_interceptor = None
self._setup_authorization_interceptor(kwargs.get("user", None), kwargs.get("password", None))
self._setup_grpc_channel()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def _wait_for_channel_ready(self):
if self._channel is not None:
try:
grpc.channel_ready_future(self._channel).result(timeout=3)
return
except grpc.FutureTimeoutError:
raise MilvusException(Status.CONNECT_FAILED, f'Fail connecting to server on {self._uri}. Timeout')
raise MilvusException(Status.CONNECT_FAILED, 'No channel in handler, please setup grpc channel first')
def close(self):
self._channel.close()
def _setup_authorization_interceptor(self, user, password):
if user and password:
authorization = base64.b64encode(f"{user}:{password}".encode('utf-8'))
key = "authorization"
self._authorization_interceptor = interceptor.header_adder_interceptor(key, authorization)
def _setup_grpc_channel(self):
""" Create a ddl grpc channel """
if self._channel is None:
if not self._secure:
self._channel = grpc.insecure_channel(
self._uri,
options=[(cygrpc.ChannelArgKey.max_send_message_length, -1),
(cygrpc.ChannelArgKey.max_receive_message_length, -1),
('grpc.enable_retries', 1),
('grpc.keepalive_time_ms', 55000)]
)
else:
opts = [(cygrpc.ChannelArgKey.max_send_message_length, -1),
(cygrpc.ChannelArgKey.max_receive_message_length, -1),
('grpc.enable_retries', 1),
('grpc.keepalive_time_ms', 55000)]
if self._client_pem_path != "" and self._client_key_path != "" and self._ca_pem_path != "" \
and self._server_name != "":
opts.append(('grpc.ssl_target_name_override', self._server_name, ),)
with open(self._client_pem_path, 'rb') as f:
certificate_chain = f.read()
with open(self._client_key_path, 'rb') as f:
private_key = f.read()
with open(self._ca_pem_path, 'rb') as f:
root_certificates = f.read()
creds = grpc.ssl_channel_credentials(root_certificates, private_key, certificate_chain)
else:
creds = grpc.ssl_channel_credentials(root_certificates=None, private_key=None,
certificate_chain=None)
self._channel = grpc.secure_channel(
self._uri,
creds,
options=opts
)
# avoid adding duplicate headers.
self._final_channel = self._channel
if self._authorization_interceptor:
self._final_channel = grpc.intercept_channel(self._channel, self._authorization_interceptor)
self._stub = milvus_pb2_grpc.MilvusServiceStub(self._final_channel)
@property
def server_address(self):
""" Server network address """
return self._uri
def reset_password(self, user, old_password, new_password):
"""
Reset the password and then set up the grpc channel again.
"""
self.update_credential(user, old_password, new_password)
self._setup_authorization_interceptor(user, new_password)
self._setup_grpc_channel()
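# --- Usage sketch (not part of the original class) ---
# Typical construction of this handler; host, port and credentials below are
# placeholders. Channel creation is lazy, so no server is contacted here, and
# the TLS branch of _setup_grpc_channel is taken only when secure=True.
# >>> plain = GrpcHandler(host="localhost", port="19530")
# >>> plain.server_address
# 'localhost:19530'
# >>> secure = GrpcHandler(host="localhost", port="19530", secure=True,
# ...                      user="root", password="<password>")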
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def create_collection(self, collection_name, fields, shards_num=2, timeout=None, **kwargs):
request = Prepare.create_collection_request(collection_name, fields, shards_num=shards_num, **kwargs)
# TODO(wxyu): In grpcio==1.37.1, `wait_for_ready` is an EXPERIMENTAL argument, while it's not supported in
# grpcio-testing==1.37.1 . So that we remove the argument in order to using grpc-testing in unittests.
# rf = self._stub.CreateCollection.future(request, wait_for_ready=True, timeout=timeout)
rf = self._stub.CreateCollection.future(request, timeout=timeout)
if kwargs.get("_async", False):
return rf
status = rf.result()
if status.error_code != 0:
LOGGER.error(status)
raise MilvusException(status.error_code, status.reason)
@retry_on_rpc_failure(retry_times=10, wait=1, retry_on_deadline=False)
@error_handler
def drop_collection(self, collection_name, timeout=None):
check_pass_param(collection_name=collection_name)
request = Prepare.drop_collection_request(collection_name)
rf = self._stub.DropCollection.future(request, wait_for_ready=True, timeout=timeout)
status = rf.result()
if status.error_code != 0:
raise MilvusException(status.error_code, status.reason)
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def has_collection(self, collection_name, timeout=None, **kwargs):
check_pass_param(collection_name=collection_name)
request = Prepare.has_collection_request(collection_name)
rf = self._stub.HasCollection.future(request, timeout=timeout)
reply = rf.result()
if reply.status.error_code == common_pb2.Success:
return reply.value
raise MilvusException(reply.status.error_code, reply.status.reason)
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def describe_collection(self, collection_name, timeout=None, **kwargs):
check_pass_param(collection_name=collection_name)
request = Prepare.describe_collection_request(collection_name)
rf = self._stub.DescribeCollection.future(request, wait_for_ready=True, timeout=timeout)
response = rf.result()
status = response.status
if status.error_code == 0:
return CollectionSchema(raw=response).dict()
raise DescribeCollectionException(status.error_code, status.reason)
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def list_collections(self, timeout=None):
request = Prepare.show_collections_request()
rf = self._stub.ShowCollections.future(request, wait_for_ready=True, timeout=timeout)
response = rf.result()
status = response.status
if response.status.error_code == 0:
return list(response.collection_names)
raise MilvusException(status.error_code, status.reason)
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def create_partition(self, collection_name, partition_name, timeout=None):
check_pass_param(collection_name=collection_name, partition_name=partition_name)
request = Prepare.create_partition_request(collection_name, partition_name)
rf = self._stub.CreatePartition.future(request, wait_for_ready=True, timeout=timeout)
response = rf.result()
if response.error_code != 0:
raise MilvusException(response.error_code, response.reason)
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def drop_partition(self, collection_name, partition_name, timeout=None):
check_pass_param(collection_name=collection_name, partition_name=partition_name)
request = Prepare.drop_partition_request(collection_name, partition_name)
rf = self._stub.DropPartition.future(request, wait_for_ready=True, timeout=timeout)
response = rf.result()
if response.error_code != 0:
raise MilvusException(response.error_code, response.reason)
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def has_partition(self, collection_name, partition_name, timeout=None):
check_pass_param(collection_name=collection_name, partition_name=partition_name)
request = Prepare.has_partition_request(collection_name, partition_name)
rf = self._stub.HasPartition.future(request, wait_for_ready=True, timeout=timeout)
response = rf.result()
status = response.status
if status.error_code == 0:
return response.value
raise MilvusException(status.error_code, status.reason)
# TODO: this is not in use
@error_handler
def get_partition_info(self, collection_name, partition_name, timeout=None):
request = Prepare.partition_stats_request(collection_name, partition_name)
rf = self._stub.DescribePartition.future(request, wait_for_ready=True, timeout=timeout)
response = rf.result()
status = response.status
if status.error_code == 0:
statistics = response.statistics
info_dict = dict()
for kv in statistics:
info_dict[kv.key] = kv.value
return info_dict
raise MilvusException(status.error_code, status.reason)
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def list_partitions(self, collection_name, timeout=None):
check_pass_param(collection_name=collection_name)
request = Prepare.show_partitions_request(collection_name)
rf = self._stub.ShowPartitions.future(request, wait_for_ready=True, timeout=timeout)
response = rf.result()
status = response.status
if status.error_code == 0:
return list(response.partition_names)
raise MilvusException(status.error_code, status.reason)
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def get_partition_stats(self, collection_name, partition_name, timeout=None, **kwargs):
check_pass_param(collection_name=collection_name)
index_param = Prepare.get_partition_stats_request(collection_name, partition_name)
future = self._stub.GetPartitionStatistics.future(index_param, wait_for_ready=True, timeout=timeout)
response = future.result()
status = response.status
if status.error_code == 0:
return response.stats
raise MilvusException(status.error_code, status.reason)
def _prepare_bulk_insert_request(self, collection_name, entities, partition_name=None, timeout=None, **kwargs):
insert_param = kwargs.get('insert_param', None)
if insert_param and not isinstance(insert_param, milvus_types.RowBatch):
raise ParamError("The value of key 'insert_param' is invalid")
if not isinstance(entities, list):
raise ParamError("None entities, please provide valid entities.")
collection_schema = self.describe_collection(collection_name, timeout=timeout, **kwargs)
fields_info = collection_schema["fields"]
request = insert_param if insert_param \
else Prepare.bulk_insert_param(collection_name, entities, partition_name, fields_info)
return request
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def bulk_insert(self, collection_name, entities, partition_name=None, timeout=None, **kwargs):
if not check_invalid_binary_vector(entities):
raise ParamError("Invalid binary vector data exists")
try:
request = self._prepare_bulk_insert_request(collection_name, entities, partition_name, timeout, **kwargs)
rf = self._stub.Insert.future(request, wait_for_ready=True, timeout=timeout)
if kwargs.get("_async", False) is True:
cb = kwargs.get("_callback", None)
f = MutationFuture(rf, cb, timeout=timeout, **kwargs)
f.add_callback(ts_utils.update_ts_on_mutation(collection_name))
return f
response = rf.result()
if response.status.error_code == 0:
m = MutationResult(response)
ts_utils.update_collection_ts(collection_name, m.timestamp)
return m
raise MilvusException(response.status.error_code, response.status.reason)
except Exception as err:
if kwargs.get("_async", False):
return MutationFuture(None, None, err)
raise err
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def delete(self, collection_name, expression, partition_name=None, timeout=None, **kwargs):
check_pass_param(collection_name=collection_name)
try:
req = Prepare.delete_request(collection_name, partition_name, expression)
future = self._stub.Delete.future(req, wait_for_ready=True, timeout=timeout)
response = future.result()
if response.status.error_code == 0:
m = MutationResult(response)
ts_utils.update_collection_ts(collection_name, m.timestamp)
return m
raise MilvusException(response.status.error_code, response.status.reason)
except Exception as err:
if kwargs.get("_async", False):
return MutationFuture(None, None, err)
raise err
@error_handler
def _execute_search_requests(self, requests, timeout=None, **kwargs):
auto_id = kwargs.get("auto_id", True)
try:
raws = []
futures = []
# step 1: get future object
for request in requests:
ft = self._stub.Search.future(request, wait_for_ready=True, timeout=timeout)
futures.append(ft)
if kwargs.get("_async", False):
func = kwargs.get("_callback", None)
return ChunkedSearchFuture(futures, func, auto_id)
# step2: get results
for ft in futures:
response = ft.result()
if response.status.error_code != 0:
raise MilvusException(response.status.error_code, response.status.reason)
raws.append(response)
round_decimal = kwargs.get("round_decimal", -1)
return ChunkedQueryResult(raws, auto_id, round_decimal)
except Exception as pre_err:
if kwargs.get("_async", False):
return SearchFuture(None, None, True, pre_err)
raise pre_err
@retry_on_rpc_failure(retry_times=10, wait=1, retry_on_deadline=False)
@error_handler
def search(self, collection_name, data, anns_field, param, limit,
expression=None, partition_names=None, output_fields=None,
round_decimal=-1, timeout=None, **kwargs):
check_pass_param(
limit=limit,
round_decimal=round_decimal,
anns_field=anns_field,
search_data=data,
partition_name_array=partition_names,
output_fields=output_fields,
travel_timestamp=kwargs.get("travel_timestamp", 0),
guarantee_timestamp=kwargs.get("guarantee_timestamp", 0)
)
if not self.has_collection(collection_name, timeout):
raise CollectionNotExistException(ErrorCode.CollectionNotExists,
f"collection {collection_name} doesn't exist!")
_kwargs = copy.deepcopy(kwargs)
collection_schema = self.describe_collection(collection_name, timeout)
auto_id = collection_schema["auto_id"]
consistency_level = collection_schema["consistency_level"]
# overwrite the consistency level defined when user created the collection
consistency_level = get_consistency_level(_kwargs.get("consistency_level", consistency_level))
_kwargs["schema"] = collection_schema
ts_utils.construct_guarantee_ts(consistency_level, collection_name, _kwargs)
requests = Prepare.search_requests_with_expr(collection_name, data, anns_field, param, limit, expression,
partition_names, output_fields, round_decimal, **_kwargs)
_kwargs.pop("schema")
_kwargs["auto_id"] = auto_id
_kwargs["round_decimal"] = round_decimal
return self._execute_search_requests(requests, timeout, **_kwargs)
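# --- Usage sketch (not part of the original class) ---
# Calling the search method above. It assumes a running Milvus server and an
# existing, loaded collection; the collection/field names, query vector and
# metric parameters are placeholders.
# >>> handler = GrpcHandler(host="localhost", port="19530")
# >>> results = handler.search(
# ...     "demo_collection", data=[[0.1, 0.2, 0.3, 0.4]], anns_field="embedding",
# ...     param={"metric_type": "L2", "params": {"nprobe": 10}}, limit=5,
# ...     output_fields=["id"], round_decimal=3)
# >>> print(results)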
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def get_query_segment_info(self, collection_name, timeout=30, **kwargs):
req = Prepare.get_query_segment_info_request(collection_name)
future = self._stub.GetQuerySegmentInfo.future(req, wait_for_ready=True, timeout=timeout)
response = future.result()
status = response.status
if status.error_code == 0:
return response.infos # todo: A wrapper class of QuerySegmentInfo
raise MilvusException(status.error_code, status.reason)
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def create_alias(self, collection_name, alias, timeout=None, **kwargs):
check_pass_param(collection_name=collection_name)
request = Prepare.create_alias_request(collection_name, alias)
rf = self._stub.CreateAlias.future(request, wait_for_ready=True, timeout=timeout)
response = rf.result()
if response.error_code != 0:
raise MilvusException(response.error_code, response.reason)
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def drop_alias(self, alias, timeout=None, **kwargs):
request = Prepare.drop_alias_request(alias)
rf = self._stub.DropAlias.future(request, wait_for_ready=True, timeout=timeout)
response = rf.result()
if response.error_code != 0:
raise MilvusException(response.error_code, response.reason)
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def alter_alias(self, collection_name, alias, timeout=None, **kwargs):
check_pass_param(collection_name=collection_name)
request = Prepare.alter_alias_request(collection_name, alias)
rf = self._stub.AlterAlias.future(request, wait_for_ready=True, timeout=timeout)
response = rf.result()
if response.error_code != 0:
raise MilvusException(response.error_code, response.reason)
@retry_on_rpc_failure(retry_times=10, wait=1)
@error_handler
def create_index(self, collection_name, field_name, params, timeout=None, **kwargs):
        # for historical
# Repository: dbmi-bgm/cgap-portal
import json
import mock
import pytest
import webtest
from datetime import datetime, timedelta
from dcicutils.misc_utils import Retry, ignored
from dcicutils.qa_utils import notice_pytest_fixtures, local_attrs
from pyramid.httpexceptions import HTTPBadRequest
from snovault import TYPES, COLLECTIONS
from snovault.elasticsearch import create_mapping
from snovault.elasticsearch.indexer_utils import get_namespaced_index
from snovault.schema_utils import load_schema
from snovault.util import add_default_embeds
from webtest import AppError
from ..search.lucene_builder import LuceneBuilder
from ..search.search_utils import find_nested_path
pytestmark = [pytest.mark.working, pytest.mark.schema, pytest.mark.search, pytest.mark.workbook]
### IMPORTANT
# uses the inserts in ./data/workbook_inserts
# design your tests accordingly
# just a little helper function
def recursively_find_uuids(json, uuids):
for key, val in json.items():
if key == 'uuid':
uuids.add(val)
elif isinstance(val, list):
for item in val:
if isinstance(item, dict):
uuids = recursively_find_uuids(item, uuids)
elif isinstance(val, dict):
uuids = recursively_find_uuids(val, uuids)
return uuids
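# A quick illustration of the helper above (values are made up): calling
#   recursively_find_uuids({'uuid': 'a', 'members': [{'uuid': 'b', 'lab': {'uuid': 'c'}}]}, set())
# returns {'a', 'b', 'c'}, since nested dicts and lists are walked recursively.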
def test_search_view(workbook, es_testapp):
""" Test basic things about search view """
notice_pytest_fixtures(workbook)
res = es_testapp.get('/search/?type=Item').json
assert res['@type'] == ['ItemSearchResults', 'Search']
assert res['@id'] == '/search/?type=Item'
assert res['@context'] == '/terms/'
assert res['notification'] == 'Success'
assert res['title'] == 'Search'
assert res['total'] > 0
assert 'facets' in res
# type facet should always have > 1 option here, even when it is selected
for facet in res['facets']:
if facet['field'] == 'type':
assert len(facet['terms']) > 1
break
assert 'filters' in res
assert '@graph' in res
def test_search_with_no_query(workbook, es_testapp):
"""
using /search/ (with no query) should default to /search/?type=Item
thus, should satisfy same assertions as test_search_view
"""
notice_pytest_fixtures(workbook)
res = es_testapp.get('/search/').follow(status=200)
assert res.json['@type'] == ['ItemSearchResults', 'Search']
assert res.json['@id'] == '/search/?type=Item'
assert res.json['@context'] == '/terms/'
assert res.json['notification'] == 'Success'
assert res.json['title'] == 'Search'
assert res.json['total'] > 0
assert 'facets' in res
# test default facets (data type)
default_facets = [facet['field'] for facet in res.json['facets']]
assert 'type' in default_facets
# assert 'status' in default_facets uncomment this if status is added back -Will 5/13/2020
assert 'filters' in res
assert '@graph' in res
def test_collections_redirect_to_search(workbook, es_testapp):
"""
we removed the collections page and redirect to search of that type
redirected_from is not used for search
"""
res = es_testapp.get('/user/', status=301).follow(status=200)
assert res.json['@type'] == ['UserSearchResults', 'ItemSearchResults', 'Search']
assert res.json['@id'] == '/search/?type=User'
assert 'redirected_from' not in res.json['@id']
assert res.json['@context'] == '/terms/'
assert res.json['notification'] == 'Success'
assert res.json['title'] == 'Search'
assert res.json['total'] > 0
assert 'facets' in res
assert 'filters' in res
assert '@graph' in res
def test_search_with_embedding(workbook, es_testapp):
""" Searches for a family and checks members.*, an embedded field, is properly resolved """
res = es_testapp.get('/search/?type=Family&limit=all').json
embed = res['@graph'][0]['members']
assert embed[0]['father']['display_title'] == 'GAPID3PW26SK' # all are same so order does not matter
assert embed[0]['mother']['display_title'] == 'GAPIDISC7R73'
assert embed[1]['father']['display_title'] == 'GAPID3PW26SK'
assert embed[1]['mother']['display_title'] == 'GAPIDISC7R73'
def test_search_with_simple_query(workbook, es_testapp):
"""
Tests simple query string searches on CGAP using type-based
q= and generic q=
"""
# run a simple query with type=Disorder and q=Dummy
res = es_testapp.get('/search/?type=Disorder&q=Dummy').json
assert len(res['@graph']) == 3
# get the uuids from the results
dummy_uuids = [org['uuid'] for org in res['@graph'] if 'uuid' in org]
# run the same search with type=Item
res = es_testapp.get('/search/?type=Item&q=Dummy').json
assert len(res['@graph']) >= 3
all_uuids = [item['uuid'] for item in res['@graph'] if 'uuid' in item]
# make sure all uuids found in the first search are present in the second
assert set(dummy_uuids).issubset(set(all_uuids))
# run with q=Dum returns the same hits...
res = es_testapp.get('/search/?type=Item&q=Dum').json
dum_uuids = [item['uuid'] for item in res['@graph'] if 'uuid' in item]
# make sure all uuids found in the first search are present in the second
assert set(dummy_uuids).issubset(set(dum_uuids))
# should eliminate first and third level disorders
res = es_testapp.get('/search/?type=Disorder&q=Sub+-Second').json
assert len(res['@graph']) == 1
# include first level
res = es_testapp.get('/search/?type=Disorder&q=(Sub+-Second) | oranges').follow().json
assert len(res['@graph']) == 2
# exclude all
res = es_testapp.get('/search/?type=Disorder&q=(oranges)+(apples)+(bananas)', status=404)
def test_search_ngram(workbook, es_testapp):
"""
Tests edge-ngram related behavior with simple query string
"""
# test search beyond max-ngram, should still give one result
res = es_testapp.get('/search/?type=Item&q=Second+Dummy+Sub+Disorder').json
assert len(res['@graph']) == 1
    # run search with q=D (should get nothing since it is shorter than the minimum ngram length)
es_testapp.get('/search/?type=Item&q=D', status=404)
# run search with q=ummy (should get nothing since we are using edge ngrams)
es_testapp.get('/search/?type=Item&q=ummy', status=404)
# test ngram on upper bound
res1 = es_testapp.get('/search/?type=Item&q=information').json
assert len(res1['@graph']) > 0
# should get same results
res2 = es_testapp.get('/search/?type=Item&q=informatio').json
# should have same results in res1
assert len(res1['@graph']) == len(res2['@graph'])
# should get nothing
es_testapp.get('/search/?type=Item&q=informatix', status=404)
# will get same results as res1 and res2
res3 = es_testapp.get('/search/?type=Item&q=informatioabd').json
assert len(res2['@graph']) == len(res3['@graph'])
# search for part of uuid common, should get all 3
res4 = es_testapp.get('/search/?type=Disorder&q=231111bc').json
assert len(res4['@graph']) == 3
# search for full uuid
res5 = es_testapp.get('/search/?type=Disorder&q=231111bc-8535-4448-903e-854af460b25').json
    assert len(res5['@graph']) == 3
# uuid difference beyond 10
res6 = es_testapp.get('/search/?type=Disorder&q=231111bc-89').json
    assert len(res6['@graph']) == 3
# uuid difference at 10 (should get no results)
es_testapp.get('/search/?type=Disorder&q=231111bc-9', status=404)
@pytest.mark.skip # XXX: What is this really testing?
def test_search_facets_and_columns_order(workbook, es_testapp):
# TODO: Adjust ordering of mixed-in facets, perhaps sort by lookup or something, in order to un-xfail.
test_type = 'experiment_set_replicate'
type_info = es_testapp.app.registry[TYPES].by_item_type[test_type]
schema = type_info.schema
schema_facets = [('type', {'title': 'Data Type'})]
schema_facets.extend(schema['facets'].items())
# the following facets are added after schema facets
schema_facets.append(('status', {'title': 'Status'}))
# remove any disabled facets
schema_facets = [fct for fct in schema_facets if not fct[1].get('disabled', False)]
sort_facets = sorted(schema_facets, key=lambda fct: fct[1].get('order', 0))
res = es_testapp.get('/search/?type=ExperimentSetReplicate&limit=all').json
for i,val in enumerate(sort_facets):
assert res['facets'][i]['field'] == val[0]
# assert order of columns when we officially upgrade to python 3.6 (ordered dicts)
for key,val in schema.get('columns', {}).items():
assert res['columns'][key]['title'] == val['title']
@pytest.fixture
def dd_dts(es_testapp, workbook):
# returns a dictionary of strings of various date and datetimes
    # relative to the creation date of the Dummy Disorder object in the test inserts
enz = es_testapp.get('/search/?type=Disorder&disorder_name=Dummy+Disorder').json['@graph'][0]
cdate = enz['date_created']
_date, _time = cdate.split('T')
yr, mo, day = [int(i) for i in _date.split('-')]
hr, mi, _ = _time.split(':', 2)
hr = int(hr)
mi = int(mi)
createdate = datetime(yr, mo, day, hr, mi)
return {
'creationdatetime': ':'.join(str(createdate).replace(' ', '+').split(':')[:-1]),
'creationdate': str(createdate.date()) + '+00:00',
'daybefore': ':'.join(str(createdate - timedelta(days=1)).replace(' ', '+').split(':')[:-1]),
'dayafter': ':'.join(str(createdate + timedelta(days=1)).replace(' ', '+').split(':')[:-1]),
'hourbefore': ':'.join(str(createdate - timedelta(hours=1)).replace(' ', '+').split(':')[:-1]),
'hourafter': ':'.join(str(createdate + timedelta(hours=1)).replace(' ', '+').split(':')[:-1])
}
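# The fixture above yields strings shaped roughly like this (dates are illustrative):
#   {'creationdatetime': '2020-05-13+12:30', 'creationdate': '2020-05-13+00:00',
#    'daybefore': '2020-05-12+12:30', 'dayafter': '2020-05-14+12:30',
#    'hourbefore': '2020-05-13+11:30', 'hourafter': '2020-05-13+13:30'}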
def test_search_date_range_find_within(dd_dts, es_testapp, workbook):
notice_pytest_fixtures(workbook)
    # the Dummy Disorder item should be returned with all the provided date pairs
gres = es_testapp.get('/search/?type=Disorder&disorder_name=Dummy+Disorder').json
g_uuids = [item['uuid'] for item in gres['@graph'] if 'uuid' in item]
dts = {k: v.replace(':', '%3A') for k, v in dd_dts.items()}
datepairs = [
(dts['daybefore'], dts['dayafter']),
(dts['creationdatetime'], dts['dayafter']),
(dts['daybefore'], dts['creationdatetime']),
(dts['creationdate'], dts['dayafter']),
(dts['hourbefore'], dts['hourafter'])
]
for dp in datepairs:
search = '/search/?type=Disorder&date_created.from=%s&date_created.to=%s' % dp
sres = es_testapp.get(search).json
s_uuids = [item['uuid'] for item in sres['@graph'] if 'uuid' in item]
assert set(g_uuids).issubset(set(s_uuids))
@pytest.mark.skip # XXX: how to best port?
def test_search_with_nested_integer(es_testapp, workbook):
notice_pytest_fixtures(workbook)
search0 = '/search/?type=ExperimentHiC'
s0res = es_testapp.get(search0).json
s0_uuids = [item['uuid'] for item in s0res['@graph'] if 'uuid' in item]
search1 = '/search/?type=ExperimentHiC&files.file_size.to=1500'
s1res = es_testapp.get(search1).json
s1_uuids = [item['uuid'] for item in s1res['@graph'] if 'uuid' in item]
assert len(s1_uuids) > 0
search2 = '/search/?type=ExperimentHiC&files.file_size.from=1501'
s2res = es_testapp.get(search2).json
s2_uuids = [item['uuid'] for item in s2res['@graph'] if 'uuid' in item]
assert len(s2_uuids) > 0
# make sure there is no intersection of the uuids
assert not set(s1_uuids) & set(s2_uuids)
assert set(s1_uuids) | set(s2_uuids) == set(s0_uuids)
def test_search_date_range_dontfind_without(dd_dts, es_testapp, workbook):
notice_pytest_fixtures(workbook)
    # the Dummy Disorder item should NOT be returned with any of the provided date pairs
dts = {k: v.replace(':', '%3A') for k, v in dd_dts.items()}
datepairs = [
(dts['daybefore'], dts['creationdate']),
(dts['hourafter'], dts['dayafter']),
(dts['daybefore'], dts['hourbefore'])
]
for dp in datepairs:
search = '/search/?type=Disorder&date_created.from=%s&date_created.to=%s' % dp
assert es_testapp.get(search, status=404)
def test_search_query_string_AND_NOT_cancel_out(workbook, es_testapp):
"""
Tests if you use + and - with same field you should get no result
"""
search = '/search/?q=cell+-cell&type=Family'
assert es_testapp.get(search, status=404)
def test_search_query_string_with_booleans(workbook, es_testapp):
"""
Tests some search queries involving booleans on users
"""
search = '/search/?type=User&q=HMS'
res_stem = es_testapp.get(search).json
assert len(res_stem['@graph']) > 1
uuids = [r['uuid'] for r in res_stem['@graph'] if 'uuid' in r]
wrangler_uuid = "986b362f-4eb6-4a9c-8173-3ab267307e3b"
tester_uuid = "986b362f-4eb6-4a9c-8173-3ab267307e4c"
# assert induced_stem_uuid not in not_induced_uuids
# now search for stem +induced (AND is now "+")
search_and = '/search/?type=User&q=scientist+%2Bcurrent'
res_both = es_testapp.get(search_and).json
both_uuids = [r['uuid'] for r in res_both['@graph'] if 'uuid' in r]
assert len(both_uuids) == 2
assert wrangler_uuid in both_uuids
assert tester_uuid in both_uuids
# search with OR ("|")
search_or = '/search/?type=User&q=scientist+%7Ctesting'
res_or = es_testapp.get(search_or).json
or_uuids = [r['uuid'] for r in res_or['@graph'] if 'uuid' in r]
    assert wrangler_uuid in or_uuids
# File: Traclus_DL.py
from Trajectory import *
from ClusterQ import *
import sys
import datetime
from collections import defaultdict
from numpy import arange
from itertools import count
import math
"""This is a collection of methods to read in a set of 2 dimensional desire lines trajectories, and create segmented Trajectory data structures,
run our adapted angle-based DBScan using each segment as a "seed", putting those that respect the minimum sum-of-weights (density)
requirement into a priority queue. Clusters are then popped from the queue, the weighted average X and Y of their starts and ends are taken, and these are output to a file
for visualization in the QVis program. """
#data structures to avoid having to recalculate distances between segments and lines
segment_to_line_dist = defaultdict(lambda : defaultdict(float))
segment_to_line_closest_seg = defaultdict(lambda : defaultdict(Trajectory.TrajectorySegment))
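# Example invocation (file name and parameter values are illustrative only), matching
# the sys.argv parsing in the __main__ block at the bottom of this file:
#   python Traclus_DL.py desire_lines.txt 100.0 5.0 10.0 50.0
# i.e. infile, max_dist, min_density (sum of weights), max_angle (degrees), segment_size.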
def round_to(n, precision):
correction = precision/2.0 if n >= 0 else -precision/2.0
return int(n/precision+correction)*precision
def point_segment_distance(px, py, x1, y1, x2, y2):
""" returns the distance from a point in 2-D (px,py) to a line segment
    going from (x1, y1) to (x2, y2), and also returns the x and y coordinates of the closest point in the line segment to (px,py)
This function is adapted from a stackoverflow.com answer provided by user "<NAME>"
at http://stackoverflow.com/questions/2824478/shortest-distance-between-two-line-segments (last accessed November 6, 2014)
and is under the Creative Commons License http://creativecommons.org/licenses/by-sa/3.0/
"""
# print("called with ", px, py, x1, y1, x2, y2)
dx = x2 - x1
dy = y2 - y1
    if dx == dy == 0: # the segment's just a point
        return math.hypot(px - x1, py - y1), x1, y1
# Calculate the t that minimizes the distance.
t = ((px - x1) * dx + (py - y1) * dy) / (dx * dx + dy * dy)
# See if this represents one of the segment's
# end points or a point in the middle.
near_x = 0
near_y = 0
if t < 0:
dx = px - x1
dy = py - y1
near_x = x1
near_y = y1
elif t > 1:
dx = px - x2
dy = py - y2
near_x = x2
near_y = y2
else:
near_x = x1 + t * dx
near_y = y1 + t * dy
dx = px - near_x
dy = py - near_y
#if math.hypot(dx,dy) < 101:
# print "came up with ", math.hypot(dx,dy), near_x, near_y
return (math.hypot(dx, dy), near_x, near_y)
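# Worked example for the function above: the point (1, 1) projects onto the segment
# from (0, 0) to (2, 0) at (1, 0) (t == 0.5), so
#   point_segment_distance(1, 1, 0, 0, 2, 0) == (1.0, 1.0, 0.0)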
def reachable(seg1, seg_angle, traj_angles, max_dist, max_angle, segment_to_line_dist, segment_to_line_closest_seg):
"""For a given segment and other parameters (the maximum angle, maximum distance), find all other segments reachable from that segment"""
reachable_segs = []
sumweight = 0.
for angle in traj_angles:
#go through the dictionary of angles, only consider those that are less than max_angle from the seg_angle
if abs(angle - seg_angle) > max_angle:
continue
for line2 in traj_angles[angle]:
#go through the lines for that angle, calculate distance of that line to that segment if not already done, and then check if less than max dist
if(line2.name not in segment_to_line_dist[seg1.id]): #segment_to_line_dist keeps track of distances already calculated, avoid more than once
segment_to_line_dist[seg1.id][line2.name], closest_x, closest_y = point_segment_distance((seg1.startx+seg1.endx)/2, (seg1.starty+seg1.endy)/2, line2.startx, line2.starty, line2.endx, line2.endy)
if segment_to_line_dist[seg1.id][line2.name] > max_dist:
segment_to_line_closest_seg[seg1.id][line2.name] = max_dist + 1;
else:
segment_to_line_closest_seg[seg1.id][line2.name] = line2.get_segment_at(closest_x, closest_y)
if segment_to_line_dist[seg1.id][line2.name] <= max_dist:
#Respects both angle and distance limits! add to list of reachable segments and increment weight
reachable_segs.append(segment_to_line_closest_seg[seg1.id][line2.name])
sumweight = sumweight + line2.weight
return sumweight, reachable_segs
def DBScan(seg1, traj_angles, max_dist, min_weight, max_angle, segment_to_line_dist, segment_to_line_closest_seg):
"""implementation of DBScan with an angle twist. Find segments reachable from seg1 that respect both angle and distance criteria. Then expands cluster as done in
classic DBScan, but angles are not allowed to expand, i.e. all final members of the cluster have an angle less than max_angle with the original seg1"""
reachable_segs = set() #keep track of those reachable from set1
sumweight, reachable_segs = reachable(seg1, seg1.parent.angle, traj_angles, max_dist, max_angle, segment_to_line_dist, segment_to_line_closest_seg); #add those reachable based on the maximum angle and maximum distance (epsilon)
if sumweight < min_weight:
return (-1, [])
else:
return expand_cluster(seg1, traj_angles, reachable_segs, max_dist, min_weight, max_angle, segment_to_line_dist, segment_to_line_closest_seg)
def expand_cluster(seg1, traj_angles, reachable_segs, max_dist, min_weight, max_angle, segment_to_line_dist, segment_to_line_closest_seg):
""" Expansion of cluster from seg1 as in classic DBScan but angles are not allowed to expand, i.e. all final members of the cluster have an angle less than max_angle with the original seg1"""
corridor_assignment = set()
represented_lines = set() # for a given line, only one representative segment per cluster/corridor
represented_lines.add(seg1.parent)
expanded_sum_weight = seg1.parent.weight
corridor_assignment.add(seg1) #this will definitely be in the corridor, and does not need to be expanded as we have found everything reachable from seg1
while len(reachable_segs) > 0:
new_candidates = [] #this is the list of segments that we continue to expand
for seg2 in reachable_segs:
if seg2 not in corridor_assignment and seg2.parent not in represented_lines:
represented_lines.add(seg2.parent)
corridor_assignment.add(seg2)
expanded_sum_weight += seg2.parent.weight
seg2_sum_weight, new_reachable = reachable(seg2, seg1.parent.angle, traj_angles, max_dist, max_angle, segment_to_line_dist, segment_to_line_closest_seg); #add those reachable based on the maximum angle and maximum distance (epsilon). Note that the second argument is not a typo as the angles are kept close to those of the original "seed" segment (seg1)
if seg2_sum_weight >= min_weight:
for seg3 in new_reachable:
if seg3 not in reachable_segs and seg3.parent not in represented_lines:
new_candidates.append(seg3)
reachable_segs = new_candidates
return (expanded_sum_weight, corridor_assignment)
def print_weighted_averages(cluster_segments, corr_number, oh):
"""File output method for printing weighted average of start and ends for the segments assigned to that cluster. File format is designed for visualization with QVis"""
weightsum = 0.
x1sum = 0.
y1sum = 0.
x2sum = 0.
y2sum = 0.
for segment in cluster_segments:
weightsum += segment.parent.weight
x1sum += segment.startx * segment.parent.weight
x2sum += segment.endx * segment.parent.weight
y1sum += segment.starty * segment.parent.weight
y2sum += segment.endy * segment.parent.weight
oh.write(str(corr_number) + "\t"+ str(weightsum) + "\tLINESTRING(" + str(x1sum/weightsum) + " "+ str(y1sum/weightsum) + ", " + str(x2sum/weightsum) + " " + str(y2sum/weightsum) + ")\n")
def get_traj_by_name(trajectories, name):
"""Search for particular trajectory name, used mostly for testing purposes"""
    print(trajectories)
for traj in trajectories:
if traj.name == name:
return traj
def read_file(infile, segment_size, traj_angles, trajectories):
"""File input, one row per desire line with columns corresponding to line name, line weight, start x, start y, end x and end y, with comment lines (not to be included as desire lines
    starting with #)."""
fh = open (infile, 'r')
for line in fh:
if line.startswith("#"):
continue
linelist = line.split();
traj = Trajectory(name=linelist[0], weight = float(linelist[1]), startx=float(linelist[2]), starty=float(linelist[3]), endx=float(linelist[4]), endy=float(linelist[5]))
traj.make_segments(segment_size)
rounded_angle = round_to(traj.angle, 0.5);
traj_angles[rounded_angle].append(traj) #add the trajectory to a list in the dictionary of angles
trajectories.append(traj)
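# An example input row for read_file (values illustrative), whitespace-separated as
# parsed above: name, weight, start x, start y, end x, end y:
#   line_007  2.5  1000.0  2000.0  1500.0  2600.0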
def build_DB_queue(trajectories, traj_angles, max_dist, min_density, max_angle, segment_to_line_dist, segment_to_line_closest_seg):
"""Create empty ClusterQ, call DBScan with each segment in the set of all desire lines. Add those to the queue that respect minimum weight, return the ClusterQ of all
segments after DBScan run for all"""
Q = ClusterQ(min_density)
for line in trajectories:
for segment in line.segments:
sumweight, segments = DBScan(segment, traj_angles, max_dist, min_density, max_angle, segment_to_line_dist, segment_to_line_closest_seg)
if sumweight >= min_density:
Q.add_cluster(segment, segments, int(sumweight*100))
return Q
if __name__ == '__main__':
infile = sys.argv[1]
max_dist = float(sys.argv[2])
min_density = float(sys.argv[3])
max_angle = float(sys.argv[4])
segment_size = float(sys.argv[5])
# For each line, get candidate lines from angle dictionary, find within distance
traj_angles = defaultdict(list) #look up angles fast, each angle (in degrees) will have a list of Trajectory objects
trajectories = [] # list of trajectories, keep it?
read_file(infile, segment_size, traj_angles, trajectories)
segment_list_oh = open(infile + "." + str(max_dist) + "." + str(min_density) + "." + str(max_angle) + "." + str(segment_size) + ".segmentlist.txt", "w")
corridor_list_oh = open (infile + "." + str(max_dist) + "." + str(min_density) + "." + str(max_angle) + "." + str(segment_size) + ".corridorlist.txt", "w")
corridor_list_oh.write("name\tweight\tcoordinates\n");
segment_list_oh.write("line_id\tweight\tangle\tcorridor_id\tcoordinates\n");
    Q = build_DB_queue(trajectories, traj_angles, max_dist, min_density, max_angle, segment_to_line_dist, segment_to_line_closest_seg)
#To do, move segment file output to a separate function
corridor = 0
assigned = set()
while True:
try:
            cluster_segments = Q.pop_cluster()
print_weighted_averages(cluster_segments, corridor, corridor_list_oh)
for segment in cluster_segments:
                segment_list_oh.write(segment.parent.name + "\t" + str(segment.parent.weight) + "\t" + str(segment.parent.angle) + "\t" + str(corridor) + "\tLINESTRING(" + str(segment.startx) + " " + str(segment.starty) + ", " + str(segment.endx) + " " + str(segment.endy) + ")\n")  # columns follow the header written above
list of genome_servers.
Use reset argument to overwrite the current list. Otherwise the current one will be appended to.
:param list[str] | str url: url(s) to update the genome_servers list with
:param bool reset: whether the current list should be overwritten
"""
urls = _make_list_of_str(url)
if CFG_SERVERS_KEY in self:
if reset:
self[CFG_SERVERS_KEY] = _extend_unique([], urls)
else:
self[CFG_SERVERS_KEY] = _extend_unique(self[CFG_SERVERS_KEY], urls)
else:
raise GenomeConfigFormatError("The '{}' is missing. Can't update the server list".format(CFG_SERVERS_KEY))
def subscribe(self, urls, reset=False):
"""
        Add URLs to the list of genome_servers.
Use reset argument to overwrite the current list.
Otherwise the current one will be appended to.
:param list[str] | str urls: urls to update the genome_servers list with
:param bool reset: whether the current list should be overwritten
"""
if self.file_path:
with self as r:
r._update_genome_servers(url=urls, reset=reset)
else:
self._update_genome_servers(url=urls, reset=reset)
_LOGGER.info("Subscribed to: {}".format(", ".join(urls)))
def unsubscribe(self, urls):
"""
        Remove URLs from the list of genome_servers.
:param list[str] | str urls: urls to update the genome_servers list with
"""
unsub_list = []
ori_servers = self[CFG_SERVERS_KEY]
for s in urls:
try:
ori_servers.remove(s)
unsub_list.append(s)
except ValueError:
_LOGGER.warning("URL '{}' not in genome_servers list: {}".format(s, ori_servers))
if self.file_path:
with self as r:
r._update_genome_servers(ori_servers, reset=True)
else:
self._update_genome_servers(ori_servers, reset=True)
if unsub_list:
_LOGGER.info("Unsubscribed from: {}".format(", ".join(unsub_list)))
def getseq(self, genome, locus, as_str=False):
"""
Return the sequence found in a selected range and chromosome.
Something like the refget protocol.
:param str genome: name of the sequence identifier
:param str locus: coordinates of desired sequence, e.g. 'chr1:1-10'
        :param bool as_str: whether to convert the returned object to a string
and return just the sequence
:return str | pyfaidx.FastaRecord | pyfaidx.Sequence: selected sequence
"""
import pyfaidx
fa = pyfaidx.Fasta(self.seek(genome, "fasta", strict_exists=True))
locus_split = locus.split(":")
chr = fa[locus_split[0]]
if len(locus_split) == 1:
return str(chr) if as_str else chr
start, end = locus_split[1].split("-")
_LOGGER.debug("chr: '{}', start: '{}', end: '{}'".
format(locus_split[0], start, end))
return str(chr[int(start):int(end)]) \
if as_str else chr[int(start):int(end)]
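    # Hedged usage sketch (genome name, config path and locus are illustrative, and the
    # constructor arguments are assumptions rather than taken from this excerpt):
    #   rgc = RefGenConf(filepath="genome_config.yaml")
    #   first_ten = rgc.getseq("hg38", "chr1:1-10", as_str=True)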
def get_genome_attributes(self, genome):
"""
Get the dictionary attributes, like checksum, contents, description.
Does not return the assets.
:param str genome: genome to get the attributes dict for
:return Mapping[str, str]: available genome attributes
"""
return {k: self[CFG_GENOMES_KEY][genome][k]
for k in CFG_GENOME_ATTRS_KEYS if k in self[CFG_GENOMES_KEY][genome]}
def is_asset_complete(self, genome, asset, tag):
"""
Check whether all required tag attributes are defined in the RefGenConf object.
This is the way we determine tag completeness.
:param str genome: genome to be checked
:param str asset: asset package to be checked
:param str tag: tag to be checked
:return bool: the decision
"""
tag_data = self[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY][asset][CFG_ASSET_TAGS_KEY][tag]
return all([r in tag_data for r in REQ_TAG_ATTRS])
def _invert_genomes(self, order=None):
""" Map each asset type/kind/name to a collection of assemblies.
A configuration file encodes assets by genome, but in some use cases
it's helpful to invert the direction of this mapping. The value of the
asset key/name may differ by genome, so that information is
necessarily lost in this inversion, but we can collect genome IDs by
asset ID.
:param function(str) -> object order: how to key genome IDs and asset
names for sort
:return OrderedDict[str, Iterable[str]] binding between asset kind/key/name
and collection of reference genome assembly names for which the
asset type is available
"""
genomes = {}
for g, am in self[CFG_GENOMES_KEY].items():
for a in am[CFG_ASSETS_KEY].keys():
genomes.setdefault(a, []).append(g)
assets = sorted(genomes.keys(), key=order)
return OrderedDict([(a, sorted(genomes[a], key=order)) for a in assets])
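    # Illustrative inversion (genome/asset names are made up): a config holding
    #   hg38 -> {fasta, bowtie2_index} and mm10 -> {fasta}
    # comes back as OrderedDict([('bowtie2_index', ['hg38']), ('fasta', ['hg38', 'mm10'])]).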
def _chk_digest_if_avail(self, genome, remote_asset_name, server_url):
"""
Check local asset digest against the remote one and populate children of the asset with the provided asset:tag.
In case the local asset does not exist, the config is populated with the remote asset digest and children data
:param str genome: name of the genome to check the asset digests for
:param str remote_asset_name: asset and tag names, formatted like: asset:tag
        :param str server_url: address of the server to query for the digests
:raise RefgenconfError: if the local digest does not match its remote counterpart
"""
remote_asset_data = prp(remote_asset_name)
asset = remote_asset_data["item"]
tag = remote_asset_data["tag"]
asset_digest_url = construct_request_url(server_url, API_ID_DIGEST).\
format(genome=genome, asset=asset, tag=tag)
try:
remote_digest = _download_json(asset_digest_url)
except DownloadJsonError:
_LOGGER.warning("Parent asset ({}/{}:{}) not found on the server. The asset provenance was not verified.".
format(genome, asset, tag))
return
try:
local_digest = self.id(genome, asset, tag)
if remote_digest != local_digest:
raise RemoteDigestMismatchError(asset, local_digest, remote_digest)
except RefgenconfError:
_LOGGER.debug("Could not find '{}/{}:{}' digest. Digest for this parent will be populated "
"with the server one after the pull".format(genome, asset, tag))
return
def chk_digest_update_child(self, genome, remote_asset_name, child_name, server_url):
"""
Check local asset digest against the remote one and populate children of the asset with the provided asset:tag.
In case the local asset does not exist, the config is populated with the remote asset digest and children data
:param str genome: name of the genome to check the asset digests for
:param str remote_asset_name: asset and tag names, formatted like: asset:tag
:param str child_name: name to be appended to the children of the parent
:param str server_url: address of the server to query for the digests
:raise RefgenconfError: if the local digest does not match its remote counterpart
"""
remote_asset_data = prp(remote_asset_name)
asset = remote_asset_data["item"]
tag = remote_asset_data["tag"]
asset_digest_url = construct_request_url(server_url, API_ID_DIGEST).\
format(genome=genome, asset=asset, tag=tag)
try:
remote_digest = _download_json(asset_digest_url)
except DownloadJsonError:
return
try:
# we need to allow for missing seek_keys section so that the digest is respected even from the previously
# populated 'incomplete asset' from the server
_assert_gat_exists(self[CFG_GENOMES_KEY], genome, asset, tag,
allow_incomplete=not self.is_asset_complete(genome, asset, tag))
except (KeyError, MissingAssetError, MissingGenomeError, MissingSeekKeyError):
self.update_tags(genome, asset, tag, {CFG_ASSET_CHECKSUM_KEY: remote_digest})
_LOGGER.info("Could not find '{}/{}:{}' digest. Populating with server data".format(genome, asset, tag))
else:
local_digest = self[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY][asset][CFG_ASSET_TAGS_KEY] \
[tag][CFG_ASSET_CHECKSUM_KEY]
if remote_digest != local_digest:
raise RemoteDigestMismatchError(asset, local_digest, remote_digest)
finally:
self.update_relatives_assets(genome, asset, tag, [child_name], children=True)
def id(self, genome, asset, tag=None):
"""
Returns the digest for the specified asset.
The defined default tag will be used if not provided as an argument
:param str genome: genome identifier
:param str asset: asset identifier
:param str tag: tag identifier
:return str: asset digest for the tag
"""
_assert_gat_exists(self[CFG_GENOMES_KEY], genome, asset, tag)
tag = tag or self.get_default_tag(genome, asset)
a = self[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY][asset]
if CFG_ASSET_CHECKSUM_KEY in a[CFG_ASSET_TAGS_KEY][tag]:
return a[CFG_ASSET_TAGS_KEY][tag][CFG_ASSET_CHECKSUM_KEY]
raise MissingConfigDataError("Digest does not exist for: {}/{}:{}".
format(genome, asset, tag))
def run_plugins(self, hook):
"""
Runs all installed plugins for the specified hook.
:param str hook: hook identifier
"""
for name, func in self.plugins[hook].items():
_LOGGER.debug("Running {} plugin: {}".format(hook, name))
func(self)
def write(self, filepath=None):
"""
Write the contents to a file.
If pre- and post-update plugins are defined, they will be executed automatically
:param str filepath: a file path to write to
:raise OSError: when the object has been created in a read only mode or other process has locked the file
:raise TypeError: when the filepath cannot be determined.
This takes place only if YacAttMap initialized with a Mapping as an input, not read from file.
:raise OSError: when the write is called on an object with no write capabilities
or when writing to a file that is locked by a different object
:return str: the path to the created files
"""
self.run_plugins(PRE_UPDATE_HOOK)
path = super(RefGenConf, self).write(filepath=filepath)
self.run_plugins(POST_UPDATE_HOOK)
return path
class DownloadProgressBar(tqdm):
"""
from: https://github.com/tqdm/tqdm#hooks-and-callbacks
"""
def update_to(self, b=1, bsize=1, tsize=None):
"""
Update the progress bar
:param int b: number of blocks transferred so far
:param int bsize: size of each block (in tqdm units)
:param int tsize: total size (in tqdm units)
"""
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
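    # Typical reporthook usage of this tqdm subclass (a sketch; it assumes
    # urllib.request.urlretrieve is the downloader used by the caller):
    #   with DownloadProgressBar(unit='B', unit_scale=True, miniters=1, desc=name) as t:
    #       urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)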
def _download_json(url, params=None):
"""
Safely connect to the provided API endpoint and download JSON data.
:param str url: server API endpoint
:param dict params: query parameters
:return dict: served data
"""
import requests
_LOGGER.debug("Downloading JSON data; querying URL: '{}'".format(url))
resp = requests.get(url, params=params)
if resp.ok:
return resp.json()
elif resp.status_code == 404:
resp = None
raise DownloadJsonError(resp)
def _download_url_progress(url, output_path, name, params=None):
"""
Download asset at given URL to given filepath, show progress along the way.
:param str url: server API endpoint
    :param str output_path: path of the file to save the download to
        # shuffle the self.image_info list
# this happens in place, and returns None
shuffle(self.image_info)
# add all the class info
for category in updated_category_dict:
# self.add_class(dataset_name, category['id'], category['name'])
self.add_class(dataset_name,
updated_category_dict[category],
category)
print(len(updated_category_dict))
print(updated_category_dict)
print(skip_classes)
print(len(self.image_info))
print('\t'*3 + "#"*20 + '\n'*2)
        print('\t'*3 + "CONFIG Dataset" + '\n'*2)
print(len(self.class_info))
print('\t'*3 + "#"*20 + '\n'*2)
def visualize(self, image_id):
tmp_dir = './tmp'
if not os.path.isdir(tmp_dir):
os.makedirs(tmp_dir)
img = self.load_image(image_id)
mask, class_id = self.load_mask(image_id)
color_set = [np.array([0, 0, 255]),
np.array([0, 255, 0]),
np.array([255, 0, 0]),
np.array([255, 255, 0]),
np.array([0, 255, 255]),
np.array([255, 0, 255])
]
total_color_number = len(color_set)
alpha = 0.6
print(mask.shape)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.5
for idx, single_mask in enumerate(mask.transpose(2, 0, 1)):
index = np.where(single_mask.astype(float)>0.5)
color_index = np.random.randint(total_color_number)
img[index] = img[index] * alpha + (1-alpha) * color_set[color_index]
# attach class name for visualization at top left corner of mask
try:
min_row = min(index[0])
min_col = min(index[1])
except ValueError:
continue
cv2.putText(img, self.idx_to_name_map[class_id[idx]], (min_col, min_row), fontFace, fontScale, color_set[color_index].tolist(), lineType=cv2.LINE_AA)
# convert rgb to bgr
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.imwrite(os.path.join(tmp_dir, str(image_id)+'.png'), img)
# color_set = [[0, 0, 255],
# [0, 255, 0],
# [255, 0, 0],
# [255, 255, 0],
# [0, 255, 255],
# [255, 0, 255]
# ]
# # directly use polygon to for visluation
# img = self.load_image(image_id)
# for polygon in self.image_info[image_id]['polygon_list']:
# color_index = np.random.randint(total_color_number)
# cv2.fillPoly(img, np.array([polygon]).astype(int), color_set[color_index])
cv2.imwrite(str(image_id)+'_fill_' + '.png', img)
def load_image(self, image_id):
if self.image_info[image_id]['image_type'] == 'patch':
image = super(SatelliteDataset, self).load_image(image_id)
elif self.image_info[image_id]['image_type'] == 'big':
# only pick small patch at specified location
big_image_id = self.image_info[image_id]['big_image_id'] # '001', '002', ....
big_image = self.image_dict[big_image_id] # numpy array for big image
start_h = self.image_info[image_id]['start_h']
start_w = self.image_info[image_id]['start_w']
patch_height = self.image_info[image_id]['height']
patch_width = self.image_info[image_id]['width']
image = big_image[start_h:start_h+patch_height, start_w:start_w+patch_width]
# resize the image for preprocessing
# print(image.shape)
# image = skimage.transform.resize(image, (256, 256))
# print(image.shape)
return image
def load_mask(self, image_id):
"""
============================================================
For this preliminary task, we only work on three classes
Tree,
Vehicles,
Buildings,
===========================================================
"""
object_list = self.image_info[image_id]['object_list']
polygon_list = self.image_info[image_id]['polygon_list']
width = self.image_info[image_id]['width']
height = self.image_info[image_id]['height']
# print(self.image_info[image_id]['path'])
        assert len(object_list) == len(polygon_list), "object number and polygon number don't match"
mask_array = []
class_id_array = []
object_number_threshold = 1e8
for idx, (label, polygon) in enumerate(zip(object_list, polygon_list)):
if idx >= object_number_threshold:
# Too many objects would be out of memory
break
black_ground = np.zeros((height, width))
cv2.fillPoly(black_ground, np.array([polygon]).astype(int), (1.,))
# cv2.fillConvexPoly(black_ground, np.array([polygon]).astype(int), (1.,))
black_ground = black_ground.astype(bool)
mask_array.append(black_ground)
class_id_array.append(object_list[idx])
# mask_array = np.stack(mask_array, axis=0).transpose(1, 2, 0).astype(bool)
mask_array = np.stack(mask_array, axis=2)
class_id_array = np.array(class_id_array)
# print(mask_array.shape)
return mask_array, class_id_array
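    # Hedged usage sketch (dataset construction details and the image_id are illustrative):
    #   dataset = SatelliteDataset()
    #   dataset.config_dataset_with_big_image("satellite", root_dir="./data/big_images")
    #   mask, class_ids = dataset.load_mask(0)
    #   # mask has shape (height, width, num_instances); class_ids has shape (num_instances,)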
def config_dataset_with_big_image(self,
dataset_name,
root_dir):
"""
Args:
            root_dir: string. The directory containing all the big images and annotations.
"""
# read the ground truth category information
category_map, super_category_set = category_map_name_to_super("../data/complete.json")
# add all the class info
name_to_idx_map = dict()
self.idx_to_name_map = dict()
for idx, category in enumerate(super_category_set):
# self.add_class(dataset_name, category['id'], category['name'])
name_to_idx_map[category] = idx+1
self.idx_to_name_map[idx+1] = category
self.add_class(dataset_name,
idx+1,
category)
if self.mode == 'evaluation':
return
def calculate_intersection_over_poly1(poly1, poly2):
"""
            This is not the usual IoU; it is the intersection area divided by the area of poly1.
"""
# Get the intersection of two polygons.
inter_poly = poly1.intersection(poly2)
# area
inter_area = inter_poly.area
poly1_area = poly1.area
half_iou = inter_area / poly1_area
# the area of inter-poly over the poly1_area.
return half_iou, inter_poly
def convert_poly_2_numpy(polygon):
"""
Args:
polygon: shapely.geometric object
Returns:
            polygon coordinates as a numpy array of shape (N, 2)
"""
#pdb.set_trace()
try:
polygon = mapping(polygon)['coordinates']
except KeyError:
polygon = mapping(polygon)['geometries']
for instance in polygon:
if instance['type'] == 'Polygon':
polygon = instance['coordinates']
break
polygon = polygon[0]
polygon = np.stack(polygon, axis=0)
return polygon
def get_class_id_and_polygon(start_point=None,
patch_size=None,
annotation_dict=None,
category_map=None,
name_to_idx_map=None
):
"""
Args:
start_point: (y, x)
                patch_size: (patch_height, patch_width)
                annotation_dict: polygons and labels
                category_map: map_dict {name: super_category}
                name_to_idx_map: map_dict {super_category_name: class id}
Returns:
object_list and polygon_list
"""
def convert_polygon_to_box(polygon, box_format='yxyx'):
"""
Args:
polygon: [[x, y], [x, y], [x, y], ...]
Returns
box: [y1, x1, y2, x2]
"""
xy_min = np.amin(polygon, axis=0)
xy_max = np.amax(polygon, axis=0)
if box_format == 'xyxy':
return np.concatenate([xy_min, xy_max])
elif box_format == 'yxyx':
return np.concatenate([xy_min[::-1], xy_max[::-1]])
def truncate_polygon(polygon, patch_box):
"""
Truncate the polygon, to make it inside the images
Args:
polygon: [[x,y], [x,y], ...]
patch_box: [y1, x1, y2, x2]
            Returns:
polygon: [[x, y], [x, y], ...] ... x1 <= x <= x2, y1 <= y <= y2
"""
# check the x_min
patch_width = patch_box[3] - patch_box[1]
patch_height = patch_box[2] - patch_box[0]
polygon[:, 0] = np.minimum(np.maximum(polygon[:, 0] - patch_box[1], 0), patch_width)
polygon[:, 1] = np.minimum(np.maximum(polygon[:, 1] - patch_box[0], 0), patch_height)
return polygon
def computer_intersection_over_box2(box1, box2):
"""Computer intersection area over the second box."""
# box1: The patch image location, format (y1, x1, y2, x2)
# box2: the bbox of polygon, format (x1, y1, x2, y2)
y1 = max(box1[0], box2[0])
x1 = max(box1[1], box2[1])
y2 = min(box1[2], box2[2])
x2 = min(box1[3], box2[3])
intersection_area = max(y2-y1, 0) * max(x2-x1, 0)
box2_area = max(box2[2]-box2[0], 0) * max(box2[3]-box2[1], 0)
area_theshold = 1e-3 # if the polgon is too small, we will ignore it.
if box2_area < area_theshold:
iou = 0
else:
iou = intersection_area / box2_area
# convert polygon into box by choosing the x_min, y_min, x_max, y_max
return iou
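            # Worked example for the function above (boxes are illustrative):
            # box1 = [0, 0, 10, 10] and box2 = [5, 5, 15, 15] overlap in a 5x5 square;
            # box2 has area 100, so the returned ratio is 25 / 100 = 0.25.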
# print("len(annotation_dict['objects'])", len(annotation_dict['objects']))
def check_if_center_inside_patch(center, patch_box):
"""
check if the center point is inside the patch
Args:
center: [x, y]
                patch_box: [y1, x1, y2, x2]
            Returns:
flag: True for inside, False for outside
"""
# to make sure the object is completely inside the
slack = 0
flag = (center[0] >= (patch_box[1]+slack)) and \
(center[0] <= (patch_box[3]-slack)) and \
(center[1] >= (patch_box[0]+slack)) and \
(center[1] <= (patch_box[2]-slack))
return flag
threshold = 1e-3
patch_box = start_point + [x+y for x, y in zip(start_point, patch_size)]
object_list = []
polygon_list = []
threshold_area = 0.1
for index, obj in enumerate(annotation_dict['objects']):
label = obj['label']
if label not in category_map:
# we should skip this class
continue
else:
#pdb.set_trace()
# check if the center of polygon is inside the patch
flag = check_if_center_inside_patch(obj['center'], patch_box)
if not flag:
# print("Object is not inside the patch")
continue
polygon = np.array(obj['polygon'])
# convert box to polygon
shape_polygon_patch = Polygon([[patch_box[1], patch_box[0]],
[patch_box[3], patch_box[0]],
[patch_box[3], patch_box[2]],
[patch_box[1], patch_box[2]]
]
)
# calculate the intersection area over the ground truth annotation
# check annotation poly is valid or not.
try:
shape_polygon_annotation = Polygon(polygon)
part_iou, inter_poly = calculate_intersection_over_poly1(shape_polygon_annotation,
shape_polygon_patch)
# except shapely.errors.TopologicalError, ValueError:
except:
                part_iou, inter_poly = 1, None
# pdb.set_trace()
if part_iou < threshold_area:
continue
elif part_iou < 0.95:
polygon = convert_poly_2_numpy(inter_poly)
else:
polygon = polygon
polygon = truncate_polygon(polygon, patch_box)
super_name = category_map[label]
super_id = name_to_idx_map[super_name]
object_list.append(super_id)
polygon_list.append(polygon)
return object_list, polygon_list
# read ground truth list
json_list = glob(os.path.join(root_dir, "*/*.json"))
tif_list = glob(os.path.join(root_dir, "*/*.tif"))
# create dictionary for ground true list
json_dict = {json_file.split('/')[-2]: json_file for json_file in json_list}
tif_dict = {tif_file.split('/')[-2]: tif_file for tif_file in tif_list}
# read all the big images into memory
image_dict = {tif_file.split('/')[-2]: cv2.imread(tif_file) for tif_file in tif_list}
for key in image_dict:
print("Read the {} big image".format(key))
image_dict[key] = cv2.cvtColor(image_dict[key], cv2.COLOR_BGR2RGB)
# self.image_dict = {key: cv2.cvtColor(image_dict[key], cv2.COLOR_BGR2RGB) for key in image_dict}
self.image_dict = image_dict
# get GT dict
gt_dict = dict()
for key in json_dict:
with open(json_dict[key]) as fid:
gt_dict[key] = json.load(fid)
# add center point for polygon
for key in gt_dict: # key: '001', '002'....
            # update each object annotation with its center coordinate
            for i, anno in enumerate(gt_dict[key]['objects']):
                # this happens by reference, so gt_dict is updated in place
anno['center'] = np.mean(anno['polygon'], axis=0)
patch_height = 1024
patch_width = 1024
step_height = 500
step_width = 500
counter = 0
for big_image_id in json_dict:
# The shape of the big image
# if big_image_id != '002':
# continue
print("Processing: ", big_image_id)
height, width = image_dict[big_image_id].shape[:2]
import json
import argparse
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
import os
import random
import numpy as np
import requests
import logging
import math
import copy
import wandb
import string
import re
from time import time
from tqdm import tqdm, trange
from densephrases.models import DensePhrases, MIPS, MIPSLight
from densephrases.utils.single_utils import set_seed, to_list, to_numpy, backward_compat
from densephrases.utils.squad_utils import get_question_dataloader, TrueCaser
import densephrases.utils.squad_utils as squad_utils
from densephrases.utils.embed_utils import get_question_results
from densephrases.utils.eval_utils import normalize_answer, f1_score, exact_match_score, drqa_exact_match_score, \
drqa_regex_match_score, drqa_metric_max_over_ground_truths, drqa_normalize
from densephrases.utils.kilt.eval import evaluate as kilt_evaluate
from densephrases.utils.kilt.kilt_utils import store_data as kilt_store_data
from densephrases.experiments.run_single import load_and_cache_examples
from transformers import (
MODEL_MAPPING,
AutoConfig,
AutoTokenizer,
AutoModel,
AutoModelForQuestionAnswering,
AdamW,
get_linear_schedule_with_warmup,
)
from reader import Reader
from reranker import Reranker
from utils import process_sample, process_reranker_input, process_reader_input
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def load_query_encoder(device, args):
assert args.query_encoder_path
# Configure paths for query encoder serving
args.model_type = args.model_type.lower()
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.pretrained_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.pretrained_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
# Pre-trained DensePhrases
model = DensePhrases(
config=config,
tokenizer=tokenizer,
transformer_cls=MODEL_MAPPING[config.__class__],
)
try:
model.load_state_dict(backward_compat(
torch.load(os.path.join(args.query_encoder_path, 'pytorch_model.bin'), map_location=torch.device('cpu'))
))
except Exception as e:
print(e)
model.load_state_dict(torch.load(os.path.join(args.query_encoder_path, 'pytorch_model.bin')), strict=False)
model.to(device)
logger.info(f'DensePhrases loaded from {args.query_encoder_path} having {MODEL_MAPPING[config.__class__]}')
logger.info('Number of model parameters: {:,}'.format(sum(p.numel() for p in model.parameters())))
return model, tokenizer
def get_query2vec(query_encoder, tokenizer, args, batch_size=64):
device = 'cuda' if args.cuda else 'cpu'
def query2vec(queries):
question_dataloader, question_examples, query_features = get_question_dataloader(
queries, tokenizer, args.max_query_length, batch_size=batch_size
)
question_results = get_question_results(
question_examples, query_features, question_dataloader, device, query_encoder, batch_size=batch_size
)
if args.debug:
logger.info(f"{len(query_features)} queries: {' '.join(query_features[0].tokens_)}")
outs = []
for qr_idx, question_result in enumerate(question_results):
out = (
question_result.start_vec.tolist(), question_result.end_vec.tolist(), query_features[qr_idx].tokens_
)
outs.append(out)
return outs
return query2vec
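# Sketch of how the closure above is used (mirrors embed_all_query below; the query
# text is illustrative):
#   query2vec = get_query2vec(query_encoder=model, tokenizer=tokenizer, args=args)
#   start_vec, end_vec, tokens = query2vec(["who wrote hamlet"])[0]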
def load_phrase_index(args, load_light=False):
# Configure paths for index serving
phrase_dump_dir = os.path.join(args.dump_dir, args.phrase_dir)
index_dir = os.path.join(args.dump_dir, args.index_dir)
index_path = os.path.join(index_dir, args.index_name)
idx2id_path = os.path.join(index_dir, args.idx2id_name)
# Load mips
mips_class = MIPS if not load_light else MIPSLight
mips = mips_class(
phrase_dump_dir=phrase_dump_dir,
index_path=index_path,
idx2id_path=idx2id_path,
cuda=args.cuda,
logging_level=logging.DEBUG if args.debug else logging.INFO
)
return mips
def embed_all_query(questions, args, query_encoder, tokenizer, batch_size=48):
query2vec = get_query2vec(
query_encoder=query_encoder, tokenizer=tokenizer, args=args, batch_size=batch_size
)
all_outs = []
for q_idx in tqdm(range(0, len(questions), batch_size)):
outs = query2vec(questions[q_idx:q_idx+batch_size])
all_outs += outs
start = np.concatenate([out[0] for out in all_outs], 0)
end = np.concatenate([out[1] for out in all_outs], 0)
query_vec = np.concatenate([start, end], 1)
logger.info(f'Query reps: {query_vec.shape}')
return query_vec
def load_qa_pairs(data_path, args, draft_num_examples=1000, shuffle=False):
q_ids = []
questions = []
answers = []
data = json.load(open(data_path))['data']
for item in data:
q_id = item['id']
question = item['question']
answer = item['answers']
if len(answer) == 0:
continue
q_ids.append(q_id)
questions.append(question)
answers.append(answer)
questions = [query[:-1] if query.endswith('?') else query for query in questions]
if args.truecase:
try:
logger.info('Loading truecaser for queries')
truecase = TrueCaser(os.path.join(os.environ['DPH_DATA_DIR'], args.truecase_path))
questions = [truecase.get_true_case(query) if query == query.lower() else query for query in questions]
except Exception as e:
print(e)
if args.do_lower_case:
logger.info(f'Lowercasing queries')
questions = [query.lower() for query in questions]
if args.draft:
q_ids = np.array(q_ids)[:draft_num_examples].tolist()
questions = np.array(questions)[:draft_num_examples].tolist()
answers = np.array(answers)[:draft_num_examples].tolist()
if shuffle:
qa_pairs = list(zip(q_ids, questions, answers))
random.shuffle(qa_pairs)
q_ids, questions, answers = zip(*qa_pairs)
logger.info(f'Shuffling QA pairs')
logger.info(f'Loading {len(questions)} questions from {data_path}')
logger.info(f'Sample Q ({q_ids[0]}): {questions[0]}, A: {answers[0]}')
return q_ids, questions, answers
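# The QA file read above is expected to look roughly like this (entries illustrative):
#   {"data": [{"id": "q1", "question": "who wrote hamlet?",
#              "answers": ["William Shakespeare"]}, ...]}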
def eval_inmemory(args, mips=None, query_encoder=None, tokenizer=None):
# Load dataset and encode queries
qids, questions, answers = load_qa_pairs(args.test_path, args)
if query_encoder is None:
print(f'Query encoder will be loaded from {args.query_encoder_path}')
device = 'cuda' if args.cuda else 'cpu'
query_encoder, tokenizer = load_query_encoder(device, args)
query_vec = embed_all_query(questions, args, query_encoder, tokenizer)
# Load MIPS
if mips is None:
mips = load_phrase_index(args)
# Search
step = args.eval_batch_size
predictions = []
evidences = []
titles = []
scores = []
for q_idx in tqdm(range(0, len(questions), step)):
result = mips.search(
query_vec[q_idx:q_idx+step],
q_texts=questions[q_idx:q_idx+step], nprobe=args.nprobe,
top_k=args.top_k, max_answer_length=args.max_answer_length,
)
prediction = [[ret['answer'] for ret in out] if len(out) > 0 else [''] for out in result]
evidence = [[ret['context'] for ret in out] if len(out) > 0 else [''] for out in result]
title = [[ret['title'] for ret in out] if len(out) > 0 else [['']] for out in result]
score = [[ret['score'] for ret in out] if len(out) > 0 else [-1e10] for out in result]
predictions += prediction
evidences += evidence
titles += title
scores += score
logger.info(f"Avg. {sum(mips.num_docs_list)/len(mips.num_docs_list):.2f} number of docs per query")
eval_fn = evaluate_results if not args.is_kilt else evaluate_results_kilt
return eval_fn(predictions, qids, questions, answers, args, evidences, scores, titles)
def evaluate_results(predictions, qids, questions, answers, args, evidences, scores, titles, q_tokens=None, save_predictions=True):
wandb.init(project="DensePhrases", entity="howard-yen", mode="online" if args.wandb else "disabled")
wandb.config.update(args)
# Filter if there's candidate
if args.candidate_path is not None:
candidates = set()
with open(args.candidate_path) as f:
for line in f:
line = line.strip().lower()
candidates.add(line)
logger.info(f'{len(candidates)} candidates are loaded from {args.candidate_path}')
topk_preds = [list(filter(lambda x: (x in candidates) or (x.lower() in candidates), a)) for a in predictions]
topk_preds = [a[:args.top_k] if len(a) > 0 else [''] for a in topk_preds]
predictions = topk_preds[:]
top1_preds = [a[0] for a in topk_preds]
else:
predictions = [a[:args.top_k] if len(a) > 0 else [''] for a in predictions]
top1_preds = [a[0] for a in predictions]
no_ans = sum([a == '' for a in top1_preds])
logger.info(f'no_ans/all: {no_ans}, {len(top1_preds)}')
logger.info(f'Evaluating {len(top1_preds)} answers.')
# Get em/f1
f1s, ems = [], []
for prediction, groundtruth in zip(top1_preds, answers):
if len(groundtruth)==0:
f1s.append(0)
ems.append(0)
continue
f1s.append(max([f1_score(prediction, gt)[0] for gt in groundtruth]))
ems.append(max([exact_match_score(prediction, gt) for gt in groundtruth]))
final_f1, final_em = np.mean(f1s), np.mean(ems)
logger.info('EM: %.2f, F1: %.2f'%(final_em * 100, final_f1 * 100))
# Top 1/k em (or regex em)
exact_match_topk = 0
exact_match_top1 = 0
f1_score_topk = 0
f1_score_top1 = 0
pred_out = {}
for i in range(len(predictions)):
# For debugging
if i < 3:
logger.info(f'{i+1}) {questions[i]}')
logger.info(f'=> groundtruths: {answers[i]}, top 5 prediction: {predictions[i][:5]}')
match_fn = drqa_regex_match_score if args.regex else drqa_exact_match_score
em_topk = max([drqa_metric_max_over_ground_truths(
match_fn, prediction, answers[i]
) for prediction in predictions[i][:args.top_k]])
em_top1 = drqa_metric_max_over_ground_truths(
match_fn, top1_preds[i], answers[i]
)
exact_match_topk += em_topk
exact_match_top1 += em_top1
f1_topk = 0
f1_top1 = 0
if not args.regex:
match_fn = lambda x, y: f1_score(x, y)[0]
f1_topk = max([drqa_metric_max_over_ground_truths(
match_fn, prediction, answers[i]
) for prediction in predictions[i][:args.top_k]])
f1_top1 = drqa_metric_max_over_ground_truths(
match_fn, top1_preds[i], answers[i]
)
f1_score_topk += f1_topk
f1_score_top1 += f1_top1
pred_out[qids[i]] = {
'question': questions[i],
'answer': answers[i], 'prediction': predictions[i], 'score': scores[i], 'title': titles[i],
'evidence': evidences[i] if evidences is not None else '',
'em_top1': bool(em_top1), f'em_top{args.top_k}': bool(em_topk),
'f1_top1': f1_top1, f'f1_top{args.top_k}': f1_topk,
'q_tokens': q_tokens[i] if q_tokens is not None else ['']
}
total = len(predictions)
exact_match_top1 = 100.0 * exact_match_top1 / total
f1_score_top1 = 100.0 * f1_score_top1 / total
logger.info({'exact_match_top1': exact_match_top1, 'f1_score_top1': f1_score_top1})
exact_match_topk = 100.0 * exact_match_topk / total
f1_score_topk = 100.0 * f1_score_topk / total
logger.info({f'exact_match_top{args.top_k}': exact_match_topk, f'f1_score_top{args.top_k}': f1_score_topk})
wandb.log(
{"Top1 EM": exact_match_top1, "Top1 F1": f1_score_top1,
"Topk EM": exact_match_topk, "Topk F1": f1_score_topk}
)
# Dump predictions
if len(args.query_encoder_path) == 0:
pred_dir = os.path.join(os.environ['DPH_SAVE_DIR'], 'pred')
else:
pred_dir = os.path.join(args.query_encoder_path, 'pred')
if not os.path.exists(pred_dir):
os.makedirs(pred_dir)
pred_path = os.path.join(
pred_dir, os.path.splitext(os.path.basename(args.test_path))[0] + f'_{total}.pred'
)
if save_predictions:
logger.info(f'Saving prediction file to {pred_path}')
with open(pred_path, 'w') as f:
json.dump(pred_out, f)
return exact_match_top1
def evaluate_results_kilt(predictions, qids, questions, answers, args, evidences, scores, titles):
#wandb.init(project="DensePhrases (KILT)", mode="online" if args.wandb else "disabled")
#wandb.config.update(args)
total=len(predictions)
# load title2id dict and convert predicted titles into wikipedia_ids
with open(args.title2wikiid_path) as f:
title2wikiid = json.load(f)
pred_wikipedia_ids = [[[title2wikiid[t] for t in title_] for title_ in title] for title in titles]
# dump official predictions
if len(args.query_encoder_path) == 0:
pred_dir = os.path.join(os.environ['DPH_SAVE_DIR'], 'pred-kilt')
else:
pred_dir = os.path.join(args.query_encoder_path, 'pred-kilt')
if not os.path.exists(pred_dir):
os.makedirs(pred_dir)
pred_official_path = os.path.join(
pred_dir, f'{args.query_encoder_path.split("/")[-1]}_' +
os.path.splitext(os.path.basename(args.test_path))[0] + f'_{total}.jsonl'
)
official_preds_to_save = []
for prediction, question, pred_wikipedia_id, qid in zip(predictions, questions, pred_wikipedia_ids, qids):
outputs = []
for pred, pred_wid in zip(prediction, pred_wikipedia_id):
outputs.append({
'answer': pred,
'provenance':[{'wikipedia_id':pred_wid_} for pred_wid_ in pred_wid]
})
official_preds_to_save.append({
'id': qid,
'input': question,
'output': [outputs[0]]
})
logger.info(f'Saving official prediction file to {pred_official_path}')
kilt_store_data(pred_official_path, official_preds_to_save)
assert '.jsonl' in args.kilt_gold_path, "kilt_gold_path should be .jsonl"
result = kilt_evaluate(
gold=args.kilt_gold_path,
guess=pred_official_path)
# logging results
result_to_logging = {
'accuracy':result['downstream']['accuracy'],
'f1':result['downstream']['f1'],
'KILT-accuracy':result['kilt']['KILT-accuracy'],
'KILT-f1':result['kilt']['KILT-f1'],
'Rprec':result['retrieval']['Rprec'],
'recall@5':result['retrieval']['recall@5']
}
logger.info(result_to_logging)
#wandb.log(result_to_logging)
# make custom predictions
pred_out = {}
for i in range(len(predictions)):
# For debugging
if i < 3:
logger.info(f'{i+1}) {questions[i]}')
logger.info(f'=> groundtruths: {answers[i]}, top 5 prediction: {predictions[i][:5]}')
guess_answer = predictions[i][0]
gold_candidate_answers | |
"""
Tools for converting CPAN packages to conda recipes.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import subprocess
import sys
from distutils.version import LooseVersion
from glob import glob
from io import open
from os import makedirs
from os.path import basename, dirname, join, exists
from conda.api import get_index
from conda.fetch import TmpDownload
from conda.resolve import MatchSpec, Resolve
from conda.utils import memoized
from conda_build.config import config
# Python 2.x backward compatibility
if sys.version_info < (3, 0):
str = unicode
CPAN_META = """\
package:
name: {packagename}
version: !!str {version}
source:
{useurl}fn: {filename}
{useurl}url: {cpanurl}
{usemd5}md5: {md5}
# patches:
# List any patch files here
# - fix.patch
{build_comment}build:
# If this is a new build for the same version, increment the build
# number. If you do not include this key, it defaults to 0.
{build_comment}number: 1
requirements:
build:
- perl{build_depends}
run:
- perl{run_depends}
test:
# Perl 'use' tests
{import_comment}imports:{import_tests}
# You can also put a file called run_test.pl (or run_test.py) in the recipe
# that will be run at test time.
# requires:
# Put any additional test requirements here. For example
# - nose
about:
home: {homeurl}
license: {license}
summary: {summary}
# See
# http://docs.continuum.io/conda/build.html for
# more information about meta.yaml
"""
CPAN_BUILD_SH = """\
#!/bin/bash
# If it has Build.PL use that, otherwise use Makefile.PL
if [ -f Build.PL ]; then
perl Build.PL
./Build
./Build test
# Make sure this goes in site
./Build install --installdirs site
elif [ -f Makefile.PL ]; then
# Make sure this goes in site
perl Makefile.PL INSTALLDIRS=site
make
make test
make install
else
echo 'Unable to find Build.PL or Makefile.PL. You need to modify build.sh.'
exit 1
fi
# Add more build steps here, if they are necessary.
# See
# http://docs.continuum.io/conda/build.html
# for a list of environment variables that are set during the build process.
"""
CPAN_BLD_BAT = """\
:: If it has Build.PL use that, otherwise use Makefile.PL
IF exist Build.PL (
perl Build.PL
IF errorlevel 1 exit 1
Build
IF errorlevel 1 exit 1
Build test
:: Make sure this goes in site
Build install --installdirs site
IF errorlevel 1 exit 1
) ELSE IF exist Makefile.PL (
:: Make sure this goes in site
perl Makefile.PL INSTALLDIRS=site
IF errorlevel 1 exit 1
make
IF errorlevel 1 exit 1
make test
IF errorlevel 1 exit 1
make install
) ELSE (
ECHO 'Unable to find Build.PL or Makefile.PL. You need to modify bld.bat.'
exit 1
)
:: Add more build steps here, if they are necessary.
:: See
:: http://docs.continuum.io/conda/build.html
:: for a list of environment variables that are set during the build process.
"""
class InvalidReleaseError(RuntimeError):
'''
An exception that is raised when a release is not available on MetaCPAN.
'''
pass
def main(args, parser):
'''
Creates a bunch of CPAN conda recipes.
'''
perl_version = config.CONDA_PERL
package_dicts = {}
[output_dir] = args.output_dir
indent = '\n - '
args.packages = list(reversed(args.packages))
processed_packages = set()
orig_version = args.version
while args.packages:
package = args.packages.pop()
# If we're passed version in the same format as `PACKAGE=VERSION`
# update version
if '=' in package:
package, __, args.version = package.partition('=')
else:
args.version = orig_version
# Skip duplicates
if package in processed_packages:
continue
processed_packages.add(package)
# Convert modules into distributions
orig_package = package
package = dist_for_module(args.meta_cpan_url, package, perl_version)
if package == 'perl':
print(("WARNING: {0} is a Perl core module that is not developed " +
"outside of Perl, so we are skipping creating a recipe " +
"for it.").format(orig_package))
continue
elif package not in {orig_package, orig_package.replace('::', '-')}:
print(("WARNING: {0} was part of the {1} distribution, so we are " +
"making a recipe for {1} instead.").format(orig_package,
package))
latest_release_data = get_release_info(args.meta_cpan_url, package,
None, perl_version)
packagename = perl_to_conda(package)
# Skip duplicates
if ((args.version is not None and ((packagename + '-' + args.version) in
processed_packages)) or
((packagename + '-' + latest_release_data['version']) in
processed_packages)):
continue
d = package_dicts.setdefault(package, {'packagename': packagename,
'run_depends': '',
'build_depends': '',
'build_comment': '# ',
'test_commands': '',
'usemd5': '',
'useurl': '',
'summary': "''",
'import_tests': ''})
# Fetch all metadata from CPAN
core_version = core_module_version(package, perl_version)
release_data = get_release_info(args.meta_cpan_url, package,
(LooseVersion(args.version) if
args.version is not None else
core_version),
perl_version)
# Check if versioned recipe directory already exists
dir_path = join(output_dir, '-'.join((packagename,
release_data['version'])))
if exists(dir_path):
raise RuntimeError("directory already exists: %s" % dir_path)
# If this is something we're downloading, get MD5
if release_data['download_url']:
d['cpanurl'] = release_data['download_url']
d['md5'], size = get_checksum_and_size(release_data['download_url'])
d['filename'] = basename(release_data['archive'])
print("Using url %s (%s) for %s." % (d['cpanurl'], size, package))
else:
d['useurl'] = '#'
d['usemd5'] = '#'
d['cpanurl'] = ''
d['filename'] = ''
d['md5'] = ''
try:
d['homeurl'] = release_data['resources']['homepage']
except KeyError:
d['homeurl'] = 'http://metacpan.org/pod/' + package
if 'abstract' in release_data:
d['summary'] = repr(release_data['abstract']).lstrip('u')
d['license'] = (release_data['license'][0] if
isinstance(release_data['license'], list) else
release_data['license'])
d['version'] = release_data['version']
processed_packages.add(packagename + '-' + d['version'])
# Add Perl version to core module requirements, since these are empty
# packages, unless we're newer than what's in core
if core_version is not None and ((args.version is None) or
(core_version >=
LooseVersion(args.version))):
d['useurl'] = '#'
d['usemd5'] = '#'
empty_recipe = True
# Add dependencies to d if not in core, or newer than what's in core
else:
build_deps, run_deps, packages_to_append = deps_for_package(
package, release_data, perl_version, args, output_dir,
processed_packages)
d['build_depends'] += indent.join([''] + list(build_deps |
run_deps))
d['run_depends'] += indent.join([''] + list(run_deps))
args.packages.extend(packages_to_append)
empty_recipe = False
# Create import tests
module_prefix = package.replace('::', '-').split('-')[0]
if 'provides' in release_data:
for provided_mod in sorted(set(release_data['provides'])):
# Filter out weird modules that don't belong
if (provided_mod.startswith(module_prefix) and
'::_' not in provided_mod):
d['import_tests'] += indent + provided_mod
if d['import_tests']:
d['import_comment'] = ''
else:
d['import_comment'] = '# '
# Write recipe files to a versioned directory
makedirs(dir_path)
print("Writing recipe for %s-%s" % (packagename, d['version']))
with open(join(dir_path, 'meta.yaml'), 'w') as f:
f.write(CPAN_META.format(**d))
with open(join(dir_path, 'build.sh'), 'w') as f:
if empty_recipe:
f.write('#!/bin/bash\necho "Nothing to do."\n')
else:
f.write(CPAN_BUILD_SH.format(**d))
with open(join(dir_path, 'bld.bat'), 'w') as f:
if empty_recipe:
f.write('echo "Nothing to do."\n')
else:
f.write(CPAN_BLD_BAT.format(**d))
print("Done")
@memoized
def latest_pkg_version(pkg):
'''
:returns: the latest version of the specified conda package available
'''
r = Resolve(get_index())
try:
pkg_list = sorted(r.get_pkgs(MatchSpec(pkg)))
except RuntimeError:
pkg_list = None
if pkg_list:
pkg_version = LooseVersion(pkg_list[-1].version)
else:
pkg_version = None
return pkg_version
@memoized
def core_module_version(module, version):
'''
:param module: Name of a Perl core module
:type module: str
:returns: The version of the specified module that is currently available
in the specified version of Perl. If the version is `undef`, but
the module is actually part of the Perl core, the version of Perl
passed in will be used as the module version.
'''
# In case we were given a dist, convert to module
module = module.replace('-', '::')
if version is None:
version = LooseVersion(config.CONDA_PERL)
else:
version = LooseVersion(version)
cmd = ['corelist', '-v', str(version), module]
try:
output = subprocess.check_output(cmd).decode('utf-8')
except subprocess.CalledProcessError:
sys.exit(('Error: command failed: %s\nPlease make sure you have ' +
'the perl conda package installed in your default ' +
'environment.') % ' '.join(cmd))
mod_version = output.split()[1]
# If undefined, that could either mean it's versionless or not in core
if mod_version == 'undef':
# Check if it's actually in core
cmd = ['corelist', module]
output = subprocess.check_output(cmd).decode('utf-8')
# If it's in core...
if 'perl v' in output:
first_version = output.partition('perl v')[2].strip()
first_version = LooseVersion(first_version)
# If it's newer than the specified version, return None
if LooseVersion(first_version) > LooseVersion(version):
mod_version = None
else:
mod_version = version
# If it's not, return None
else:
mod_version = None
else:
mod_version = LooseVersion(mod_version)
return mod_version
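# Usage sketch (added; illustrative only): core_module_version() shells out to
# `corelist`, so it only works where the perl conda package provides that tool.
# The module names below illustrate the two possible return shapes:
#
#   core_module_version('File::Spec', '5.22.0')  # -> LooseVersion of the release bundled with that Perl
#   core_module_version('Try::Tiny', '5.22.0')   # -> None, because Try::Tiny is not a core module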
def deps_for_package(package, release_data, perl_version, args, output_dir,
processed_packages):
'''
Build the sets of dependencies and packages we need recipes for. This should
only be called for non-core modules/distributions, as dependencies are
ignored for core modules.
:param package: Perl distribution we're checking dependencies of.
:type package: str
:param release_data: The metadata about the current release of the package.
:type release_data: dict
:param perl_version: The target version of Perl we're building this for.
This only really matters for core modules.
:type perl_version: str
:param args: The command-line arguments passed to the skeleton command.
:type args: Namespace
:param output_dir: The output directory to write recipes to
:type output_dir: str
:param processed_packages: The set of packages we have built recipes for
already.
:type processed_packages: set of | |
# lightweight_mmm/plot.py
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plotting functions pre and post model fitting."""
import functools
from typing import Any, List, Optional, Sequence, Tuple
import arviz
import jax
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn import metrics
from lightweight_mmm import lightweight_mmm
from lightweight_mmm import preprocessing
@functools.partial(jax.jit, static_argnames=("media_mix_model"))
def _make_single_prediction(media_mix_model: lightweight_mmm.LightweightMMM,
mock_media: jnp.ndarray,
extra_features: Optional[jnp.ndarray],
seed: Optional[int]
) -> jnp.ndarray:
"""Makes a prediction of a single row.
Serves as a helper function for making predictions individually for each media
channel, one row at a time. It is meant to be vmapped; otherwise it can
be slow, as it is intended for plotting response curves only. Use
lightweight_mmm.LightweightMMM for regular predict functionality.
Args:
media_mix_model: Media mix model to use for getting the predictions.
mock_media: Mock media for this iteration of predictions.
extra_features: Extra features to use for predictions.
seed: Seed to use for PRNGKey during sampling. For replicability run
this function and any other function that gets predictions with the same
seed.
Returns:
A point estimate for the given data.
"""
return media_mix_model.predict(
media=jnp.expand_dims(mock_media, axis=0),
extra_features=extra_features,
seed=seed).mean(axis=0)
@functools.partial(
jax.jit,
static_argnames=("media_mix_model", "target_scaler"))
def _generate_diagonal_predictions(
media_mix_model: lightweight_mmm.LightweightMMM,
media_values: jnp.ndarray,
extra_features: Optional[jnp.ndarray],
target_scaler: Optional[preprocessing.CustomScaler],
prediction_offset: jnp.ndarray,
seed: Optional[int]):
"""Generates predictions for one value per channel leaving the rest to zero.
This function does the following steps:
- Vmaps the single prediction function on axis=0 of the media arg.
- Diagonalizes the media input values so that each value is represented
alongside zeros for the rest of the channels.
- Generate predictions.
- Unscale prediction if target_scaler is given.
Args:
media_mix_model: Media mix model to use for plotting the response curves.
media_values: Media values.
extra_features: Extra features values.
target_scaler: Scaler used for scaling the target, to unscaled values and
plot in the original scale.
prediction_offset: The value of a prediction of an all zero media input.
seed: Seed to use for PRNGKey during sampling. For replicability run
this function and any other function that gets predictions with the same
seed.
Returns:
The predictions for the given data.
"""
make_predictions = jax.vmap(fun=_make_single_prediction,
in_axes=(None, 0, None, None))
diagonal = jnp.eye(media_values.shape[0])
if media_values.ndim == 2: # Only two since we only provide one row
diagonal = jnp.expand_dims(diagonal, axis=-1)
media_values = jnp.expand_dims(media_values, axis=0)
diag_media_values = diagonal * media_values
predictions = make_predictions(
media_mix_model,
diag_media_values,
extra_features,
seed) - prediction_offset
predictions = jnp.squeeze(predictions)
if target_scaler:
predictions = target_scaler.inverse_transform(predictions)
if predictions.ndim == 2:
predictions = jnp.sum(predictions, axis=-1)
return predictions
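# Numeric sketch (added, illustrative) of the diagonalisation step above:
# multiplying an identity matrix by one row of per-channel values yields one
# row per channel in which only that channel is non-zero, which is what lets
# each channel's response be predicted in isolation.
#
#   media_values = jnp.array([10., 20., 30.])   # 3 channels
#   jnp.eye(3) * media_values
#   # -> [[10.,  0.,  0.],
#   #     [ 0., 20.,  0.],
#   #     [ 0.,  0., 30.]]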
def _calculate_number_rows_plot(n_media_channels: int, n_columns: int):
"""Calculates the number of rows of plots needed to fit n + 1 plots in n_cols.
Args:
n_media_channels: Number of media channels. The total of plots needed is
n_media_channels + 1.
n_columns: Number of columns in the plot grid.
Returns:
The number of rows of plots needed to fit n + 1 plots in n cols
"""
if n_media_channels % n_columns == 0:
return n_media_channels // n_columns + 1
return n_media_channels // n_columns + 2
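# Worked example (added, illustrative): with 6 channels and 3 columns the
# channel plots fill exactly 2 rows and the combined plot needs one more, so
# 6 // 3 + 1 = 3 rows; with 7 channels, 7 // 3 + 2 = 4 rows are needed.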
def plot_response_curves(
media_mix_model: lightweight_mmm.LightweightMMM,
media_scaler: Optional[preprocessing.CustomScaler] = None,
target_scaler: Optional[preprocessing.CustomScaler] = None,
prices: jnp.ndarray = None,
optimal_allocation_per_timeunit: Optional[jnp.ndarray] = None,
steps: int = 50,
percentage_add: float = 0.2,
apply_log_scale: bool = False,
figure_size: Tuple[int, int] = (10, 14),
n_columns: int = 3,
marker_size: int = 8,
legend_fontsize: int = 8,
seed: Optional[int] = None) -> matplotlib.figure.Figure:
"""Plots the response curves of each media channel based on the model.
It plots an individual subplot for each media channel. If
optimal_allocation_per_timeunit is given, it uses it to add markers based on
historic average spend and the given optimal one on each of the individual
subplots.
It then plots a combined plot with all the response curves which can be
changed to log scale if apply_log_scale is True.
Args:
media_mix_model: Media mix model to use for plotting the response curves.
media_scaler: Scaler that was used to scale the media data before training.
target_scaler: Scaler used for scaling the target, to unscaled values and
plot in the original scale.
prices: Prices to translate the media units to spend. If all your data is
already in spend numbers you can leave this as None. If some of your data
is media spend and others is media unit, leave the media spend with price
1 and add the price to the media unit channels.
optimal_allocation_per_timeunit: Optimal allocation per time unit per media
channel. This can be obtained by running the optimization provided by
LightweightMMM.
steps: Number of steps to simulate.
percentage_add: Percentage to exceed the maximum historic spend for the
simulation of the response curve.
apply_log_scale: Whether to apply the log scale to the predictions (Y axis).
When some media channels have a very large scale compared to others it might
be useful to use apply_log_scale=True. Default is False.
figure_size: Size of the plot figure.
n_columns: Number of columns to display in the subplots grid. Modifying this
parameter might require adjusting figure_size accordingly for the plot
to still have reasonable structure.
marker_size: Size of the marker for the optimization annotations. Only
useful if optimal_allocation_per_timeunit is not None. Default is 8.
legend_fontsize: Legend font size for individual subplots.
seed: Seed to use for PRNGKey during sampling. For replicability run
this function and any other function that gets predictions with the same
seed.
Returns:
Plots of response curves.
"""
if not hasattr(media_mix_model, "trace"):
raise lightweight_mmm.NotFittedModelError(
"Model needs to be fit first before attempting to plot its response "
"curves.")
media = media_mix_model.media
media_maxes = media.max(axis=0) * (1 + percentage_add)
if media_mix_model._extra_features is not None:
extra_features = jnp.expand_dims(
media_mix_model._extra_features.mean(axis=0), axis=0)
else:
extra_features = None
media_ranges = jnp.expand_dims(
jnp.linspace(start=0, stop=media_maxes, num=steps), axis=0)
make_predictions = jax.vmap(
jax.vmap(_make_single_prediction,
in_axes=(None, 0, None, None),
out_axes=0),
in_axes=(None, 0, None, None), out_axes=1)
diagonal = jnp.repeat(
jnp.eye(media_mix_model.n_media_channels), steps,
axis=0).reshape(media_mix_model.n_media_channels, steps,
media_mix_model.n_media_channels)
prediction_offset = media_mix_model.predict(
media=jnp.zeros((1, *media.shape[1:])),
extra_features=extra_features).mean(axis=0)
if media.ndim == 3:
diagonal = jnp.expand_dims(diagonal, axis=-1)
prediction_offset = jnp.expand_dims(prediction_offset, axis=0)
mock_media = media_ranges * diagonal
predictions = jnp.squeeze(a=make_predictions(media_mix_model,
mock_media,
extra_features,
seed))
predictions = predictions - prediction_offset
media_ranges = jnp.squeeze(media_ranges)
if target_scaler:
predictions = target_scaler.inverse_transform(predictions)
if media_scaler:
media_ranges = media_scaler.inverse_transform(media_ranges)
if prices is not None:
if media.ndim == 3:
prices = jnp.expand_dims(prices, axis=-1)
media_ranges *= prices
if predictions.ndim == 3:
media_ranges = jnp.sum(media_ranges, axis=-1)
predictions = jnp.sum(predictions, axis=-1)
if optimal_allocation_per_timeunit is not None:
average_allocation = media_mix_model.media.mean(axis=0)
average_allocation_predictions = _generate_diagonal_predictions(
media_mix_model=media_mix_model,
media_values=average_allocation,
extra_features=extra_features,
target_scaler=target_scaler,
prediction_offset=prediction_offset,
seed=seed)
optimal_allocation_predictions = _generate_diagonal_predictions(
media_mix_model=media_mix_model,
media_values=optimal_allocation_per_timeunit,
extra_features=extra_features,
target_scaler=target_scaler,
prediction_offset=prediction_offset,
seed=seed)
if media_scaler:
average_allocation = media_scaler.inverse_transform(average_allocation)
optimal_allocation_per_timeunit = media_scaler.inverse_transform(
optimal_allocation_per_timeunit)
if prices is not None:
optimal_allocation_per_timeunit *= prices
average_allocation *= prices
if media.ndim == 3:
average_allocation = jnp.sum(average_allocation, axis=-1)
optimal_allocation_per_timeunit = jnp.sum(
optimal_allocation_per_timeunit, axis=-1)
kpi_label = "KPI" if target_scaler else "Normalized KPI"
fig = plt.figure(media_mix_model.n_media_channels + 1,
figsize=figure_size,
tight_layout=True)
n_rows = _calculate_number_rows_plot(
n_media_channels=media_mix_model.n_media_channels, n_columns=n_columns)
last_ax = fig.add_subplot(n_rows, 1, n_rows)
for i in range(media_mix_model.n_media_channels):
ax = fig.add_subplot(n_rows, n_columns, i + 1)
sns.lineplot(
x=media_ranges[:, i],
y=predictions[:, i],
label=media_mix_model.media_names[i],
color=sns.color_palette()[i],
ax=ax)
sns.lineplot(
x=media_ranges[:, i],
y=jnp.log(predictions[:, i]) if apply_log_scale else predictions[:, i],
label=media_mix_model.media_names[i],
color=sns.color_palette()[i],
ax=last_ax)
if optimal_allocation_per_timeunit is not None:
ax.plot(
average_allocation[i],
average_allocation_predictions[i],
marker="o",
markersize=marker_size,
label="avg_spend",
color=sns.color_palette()[i])
ax.plot(
optimal_allocation_per_timeunit[i],
optimal_allocation_predictions[i],
marker="x",
markersize=marker_size + 2,
label="optimal_spend",
color=sns.color_palette()[i])
ax.set_ylabel(kpi_label)
ax.set_xlabel("Normalized Spend" if not media_scaler else "Spend")
ax.legend(fontsize=legend_fontsize)
fig.suptitle("Response curves", fontsize=20)
last_ax.set_ylabel(kpi_label if not apply_log_scale else f"log({kpi_label})")
last_ax.set_xlabel("Normalized spend per channel"
if not media_scaler else "Spend per channel")
plt.close()
return fig
def plot_cross_correlate(feature: jnp.ndarray,
target: jnp.ndarray,
maxlags: int = 10) -> Tuple[int, float]:
"""Plots the cross correlation coefficients between 2 vectors.
In the chart look for positive peaks, this shows how the | |
or int(isValue) == 0) :
# save the header if one is defined for THIS class of objects.
sqlStr = "INSERT INTO " + self._headStruct.getTableName() +" ( " + headParamsObj.strListAttrNames + ") VALUES " +\
"( " + headParamsObj.strListAttrValues + " ) returning " + self._headStruct.getIdFieldName() + "; "
headValue = self.getToInsertValue( self._headStruct.getLisAttr())
_loDb.execute(sqlStr, tuple(headValue) ) # 'dt_header_type','public_key'
sourse = _loDb.fetchone()
self.__dict__[self._headStruct.getIdFieldName()] = sourse[self._headStruct.getIdFieldName()]
elif self._isHeaderEdit and self.getValueIdFieldName(self._headStruct) > 0 :
# Заголовок Объекта поменялся! - его надо сохранить!
if self._headStruct.getLisAttr() != None:
listSet = self.splitAttributes2Update (self._headStruct.getLisAttr())
listWhere = self.splitAttributes2Update (self._headStruct.getMainPrimaryList()) # mainPrimaryList
if len(listSet.listAttrValues) > 0 and len(listWhere.listAttrValues) > 0:
whereStr = ' AND '.join(listWhere.listSetAttrNames)
strSet = ' , '.join(listSet.listSetAttrNames)
sqlStr = "UPDATE " + self._headStruct.getTableName() + " SET " + strSet + " WHERE " + whereStr
toValueList = []
toValueList.extend(listSet.listAttrValues)
toValueList.extend(listWhere.listAttrValues)
_loDb.execute(sqlStr, tuple(toValueList))
if isValue != None and int(isValue) > 0 :
list = []
if self._dataStruct.getMainPrimaryList() != None:
# Build the WHERE conditions from all the fields listed in mainPrimaryList
list = self.getList2Update (self._dataStruct.getMainPrimaryList())
dataParamseObj = self.splitAttributes(self._dataStruct.getLisAttr())
if len(list) > 0:
whereStr = ' AND '.join(list)
# All revisions of THIS record are now obsolete - update the list of revisions
sqlStr = "UPDATE " + self._dataStruct.getTableName() + " SET actual_flag = 'O' WHERE " + whereStr
logging.info(' SAVE:: sqlStr = ' + str(sqlStr))
_loDb.execute(sqlStr)
operation_timestamp = datetime.now()
sha_hash = hashlib.sha256(
tornado.escape.utf8(revisions_sha_hash_source + str(operation_timestamp) )
).hexdigest()
returningStr = ''
if self._dataStruct.getIdFieldName() != None:
returningStr = " returning " + self._dataStruct.getIdFieldName()
# Now the new data can be written into the revisions table.
dataParamseObj = self.splitAttributes(self._dataStruct.getLisAttr())
dataParamseObj.strListAttrNames += ', actual_flag, revision_author_id, operation_flag, sha_hash, operation_timestamp '
dataParamseObj.strListAttrValues += ", %s, %s, %s, %s, %s "
sqlStr = "INSERT INTO " + self._dataStruct.getTableName() +" ( " + dataParamseObj.strListAttrNames + ") VALUES " +\
"( " + dataParamseObj.strListAttrValues + " ) " +\
" ON CONFLICT (sha_hash) DO UPDATE SET actual_flag = 'A' " + returningStr + ' ;'
dataValue = self.getToInsertValue( self._dataStruct.getLisAttr())
dataValue += ['A', autorId, operationFlag, sha_hash, operation_timestamp]
logging.info(' SAVE:: 2 sqlStr = ' + str(sqlStr))
logging.info(' SAVE:: 2 dataValue = ' + str(dataValue))
_loDb.execute(sqlStr, tuple(dataValue))
# logging.error(' save AFTER SAVE self:: ' + str (self) )
# if this is an article, then we need to save the article and get back its ID!
if returningStr != '':
sourse = _loDb.fetchone()
self.__dict__[self._dataStruct.getIdFieldName()] = sourse[self._dataStruct.getIdFieldName()]
# self.commit()
self.commit()
# logging.info(' SAVE:: 2 self._dataStruct.getIdFieldName() = ' + str(self._dataStruct.getIdFieldName()))
# logging.info(' SAVE:: 2 self._headStruct.getIdFieldName() = ' + str(self._headStruct.getIdFieldName()))
if self._dataStruct.getIdFieldName():
return self.__dict__[self._dataStruct.getIdFieldName()]
elif self._headStruct.getIdFieldName():
return self.__dict__[self._headStruct.getIdFieldName()]
except psycopg2.Error as error:
logging.error(' save exception:: ' + str (error) )
logging.error(' save exception:: sqlStr = ' + sqlStr )
logging.error("Exception occurred", exc_info=True)
# _loDb.rollback()
self.rollback()
raise WikiException(error)
def select(self,
selectStr, # string - what we want the select to return
addTables, # string - list of ADDITIONAL tables (the object's main table is set at initialization)
anyParams = {} # all the remaining sections of the select
):
"""
fetch data (select)
- the statement is assembled from several sections:
- select (the set of fields we want back from the query)
- from (the set of additional tables, besides the main one, to select from)
anyParams = {
'joinStr': '', # string - list of joined tables
'whereStr': '', # string - set of conditions for filtering rows
'groupStr': '', # string - grouping
'orderStr': '', # string - row ordering
'limitStr': '' # string - page/limit of the result set
}
- join
- where
- order
- group
- limit, roughly like that;
a full select may look something like this:
SELECT
users.author_id,
users.author_login,
users.author_name,
users.author_role,
users.author_phon,
users.author_email,
users.author_external
FROM users
WHERE (author_login = "login" OR author_email = "login" )
AND author_pass = <PASSWORD>9t2DJ7rqm1bwB/PrsH0."
"""
try:
_loDb = self.cursor()
sqlStr = 'SELECT '+ selectStr
if addTables != None:
sqlStr += ' FROM ' + self._dataStruct.getTableName()
if addTables != '': sqlStr += ', ' + str(addTables)
if str(anyParams.get('joinStr', '')) != '': sqlStr += ' ' + str(anyParams.get('joinStr'))
if str(anyParams.get('whereStr', '')) != '': sqlStr += ' WHERE ' + str(anyParams.get('whereStr'))
if str(anyParams.get('groupStr', '')) != '': sqlStr += ' GROUP BY ' + str(anyParams.get('groupStr'))
if str(anyParams.get('orderStr', '')) != '': sqlStr += ' ORDER BY ' + str(anyParams.get('orderStr'))
if str(anyParams.get('limitStr', '')) != '': sqlStr += ' LIMIT ' + str(anyParams.get('limitStr'))
logging.error(' select :: sqlStr = ' + str (sqlStr) )
_loDb.execute(sqlStr)
sourse = _loDb.fetchall()
outListObj = self.dict2obj(sourse)
return outListObj
except psycopg2.Error as error:
logging.error(' select exception:: sqlStr = ' + sqlStr )
self.rollback()
raise WikiException(error)
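# Usage sketch (added; illustrative only - table and column names are hypothetical):
#
#   rows = obj.select(
#       'author_id, author_name',        # selectStr: fields to return
#       '',                              # addTables: no extra tables beyond the main one
#       {'whereStr': "author_role = 'admin'",
#        'orderStr': 'author_name',
#        'limitStr': '20'})
#
# The result is a list of objects of the same class, built via dict2obj().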
def rowSelect(self,
selectRow, # строка - селект
):
"""
fetch data (select)
- from a plain, ready-made select statement passed in as a STRING
"""
try:
_loDb = self.cursor()
logging.info('select:: list:: selectRow = ' + str (selectRow) )
_loDb.execute(selectRow)
sourse = _loDb.fetchall()
outListObj = self.dict2obj(sourse)
return outListObj
except psycopg2.Error as error:
logging.error(' rowSelect exception:: selectRow = ' + selectRow )
self.rollback()
raise WikiException(error)
def getToInsertValue(self, listTableFields):
"""
get the list of the actual attribute values of this object
at the moment the method is executed;
callers pass the result to execute() as a tuple
"""
objValuesNameList = list(self.__dict__.keys())
listAttrValues = []
for objValue in objValuesNameList:
if objValue.find('_') != 0 and (objValue) in listTableFields :
listAttrValues.append(self.__getattribute__(objValue))
return listAttrValues
def getList2Update(self, mainPrimaryList):
"""
build the set of 'field = value' pairs used when updating values
"""
listAttrValues = []
for objValue in mainPrimaryList:
listAttrValues.append(objValue + ' = ' + str(self.__getattribute__(objValue)) )
return listAttrValues
def splitAttributes(self, listTableFields):
"""
split the object's own attributes (excluding names starting with an underscore) into 2 lists -
1 - the list of attribute names
2 - the list of attribute values
this is needed so that all attributes can be used when inserting
or updating data in the database.
The input is the list of table fields;
out of the full set of the object's attributes, only those present in the input list are taken.
The output is an object holding the two lists (plus their string forms)
"""
# objDict = self.__dict__
objValuesNameList = list(self.__dict__.keys())
class Out: pass
out = Out()
out.listAttrNames = []
out.listAttrValues = []
for objValue in objValuesNameList:
if objValue.find('_') != 0 and (objValue) in listTableFields :
out.listAttrNames.append(objValue)
out.listAttrValues.append(self.__getattribute__(objValue))
out.strListAttrNames = ", ".join(out.listAttrNames)
out.strListAttrValues = ", ".join([ '%s' for row in out.listAttrNames]) # "'" + "', '".join(map(str,listAttrValues)) + "'"
return out
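# Illustrative sketch (added) of what splitAttributes() produces; the attribute
# names are hypothetical. For an object with author_login='demo' and
# author_name='Demo User', and a listTableFields containing both names:
#
#   out.listAttrNames     == ['author_login', 'author_name']
#   out.listAttrValues    == ['demo', 'Demo User']
#   out.strListAttrNames  == 'author_login, author_name'
#   out.strListAttrValues == '%s, %s'
#
# which plugs straight into "INSERT INTO t (<names>) VALUES (<placeholders>)"
# with listAttrValues passed as the parameter tuple.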
def splitAttributes2Update(self, listTableFields):
"""
For generating an UPDATE statement:
split the object's own attributes (excluding names starting with an underscore) into 2 lists -
1 - the list of attribute names
2 - the list of attribute values
this is needed so that all attributes can be used when inserting
or updating data in the database.
The input is the list of table fields;
out of the full set of the object's attributes, only those present in the input list are taken.
The output is an object holding the two lists
"""
objValuesNameList = list(self.__dict__.keys())
class Out: pass
out = Out()
out.listAttrValues = list()
out.listSetAttrNames = list()
for objValue in objValuesNameList:
if objValue.find('_') != 0 and (objValue) in listTableFields :
out.listAttrValues.append(self.__getattribute__(objValue))
out.listSetAttrNames.append( ' ' + objValue + ' = %s' )
return out
def dict2obj(self, dictSou):
"""
convert query rows (dict-like records returned by a select) into a list of objects of this class
"""
oList = []
if len(dictSou) == 0: return oList
for row in dictSou:
# logging.info(' dict2obj:: row = ' + str(row))
# logging.info(' dict2obj:: type(row) = ' + str(type(row)))
rowDict = dict(row)
# logging.info(' dict2obj:: rowDict = ' + str(rowDict))
oneObj = self.__class__()
for key in rowDict.items(): #.__getattribute__(name):
# logging.info(' dict2obj:: key = ' + str(key))
oneObj.__setattr__(key[0], key[1])
oList.append(oneObj)
return oList
def getValueIdFieldName (self, vStruct):
try:
idName = vStruct.getIdFieldName()
if idName != None:
return self.__dict__[idName]
except:
return None
def splitAttributes2Str(self):
"""
split the object's own attributes (excluding names starting with an underscore) into 2 lists -
1 - the list of attribute names
2 - the list of attribute values
this is needed so that all attributes can be used when inserting
or updating
== flop[2]+1 and x[1] == flop[2]-1) or (x[0] == flop[2]+2 and x[1] == flop[2]+1))]
if board_type == "two-tone":
my_hands_s_3_to_straight_low_end_bdfd = [(x, 1) for x in my_hands[1] if x[0] != 13 and (x[0] not in flop and x[1] not in flop) and ((x[0] == flop[0]-1 and x[1] == flop[0]-2) or (x[0] == flop[1]-1 and x[1] == flop[1]-2) or (x[0] == flop[2]-1 and x[1] == flop[2]-2))]
my_hands_o_3_to_straight_low_end_bdfd = [(x, 6) for x in my_hands[2] if x[0] != 13 and (x[0] not in flop and x[1] not in flop) and ((x[0] == flop[0]-1 and x[1] == flop[0]-2) or (x[0] == flop[1]-1 and x[1] == flop[1]-2) or (x[0] == flop[2]-1 and x[1] == flop[2]-2))]
elif board_type == "rainbow":
my_hands_s_3_to_straight_low_end_bdfd = [(x, 3) for x in my_hands[1] if x[0] != 13 and (x[0] not in flop and x[1] not in flop) and ((x[0] == flop[0]-1 and x[1] == flop[0]-2) or (x[0] == flop[1]-1 and x[1] == flop[1]-2) or (x[0] == flop[2]-1 and x[1] == flop[2]-2))]
my_hands_o_3_to_straight_low_end_bdfd = []
else:
my_hands_s_3_to_straight_low_end_bdfd = []
my_hands_o_3_to_straight_low_end_bdfd = []
if board_type == "two-tone":
my_hands_s_3_to_straight_low_end = [(x, 4) for x in my_hands[1] if x[0] != 13 and (x[0] not in flop and x[1] not in flop) and ((x[0] == flop[0]-1 and x[1] == flop[0]-2) or (x[0] == flop[1]-1 and x[1] == flop[1]-2) or (x[0] == flop[2]-1 and x[1] == flop[2]-2))]
my_hands_o_3_to_straight_low_end = [(x, 12) for x in my_hands[2] if x[0] != 13 and (x[0] not in flop and x[1] not in flop) and ((x[0] == flop[0]-1 and x[1] == flop[0]-2) or (x[0] == flop[1]-1 and x[1] == flop[1]-2) or (x[0] == flop[2]-1 and x[1] == flop[2]-2))]
elif board_type == "rainbow":
my_hands_s_3_to_straight_low_end = [(x, 4) for x in my_hands[1] if x[0] != 13 and (x[0] not in flop and x[1] not in flop) and ((x[0] == flop[0]-1 and x[1] == flop[0]-2) or (x[0] == flop[1]-1 and x[1] == flop[1]-2) or (x[0] == flop[2]-1 and x[1] == flop[2]-2))]
my_hands_o_3_to_straight_low_end = [(x, 12) for x in my_hands[2] if x[0] != 13 and (x[0] not in flop and x[1] not in flop) and ((x[0] == flop[0]-1 and x[1] == flop[0]-2) or (x[0] == flop[1]-1 and x[1] == flop[1]-2) or (x[0] == flop[2]-1 and x[1] == flop[2]-2))]
else:
my_hands_s_3_to_straight_low_end = [(x, 4) for x in my_hands[1] if x[0] != 13 and (x[0] not in flop and x[1] not in flop) and ((x[0] == flop[0]-1 and x[1] == flop[0]-2) or (x[0] == flop[1]-1 and x[1] == flop[1]-2) or (x[0] == flop[2]-1 and x[1] == flop[2]-2))]
my_hands_o_3_to_straight_low_end = [(x, 12) for x in my_hands[2] if x[0] != 13 and (x[0] not in flop and x[1] not in flop) and ((x[0] == flop[0]-1 and x[1] == flop[0]-2) or (x[0] == flop[1]-1 and x[1] == flop[1]-2) or (x[0] == flop[2]-1 and x[1] == flop[2]-2))]
if board_type == "two-tone":
my_hands_s_5_unique_cards_within_7_values_bdfd = [(x, 1) for x in my_hands[1] if (x[0] not in flop and x[1] not in flop) and max(flop+x) - min(flop+x) <= 7]
my_hands_o_5_unique_cards_within_7_values_bdfd = [(x, 6) for x in my_hands[2] if (x[0] not in flop and x[1] not in flop) and max(flop+x) - min(flop+x) <= 7]
elif board_type == "rainbow":
my_hands_s_5_unique_cards_within_7_values_bdfd = [(x, 3) for x in my_hands[1] if (x[0] not in flop and x[1] not in flop) and max(flop+x) - min(flop+x) <= 7]
my_hands_o_5_unique_cards_within_7_values_bdfd = []
else:
my_hands_s_5_unique_cards_within_7_values_bdfd = []
my_hands_o_5_unique_cards_within_7_values_bdfd = []
if board_type == "two-tone":
my_hands_pp_q_minus_bdfd = [(x, 3) for x in my_hands[0] if (x[0] not in flop) and x[0] <= 12]
my_hands_s_q_minus_bdfd = [(x, 1) for x in my_hands[1] if (x[0] not in flop and x[1] not in flop) and x[0] <= 12]
my_hands_o_q_minus_bdfd = [(x, 6) for x in my_hands[2] if (x[0] not in flop and x[1] not in flop) and x[0] <= 12]
elif board_type == "rainbow":
my_hands_pp_q_minus_bdfd = []
my_hands_s_q_minus_bdfd = [(x, 3) for x in my_hands[1] if (x[0] not in flop and x[1] not in flop) and x[0] <= 12]
my_hands_o_q_minus_bdfd = []
else:
my_hands_pp_q_minus_bdfd = []
my_hands_s_q_minus_bdfd = []
my_hands_o_q_minus_bdfd = []
#### 3 cards within 4 values with two overcards
my_hands_s_lowest_card_is_one_of_3_cards_within_4_values_and_two_overcards = [(x, 4) for x in my_hands[1] if x[1] > flop[0] and ((max(flop + [x[1]]) - sorted(set(flop + [x[1]] + [-20,-19,-18]))[-3] <= 3) or (max(flop + x) - sorted(set(flop + x + [-20,-19,-18]))[-3] <= 3))]
my_hands_o_lowest_card_is_one_of_3_cards_within_4_values_and_two_overcards = [(x, 12) for x in my_hands[2] if x[1] > flop[0] and ((max(flop + [x[1]]) - sorted(set(flop + [x[1]] + [-20,-19,-18]))[-3] <= 3) or (max(flop + x) - sorted(set(flop + x + [-20,-19,-18]))[-3] <= 3))]
if board_type == "two-tone":
my_hands_pp_a_minus_bdfd = [(x, 3) for x in my_hands[0] if (x[0] not in flop) and x[0] > 12 and x[0] <= 14]
my_hands_s_a_minus_bdfd = [(x, 1) for x in my_hands[1] if (x[0] not in flop and x[1] not in flop) and x[0] > 12 and x[0] <= 14]
my_hands_o_a_minus_bdfd = [(x, 6) for x in my_hands[2] if (x[0] not in flop and x[1] not in flop) and x[0] > 12 and x[0] <= 14]
elif board_type == "rainbow":
my_hands_pp_a_minus_bdfd = []
my_hands_s_a_minus_bdfd = [(x, 3) for x in my_hands[1] if (x[0] not in flop and x[1] not in flop) and x[0] > 12 and x[0] <= 14]
my_hands_o_a_minus_bdfd = []
else:
my_hands_pp_a_minus_bdfd = []
my_hands_s_a_minus_bdfd = []
my_hands_o_a_minus_bdfd = []
# Important note: lower ranked rules may include higher ranked hands
# Also, tp_j_kicker includes trips; that is acceptable because of the theory:
# actions taken by one hand are taken by all better hands within the cat
# ! Just be careful that you remove cat1 hands from final cat2, same with cat3 with both cat1 & 2
# Might want to QA with a hand-matrix coloring comparison against the existing matrix based on default rules
# Cat1
#### Assuming no flushes (monotone boards) for simplicity
opponents_hands_s_straight = [] if is_paired else [(x, 4) for x in opponents_hands[1] if max(x + flop) - min(x + flop) == 4 or max([1 if y == 14 else y for y in (x + flop)]) - min([1 if y == 14 else y for y in (x + flop)]) == 4]
opponents_hands_o_straight = [] if is_paired else [(x, 12) for x in opponents_hands[2] if max(x + flop) - min(x + flop) == 4 or max([1 if y == 14 else y for y in (x + flop)]) - min([1 if y == 14 else y for y in (x + flop)]) == 4]
opponents_hands_pp_sets = [(x, 3) for x in opponents_hands[0] if x[0] in flop]
opponents_hands_s_trips = [] if not is_paired else [(x, 2) for x in opponents_hands[1] if x[0] == paired_value or x[1] == paired_value]
opponents_hands_o_trips = [] if not is_paired else [(x, 6) for x in opponents_hands[2] if x[0] == paired_value or x[1] == paired_value]
# 2 combos most times, not 3; 7 more often than 6
opponents_hands_s_two_pair = [] if is_paired else [(x, 2) for x in opponents_hands[1] if x[0] in flop and x[1] in flop]
opponents_hands_o_two_pair = [] if is_paired else [(x, 7) for x in opponents_hands[2] if x[0] in flop and x[1] in flop]
opponents_hands_pp_overpair_9plus = [(x, 6) for x in opponents_hands[0] if x[0] > flop[0] and x[0] >= 9]
opponents_hands_pp_any_overpair = [(x, 6) for x in opponents_hands[0] if x[0] > flop[0]]
opponents_hands_s_tp_k_kicker = [(x, 3) for x in opponents_hands[1] if (x[0] == flop[0] and x[1] >= 13) or (x[1] == flop[0] and x[0] >= 13)]
opponents_hands_o_tp_k_kicker = [(x, 9) for x in opponents_hands[2] if (x[0] == flop[0] and x[1] >= 13) or (x[1] == flop[0] and x[0] >= 13)]
opponents_hands_s_tp_j_kicker = [(x, 3) for x in opponents_hands[1] if (x[0] == flop[0] and x[1] >= 11 and x[1] <= 12 and x[1] not in flop) or (x[1] == flop[0] and x[0] >= 11 and x[0] <= 12 and x[0] not in flop)]
opponents_hands_o_tp_j_kicker = [(x, 9) for | |
*args) -> "void":
r"""
*Overload 1:*
获取椭圆属性
|
*Overload 2:*
设置椭圆属性
|
*Overload 3:*
设置椭圆属性
"""
return _gskernel.GsArcPointSymbol_EllipseParameter(self, *args)
def LongAxis(self, *args) -> "void":
r"""
*Overload 1:*
获取长轴长度
|
*Overload 2:*
设置长轴长度
"""
return _gskernel.GsArcPointSymbol_LongAxis(self, *args)
def ShortAxis(self, *args) -> "void":
r"""
*Overload 1:*
获取短轴长度
|
*Overload 2:*
设置短轴长度
"""
return _gskernel.GsArcPointSymbol_ShortAxis(self, *args)
def Size(self, *args) -> "void":
r"""
*Overload 1:*
获取符号大小
|
*Overload 2:*
设置符号大小
"""
return _gskernel.GsArcPointSymbol_Size(self, *args)
def Envelope(self) -> "GsBox":
r""" 点符号的矩形范围"""
return _gskernel.GsArcPointSymbol_Envelope(self)
def __init__(self, *args):
_gskernel.GsArcPointSymbol_swiginit(self, _gskernel.new_GsArcPointSymbol(*args))
__swig_destroy__ = _gskernel.delete_GsArcPointSymbol
def StartAngle(self, *args) -> "void":
r"""
*Overload 1:*
截取圆中arc的起始角度
|
*Overload 2:*
设置截取圆中arc的起始角度
"""
return _gskernel.GsArcPointSymbol_StartAngle(self, *args)
def EndAngle(self, *args) -> "void":
r"""
*Overload 1:*
截取圆中arc的终止角度
|
*Overload 2:*
设置截取圆中arc的终止角度
"""
return _gskernel.GsArcPointSymbol_EndAngle(self, *args)
def LineWidth(self, *args) -> "void":
r"""
*Overload 1:*
符号的线宽
|
*Overload 2:*
设置符号的线宽
"""
return _gskernel.GsArcPointSymbol_LineWidth(self, *args)
# Register GsArcPointSymbol in _gskernel:
_gskernel.GsArcPointSymbol_swigregister(GsArcPointSymbol)
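# Hypothetical usage sketch (added; not part of the generated bindings). It only
# uses methods declared above and assumes the single-argument setter overloads
# behave as their docstrings describe:
#
#   sym = GsArcPointSymbol()
#   sym.StartAngle(0.0)     # start angle of the arc
#   sym.EndAngle(180.0)     # end angle of the arc
#   sym.LineWidth(0.5)      # stroke width
#   box = sym.Envelope()    # bounding rectangle of the symbol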
class GsLinePointSymbol(GsPointSymbol):
r""" 线的点符号。"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
r"""
*Overload 1:*
默认构造函数
|
*Overload 2:*
从颜色和线宽构造
"""
_gskernel.GsLinePointSymbol_swiginit(self, _gskernel.new_GsLinePointSymbol(*args))
__swig_destroy__ = _gskernel.delete_GsLinePointSymbol
def StartPoint(self, *args) -> "GsRawPoint":
r"""
*Overload 1:*
设置LinePoint的起点坐标
|
*Overload 2:*
获取LinePoint的起点
"""
return _gskernel.GsLinePointSymbol_StartPoint(self, *args)
def EndPoint(self, *args) -> "GsRawPoint":
r"""
*Overload 1:*
设置LinePoint的终点坐标
|
*Overload 2:*
获取LinePoint的终点
"""
return _gskernel.GsLinePointSymbol_EndPoint(self, *args)
def Width(self, *args) -> "double":
r"""
*Overload 1:*
设置线宽度
|
*Overload 2:*
获取线宽度
"""
return _gskernel.GsLinePointSymbol_Width(self, *args)
def Size(self, *args) -> "void":
r"""
*Overload 1:*
符号大小
|
*Overload 2:*
设置符号大小
"""
return _gskernel.GsLinePointSymbol_Size(self, *args)
def Envelope(self) -> "GsBox":
r""" 点符号的矩形范围"""
return _gskernel.GsLinePointSymbol_Envelope(self)
# Register GsLinePointSymbol in _gskernel:
_gskernel.GsLinePointSymbol_swigregister(GsLinePointSymbol)
class GsCurvelinePointSymbol(GsPointSymbol):
r""" 曲线点符号"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def CurvePoints(self, *args) -> "void":
r"""
*Overload 1:*
获取节点类型数组
|
*Overload 2:*
获取的节点类型数组
|
*Overload 3:*
设置点
"""
return _gskernel.GsCurvelinePointSymbol_CurvePoints(self, *args)
def Size(self, *args) -> "double":
r"""
*Overload 1:*
设置符号大小
|
*Overload 2:*
设置符号大小
"""
return _gskernel.GsCurvelinePointSymbol_Size(self, *args)
def Envelope(self) -> "GsBox":
r""" 符号的矩形范围"""
return _gskernel.GsCurvelinePointSymbol_Envelope(self)
def __init__(self, *args):
_gskernel.GsCurvelinePointSymbol_swiginit(self, _gskernel.new_GsCurvelinePointSymbol(*args))
__swig_destroy__ = _gskernel.delete_GsCurvelinePointSymbol
def Width(self, *args) -> "void":
r"""
*Overload 1:*
获取线宽度
|
*Overload 2:*
设置线宽度
"""
return _gskernel.GsCurvelinePointSymbol_Width(self, *args)
# Register GsCurvelinePointSymbol in _gskernel:
_gskernel.GsCurvelinePointSymbol_swigregister(GsCurvelinePointSymbol)
class GsPolygonPointSymbol(GsSurfacePointSymbol):
r""" 面点符号"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def CurvePoints(self, *args) -> "void":
r"""
*Overload 1:*
获取节点类型数组
|
*Overload 2:*
获取的节点类型数组
|
*Overload 3:*
设置点
"""
return _gskernel.GsPolygonPointSymbol_CurvePoints(self, *args)
def Size(self, *args) -> "double":
r"""
*Overload 1:*
设置符号大小
|
*Overload 2:*
设置符号大小
"""
return _gskernel.GsPolygonPointSymbol_Size(self, *args)
def Envelope(self) -> "GsBox":
r""" 符号的矩形范围"""
return _gskernel.GsPolygonPointSymbol_Envelope(self)
def __init__(self, *args):
_gskernel.GsPolygonPointSymbol_swiginit(self, _gskernel.new_GsPolygonPointSymbol(*args))
__swig_destroy__ = _gskernel.delete_GsPolygonPointSymbol
# Register GsPolygonPointSymbol in _gskernel:
_gskernel.GsPolygonPointSymbol_swigregister(GsPolygonPointSymbol)
eAsteristkOrdination = _gskernel.eAsteristkOrdination
r""" 普通型"""
eAsteristkCross = _gskernel.eAsteristkCross
r""" 交叉型"""
eAsteristkInternal = _gskernel.eAsteristkInternal
r""" 内接型"""
eAsteristkRadiation = _gskernel.eAsteristkRadiation
r""" 辐射型"""
class GsEllipseSurfacePointSymbol(GsSurfacePointSymbol):
r""" 类椭圆点符号基类"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_destroy__ = _gskernel.delete_GsEllipseSurfacePointSymbol
def EllipseParameter(self, *args) -> "void":
r"""
*Overload 1:*
获取椭圆属性
|
*Overload 2:*
设置椭圆属性
|
*Overload 3:*
设置椭圆属性
"""
return _gskernel.GsEllipseSurfacePointSymbol_EllipseParameter(self, *args)
def LongAxis(self, *args) -> "void":
r"""
*Overload 1:*
获取长轴长度
|
*Overload 2:*
设置长轴长度
"""
return _gskernel.GsEllipseSurfacePointSymbol_LongAxis(self, *args)
def ShortAxis(self, *args) -> "void":
r"""
*Overload 1:*
获取短轴长度
|
*Overload 2:*
设置短轴长度
"""
return _gskernel.GsEllipseSurfacePointSymbol_ShortAxis(self, *args)
def Size(self, *args) -> "void":
r"""
*Overload 1:*
获取符号大小
|
*Overload 2:*
设置符号大小
"""
return _gskernel.GsEllipseSurfacePointSymbol_Size(self, *args)
def Envelope(self) -> "GsBox":
r""" 点符号的矩形范围"""
return _gskernel.GsEllipseSurfacePointSymbol_Envelope(self)
# Register GsEllipseSurfacePointSymbol in _gskernel:
_gskernel.GsEllipseSurfacePointSymbol_swigregister(GsEllipseSurfacePointSymbol)
class GsAsteriskPointSymbol(GsEllipseSurfacePointSymbol):
r""" 星形点符号"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def EllipseParameter(self, *args) -> "void":
r"""
*Overload 1:*
获取椭圆属性
|
*Overload 2:*
设置椭圆属性
|
*Overload 3:*
设置椭圆属性
"""
return _gskernel.GsAsteriskPointSymbol_EllipseParameter(self, *args)
def LongAxis(self, *args) -> "void":
r"""
*Overload 1:*
获取长轴长度
|
*Overload 2:*
设置长轴长度
"""
return _gskernel.GsAsteriskPointSymbol_LongAxis(self, *args)
def ShortAxis(self, *args) -> "void":
r"""
*Overload 1:*
获取短轴长度
|
*Overload 2:*
设置短轴长度
"""
return _gskernel.GsAsteriskPointSymbol_ShortAxis(self, *args)
def Size(self, *args) -> "void":
r"""
*Overload 1:*
获取符号大小
|
*Overload 2:*
设置符号大小
"""
return _gskernel.GsAsteriskPointSymbol_Size(self, *args)
def Envelope(self) -> "GsBox":
r""" 点符号的矩形范围"""
return _gskernel.GsAsteriskPointSymbol_Envelope(self)
def __init__(self):
_gskernel.GsAsteriskPointSymbol_swiginit(self, _gskernel.new_GsAsteriskPointSymbol())
__swig_destroy__ = _gskernel.delete_GsAsteriskPointSymbol
def Rate(self, *args) -> "void":
r"""
*Overload 1:*
比例
|
*Overload 2:*
设置比例
"""
return _gskernel.GsAsteriskPointSymbol_Rate(self, *args)
def AsterType(self, *args) -> "void":
r"""
*Overload 1:*
获取星型的类型
|
*Overload 2:*
设置星型的类型
"""
return _gskernel.GsAsteriskPointSymbol_AsterType(self, *args)
def Corners(self, *args) -> "void":
r"""
*Overload 1:*
获取星型角的数量
|
*Overload 2:*
设置星型角的数量
"""
return _gskernel.GsAsteriskPointSymbol_Corners(self, *args)
def InterStartAngle(self, *args) -> "void":
r"""
*Overload 1:*
获取外角的开始角度
|
*Overload 2:*
设置外角的开始角度
"""
return _gskernel.GsAsteriskPointSymbol_InterStartAngle(self, *args)
def InnerStartAngle(self, *args) -> "void":
r"""
*Overload 1:*
获取内角的开始角度
|
*Overload 2:*
设置内角的开始角度
"""
return _gskernel.GsAsteriskPointSymbol_InnerStartAngle(self, *args)
# Register GsAsteriskPointSymbol in _gskernel:
_gskernel.GsAsteriskPointSymbol_swigregister(GsAsteriskPointSymbol)
class GsChordPointSymbol(GsSurfacePointSymbol):
r""" 弦点符号"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def EllipseParameter(self, *args) -> "void":
r"""
*Overload 1:*
获取椭圆属性
|
*Overload 2:*
设置椭圆属性
|
*Overload 3:*
设置椭圆属性
"""
return _gskernel.GsChordPointSymbol_EllipseParameter(self, *args)
def LongAxis(self, *args) -> "void":
r"""
*Overload 1:*
获取长轴长度
|
*Overload 2:*
设置长轴长度
"""
return _gskernel.GsChordPointSymbol_LongAxis(self, *args)
def ShortAxis(self, *args) -> "void":
r"""
*Overload 1:*
获取短轴长度
|
*Overload 2:*
设置短轴长度
"""
return _gskernel.GsChordPointSymbol_ShortAxis(self, *args)
def Envelope(self) -> "GsBox":
r""" 设置符号大小 点符号的矩形范围"""
return _gskernel.GsChordPointSymbol_Envelope(self)
def __init__(self, *args):
_gskernel.GsChordPointSymbol_swiginit(self, _gskernel.new_GsChordPointSymbol(*args))
__swig_destroy__ = _gskernel.delete_GsChordPointSymbol
def StartAngle(self, *args) -> "void":
r"""
*Overload 1:*
截取圆中arc的起始角度
|
*Overload 2:*
设置开始角度
"""
return _gskernel.GsChordPointSymbol_StartAngle(self, *args)
def EndAngle(self, *args) -> "void":
r"""
*Overload 1:*
获取结束角度
|
*Overload 2:*
设置结束角度
"""
return _gskernel.GsChordPointSymbol_EndAngle(self, *args)
def Size(self, dblSize: 'double') -> "void":
r""" 获取符号大小 设置符号大小,跟据设置的大小,调整长短轴"""
return _gskernel.GsChordPointSymbol_Size(self, dblSize)
# Register GsChordPointSymbol in _gskernel:
_gskernel.GsChordPointSymbol_swigregister(GsChordPointSymbol)
class GsCurvegonPointSymbol(GsSurfacePointSymbol):
r""" 由贝塞尔曲线构成的面点符号"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def CurvePoints(self, *args) -> "void":
r"""
*Overload 1:*
获取节点类型数组
|
*Overload 2:*
获取的节点类型数组
|
*Overload 3:*
设置点
"""
return _gskernel.GsCurvegonPointSymbol_CurvePoints(self, *args)
def Size(self, *args) -> "double":
r"""
*Overload 1:*
设置符号大小
|
*Overload 2:*
设置符号大小
"""
return _gskernel.GsCurvegonPointSymbol_Size(self, *args)
def Envelope(self) -> "GsBox":
r""" 符号的矩形范围"""
return _gskernel.GsCurvegonPointSymbol_Envelope(self)
def __init__(self, *args):
_gskernel.GsCurvegonPointSymbol_swiginit(self, _gskernel.new_GsCurvegonPointSymbol(*args))
__swig_destroy__ = _gskernel.delete_GsCurvegonPointSymbol
# Register GsCurvegonPointSymbol in _gskernel:
_gskernel.GsCurvegonPointSymbol_swigregister(GsCurvegonPointSymbol)
ePictureNoKeep = _gskernel.ePictureNoKeep
ePictureKeepWidth = _gskernel.ePictureKeepWidth
ePictureKeepHeight = _gskernel.ePictureKeepHeight
ePictureKeepAll = _gskernel.ePictureKeepAll
class GsPicturePointSymbol(GsPointSymbol):
r""" 图片点符号"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
r"""
*Overload 1:*
无参构造函数
|
*Overload 2:*
路径导入图片构造函数
"""
_gskernel.GsPicturePointSymbol_swiginit(self, _gskernel.new_GsPicturePointSymbol(*args))
def Transparent(self, *args) -> "void":
r"""
*Overload 1:*
获取是否使用透明色
|
*Overload 2:*
设置是否使用透明色
"""
return _gskernel.GsPicturePointSymbol_Transparent(self, *args)
def TransparentColor(self, *args) -> "void":
r"""
*Overload 1:*
获取要透明的颜色
|
*Overload 2:*
设置要透明的颜色
"""
return _gskernel.GsPicturePointSymbol_TransparentColor(self, *args)
def UseBackColor(self, *args) -> "void":
r"""
*Overload 1:*
获取是否使用背景色
|
*Overload 2:*
设置是否使用背景色
"""
return _gskernel.GsPicturePointSymbol_UseBackColor(self, *args)
def BackColor(self, *args) -> "void":
r"""
*Overload 1:*
获取背景色
|
*Overload 2:*
设置背景色
"""
return _gskernel.GsPicturePointSymbol_BackColor(self, *args)
def PictureData(self) -> "GsGrowByteBuffer *":
r""" 获取图片的数据"""
return _gskernel.GsPicturePointSymbol_PictureData(self)
def Picture(self, *args) -> "void":
r"""
*Overload 1:*
获取图片对象
|
*Overload 2:*
设置图片对象
"""
return _gskernel.GsPicturePointSymbol_Picture(self, *args)
def Width(self, *args) -> "void":
r"""
*Overload 1:*
获取图片宽度(单位毫米)
|
*Overload 2:*
设置图片宽度(单位毫米)
"""
return _gskernel.GsPicturePointSymbol_Width(self, *args)
def Height(self, *args) -> "void":
r"""
*Overload 1:*
获取图片高度(单位毫米)
|
*Overload 2:*
设置图片高度(单位毫米)
"""
return _gskernel.GsPicturePointSymbol_Height(self, *args)
def OriginalState(self, *args) -> "void":
r"""
*Overload 1:*
获取图片锁定状态
|
*Overload 2:*
设置图片锁定状态
"""
return _gskernel.GsPicturePointSymbol_OriginalState(self, *args)
def LoadPicture(self, *args) -> "bool":
r"""
*Overload 1:*
从文件载入图片
|
*Overload 2:*
从内存块载入图片。
"""
return _gskernel.GsPicturePointSymbol_LoadPicture(self, *args)
__swig_destroy__ = _gskernel.delete_GsPicturePointSymbol
# Register GsPicturePointSymbol in _gskernel:
_gskernel.GsPicturePointSymbol_swigregister(GsPicturePointSymbol)
class GsCurvePointSymbol(GsPointSymbol):
r""" 支持多个点构成曲线、曲线面的点符号模板基类"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def CurvePoints(self, *args) -> "void":
r"""
*Overload 1:*
获取节点类型数组
|
*Overload 2:*
获取的节点类型数组
|
*Overload 3:*
设置点
"""
return _gskernel.GsCurvePointSymbol_CurvePoints(self, *args)
def Size(self, *args) -> "double":
r"""
*Overload 1:*
设置符号大小
|
*Overload 2:*
获取符号大小
"""
return _gskernel.GsCurvePointSymbol_Size(self, *args)
def Envelope(self) -> "GsBox":
r""" 符号的矩形范围"""
return _gskernel.GsCurvePointSymbol_Envelope(self)
__swig_destroy__ = _gskernel.delete_GsCurvePointSymbol
# Register GsCurvePointSymbol in _gskernel:
_gskernel.GsCurvePointSymbol_swigregister(GsCurvePointSymbol)
class GsPolylinePointSymbol(GsCurvePointSymbol):
r""" 折线点符号"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def CurvePoints(self, *args) -> "void":
r"""
*Overload 1:*
获取节点类型数组
|
*Overload 2:*
获取的节点类型数组
|
| |
# coding:utf-8
# Author: 阿财(<EMAIL>)(<EMAIL>)
# Created date: 2020-02-27
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import pandas as pd
from QUANTAXIS.QAIndicator.base import *
try:
import talib
except:
pass
#print('PLEASE install TALIB to call these methods')
"""
A talib wrapper that passes all parameters as nparray, because nparray avoids the Series MultiIndex problem.
Processing and back-filling are faster than with pandas, and converting to pd.DataFrame/pd.Series only takes one constructor call.
It also adds a few indicators that talib does not ship but that can be implemented well in fewer than 10 lines of code, found on external sites (TradingView or MQ4/5).
For example:
Moving Average ADX
Hull Moving Average
Volume HMA (done)
"""
# Define the MACD function
def TA_MACD(prices:np.ndarray,
fastperiod:int=12,
slowperiod:int=26,
signalperiod:int=9) -> np.ndarray:
'''
Parameter settings:
fastperiod = 12
slowperiod = 26
signalperiod = 9
Returns: macd - dif, signal - dea, hist * 2 - bar, delta
'''
macd, signal, hist = talib.MACD(prices,
fastperiod=fastperiod,
slowperiod=slowperiod,
signalperiod=signalperiod)
hist = (macd - signal) * 2
delta = np.r_[np.nan, np.diff(hist)]
return np.c_[macd, signal, hist, delta]
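# Added usage sketch (illustrative, not part of QUANTAXIS): TA_MACD expects a
# float ndarray of closing prices and returns an (N, 4) array. Wrapping it in a
# DataFrame gives labelled columns; the synthetic price path below is only a
# stand-in for real kline data.
def _demo_ta_macd():
    closes = np.cumsum(np.random.randn(300)) + 100.0   # synthetic closes
    return pd.DataFrame(TA_MACD(closes),
                        columns=['DIF', 'DEA', 'BAR', 'DELTA'])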
# Define the RSI function
def TA_RSI(prices:np.ndarray,
timeperiod:int=12) -> np.ndarray:
'''
Parameter settings:
timeperiod = 12
Returns: rsi, delta
'''
rsi = talib.RSI(prices, timeperiod=timeperiod)
delta = np.r_[np.nan, np.diff(rsi)]
return np.c_[rsi, delta]
# Define the BBANDS (Bollinger Bands) function
def TA_BBANDS(prices:np.ndarray,
timeperiod:int=5,
nbdevup:int=2,
nbdevdn:int=2,
matype:int=0) -> np.ndarray:
'''
Parameter settings:
timeperiod = 5
nbdevup = 2
nbdevdn = 2
Returns: up, middle, low, ch, delta
'''
up, middle, low = talib.BBANDS(prices,
timeperiod,
nbdevup,
nbdevdn,
matype)
ch = (up - low) / middle
delta = np.r_[np.nan, np.diff(ch)]
return np.c_[up, middle, low, ch, delta]
def TA_KDJ(high:np.ndarray,
low:np.ndarray,
close:np.ndarray,
fastk_period:int=9,
slowk_matype:int=0,
slowk_period:int=3,
slowd_period:int=3) -> np.ndarray:
'''
Parameter settings:
fastk_period = 9
slowk_matype = 0,
slowk_period = 3,
slowd_period = 3
Returns: K, D, J, delta
'''
K, D = talib.STOCH(high,
low,
close,
fastk_period=fastk_period,
slowk_matype=slowk_matype,
slowk_period=slowk_period,
slowd_period=slowd_period)
J = 3 * K - 2 * D
delta = np.r_[np.nan, np.diff(J)]
return np.c_[K, D, J, delta]
def TA_ADX(high, low, close, timeperiod=14) -> np.ndarray:
"""
ADX - Average Directional Movement Index
"""
real = talib.ADX(high, low, close, timeperiod=timeperiod)
return np.c_[real]
def TA_ADXR(high, low, close, timeperiod=14) -> np.ndarray:
"""
名称:平均趋向指数的趋向指数
简介:使用ADXR指标,指标判断ADX趋势。
ADXR - Average Directional Movement Index Rating
"""
real = talib.ADXR(high, low, close, timeperiod=timeperiod)
return np.c_[real]
def TA_CCI(high:np.ndarray,
low:np.ndarray,
close:np.ndarray,
timeperiod:int=14) -> np.ndarray:
"""
名称:平均趋向指数的趋向指数
简介:使用CCI指标,指标判断CCI趋势。
CCI - Commodity Channel Index
"""
real = talib.CCI(high,
low,
close,
timeperiod=timeperiod)
delta = np.r_[np.nan, np.diff(real)]
return np.c_[real, delta]
def TA_KAMA(close, timeperiod=30):
"""
请直接用 talib.KAMA(close, timeperiod)
KAMA - Kaufman Adaptive Moving Average
"""
real = talib.KAMA(close, timeperiod=timeperiod)
return np.c_[real]
def TA_HMA(close, period):
"""
赫尔移动平均线(HMA)
Hull Moving Average.
Formula:
HMA = WMA(2*WMA(n/2) - WMA(n)), sqrt(n)
"""
hma = talib.WMA(2 * talib.WMA(close, int(period / 2)) - talib.WMA(close, period), int(np.sqrt(period)))
return hma
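# --- Hedged sketch (assumption, not from the original module) ----------------
# A talib-free Hull Moving Average using pandas' rolling apply, for
# environments where the guarded `import talib` above failed. It follows the
# same formula as TA_HMA: WMA(2*WMA(n/2) - WMA(n), sqrt(n)).
def _hma_pandas(close: np.ndarray, period: int) -> np.ndarray:
    s = pd.Series(close, dtype=float)

    def _wma(x: pd.Series, n: int) -> pd.Series:
        # linear weights 1..n, heaviest on the most recent value
        w = np.arange(1, n + 1, dtype=float)
        return x.rolling(n).apply(lambda v: np.dot(v, w) / w.sum(), raw=True)

    half = max(int(period / 2), 1)
    sqrt_n = max(int(np.sqrt(period)), 1)
    return _wma(2 * _wma(s, half) - _wma(s, period), sqrt_n).values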
def ADX_MA(data, period=14, smooth=14, limit=18):
"""
Moving Average ADX
ADX Smoothing Trend Color Change on Moving Average and ADX Cross. Use on Hourly Charts - Green UpTrend - Red DownTrend - Black Choppy No Trend
Source: https://www.tradingview.com/script/owwws7dM-Moving-Average-ADX/
Translator: 阿财(<EMAIL>)(<EMAIL>)
Parameters
----------
data : (N,) array_like
传入 OHLC Kline 序列。
The OHLC Kline.
period : int or None, optional
DI 统计周期 默认值为 14
DI Length period. Default value is 10.
smooth : int or None, optional
ADX 平滑周期 默认值为 14
ADX smoothing length period. Default value is 10.
limit : int or None, optional
ADX 限制阈值 默认值为 18
ADX MA Active limit threshold. Default value is 18.
Returns
-------
adx, ADXm : ndarray
ADXm 指标和趋势指示方向 (-1, 0, 1) 分别代表 (下跌, 无明显趋势, 上涨)
ADXm indicator and thread directions sequence. (-1, 0, 1) means for (Neagtive, No Trend, Positive)
"""
up = data.high.pct_change()
down = data.low.pct_change() * -1
trur = TA_HMA(talib.TRANGE(data.high.values, data.low.values, data.close.values) , period)
plus = 100 * TA_HMA(np.where(((up > down) & (up > 0)), up, 0), period) / trur
minus = 100 * TA_HMA(np.where(((down > up) & (down > 0)), down, 0), period) / trur
    # Substitute for dropna: since values are passed around as ndarrays, dropping elements at will
    # could get out of sync with data.index, so the leading entries are zero-filled instead of dropped.
plus = np.r_[np.zeros(period + 2), plus[(period + 2):]]
minus = np.r_[np.zeros(period + 2), minus[(period + 2):]]
    di_sum = plus + minus
    adx = 100 * TA_HMA(abs(plus - minus) / (np.where((di_sum == 0), 1, di_sum)), smooth)
adx = np.r_[np.zeros(smooth + 2), adx[(smooth + 2):]]
ADXm = np.where(((adx > limit) & (plus > minus)), 1, np.where(((adx > limit) & (plus < minus)), -1, 0))
return adx, ADXm
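# --- Hedged usage sketch (assumption, not from the original module) ----------
# Shows how ADX_MA is meant to be fed: a DataFrame with 'high', 'low' and
# 'close' columns (attribute access is used inside the function). The
# direction mapping is the one documented above: 1 up, -1 down, 0 no trend.
def _example_adx_ma(ohlc: pd.DataFrame) -> pd.DataFrame:
    adx, adx_m = ADX_MA(ohlc, period=14, smooth=14, limit=18)
    return pd.DataFrame({'ADX': adx, 'DIRECTION': adx_m}, index=ohlc.index)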
def ATR_RSI_Stops(data, period=10):
"""
ATR 趋势判断指标 RSI在40~60区间实现牛熊趋势变化指示
This simple indicator gives you a bias on the market that can be used as a filter, an entry indicator for pullbacks,...
It shows the special relationship I discovered between the rsi and the 27 period ema
and the relation between the 40/60 levels of the rsi and the atr offset of the 27 ema line
Source: https://cn.tradingview.com/script/rqzryhZ2-Rsi-Stops-JD/
Translator: 阿财(<EMAIL>)(4910163#qq.<EMAIL>)
Parameters
----------
data : (N,) array_like
传入 OHLC Kline 序列。
The OHLC Kline.
period : int or None, optional
DI 统计周期 默认值为 10
DI Length period. Default value is 10.
Returns
-------
rsi_ma, stop_line, directions : ndarray
rsi_ma, stop_line 指标和 directions 趋势指示方向 (-1, 0, 1) 分别代表 (下跌, 无明显趋势, 上涨)
rsi_ma, stop_line indicator and thread directions sequence. (-1, 0, 1) means for (Neagtive, No Trend, Positive)
"""
    rsi_ma = talib.EMA((data.open + data.high + data.low + data.close) / 4, period)
    ATR = talib.ATR(data.high, data.low, data.close, period)
top_line = rsi_ma + ATR
bottom_line = rsi_ma - ATR
rsi_ma = pd.Series(rsi_ma, index=data.index)
PRICE_PREDICT = pd.DataFrame(columns=['POSITION'], index=data.index)
PREDICT_JX = (CROSS(data.close, top_line) == 1)
PREDICT_SX = (CROSS(bottom_line, data.close) == 1)
PREDICT_JX = PREDICT_JX[PREDICT_JX.apply(lambda x: x == True)] # eqv. Trim(x == False)
PREDICT_SX = PREDICT_SX[PREDICT_SX.apply(lambda x: x == True)] # eqv. Trim(x == False)
PRICE_PREDICT.loc[PREDICT_JX.index, 'POSITION'] = 1
PRICE_PREDICT.loc[PREDICT_SX.index, 'POSITION'] = -1
PRICE_PREDICT['POSITION'] = PRICE_PREDICT['POSITION'].ffill()
stop_line = rsi_ma - PRICE_PREDICT['POSITION'] * ATR
return rsi_ma, stop_line, PRICE_PREDICT['POSITION'].values
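# --- Hedged usage sketch (assumption, not from the original module) ----------
# ATR_RSI_Stops expects a DataFrame with 'open', 'high', 'low', 'close'
# columns and a meaningful index (the index is reused for the stop line).
# CROSS comes from QUANTAXIS.QAIndicator.base, imported with `*` above.
def _example_rsi_stops(ohlc: pd.DataFrame) -> pd.DataFrame:
    rsi_ma, stop_line, directions = ATR_RSI_Stops(ohlc, period=10)
    return pd.DataFrame({'RSI_MA': rsi_ma,
                         'STOP_LINE': stop_line,
                         'POSITION': directions}, index=ohlc.index)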
def ATR_SuperTrend_cross(klines, length=12, Factor=3):
"""
ATR 趋势判断指标,可以实现快速而精准的牛熊趋势判断
the Super Trend ATR allows you to quickly identify trends and the acceleration phase and accumulation
Source: https://cn.tradingview.com/script/alvd6EHP-Bollinger-Bands-V2-Super-Trend/
Translator: 阿财(<EMAIL>)(<EMAIL>)
Parameters
----------
data : (N,) array_like
传入 OHLC Kline 序列。
The OHLC Kline.
period : int or None, optional
DI 统计周期 默认值为 10
DI Length period. Default value is 10.
Returns
-------
Tsl, Trend : ndarray
Tsl 指标和 Trend 趋势指示方向 (-1, 0, 1) 分别代表 (下跌, 无明显趋势, 上涨)
the Tsl indicator and thread directions sequence. (-1, 0, 1) means for (Neagtive, No Trend, Positive)
"""
src = klines.close.values
    ATR_period = length  # ATR period taken from the `length` argument; Factor (the Super Trend multiplier) is used as passed in
Up = (klines.high + klines.low) / 2 - (Factor * talib.ATR(klines.high,
klines.low,
klines.close,
ATR_period))
Dn = (klines.high + klines.low) / 2 + (Factor * talib.ATR(klines.high,
klines.low,
klines.close,
ATR_period))
TUp = np.full([len(src)], np.nan)
for i in np.arange(1, len(src)):
TUp[i] = max(Up[i], TUp[i - 1]) if (src[i - 1] > TUp[i - 1]) else Up[i]
TDown = np.full([len(src)], np.nan)
for i in np.arange(1, len(src)):
TDown[i] = min(Dn[i], TDown[i - 1]) if (src[i - 1] < TDown[i - 1]) else Dn[i]
Trend = np.full([len(src)], np.nan)
for i in np.arange(1, len(src)):
Trend[i] = 1 if (src[i] > TDown[i - 1]) else (-1 if (src[i] < TUp[i - 1]) else Trend[i - 1])
Tsl = np.where(Trend == 1, TUp, TDown)
return Tsl, Trend
def Volume_HMA(klines, period=5):
"""
交易量加权船型移动平均线 HMA,方向指示性类似于 Moving Average ADX,但它们通过不同的指标实现。
Hull Moving Average with Volume | |
statistics' + suffix
logger.info ( title + "\n%s" % self.table ( title = title , prefix = "# " ) )
## standard printout as table
def table ( self , title = 'Jobs execution statistics' , prefix = '' ) :
text = [ (' #jobs ' , '%' , ' total time' , 'time/job' , 'job server') ]
njobs = self.njobs
keys = self.__merged.keys()
for host in sorted ( keys ) :
se = self.__merged [ host ]
nj = se.njobs
time = se.time
mean = time / nj if 1 <= nj else 0.0
if 1 <= nj :
line = ( "%6d " % nj ,
" %5.1f " % ( 100. * nj / njobs ) ,
" %10.4g " % time ,
" %10.4g " % mean ,
" %-s" % host )
else :
line = "%6d "% nj , '', '' , '' , " %-s" % host
text.append ( line )
import ostap.logger.table as T
return T.table ( text , title = title , prefix = prefix )
## standard printout
def __str__ ( self ) :
return self.table ( title = "Job execution statistics" )
@property
def njobs ( self ) :
"""``njobs'' : total number of jobs"""
return sum ( s.njobs for s in self.__merged.values() )
__repr__ = __str__
# =============================================================================
## Merge/combine task results
# @code
# merger = TaskMerger()
# jobs = pool.uimap ( .... )
# for result , stat in jobs :
# merger += result
# merged = merger.result
# @endcode
class TaskMerger(object) :
"""Merge task resuls
>>> merger = TaskMerger()
>>> jobs = pool.uimap ( .... )
>>> for result , stat in jobs :
... merger += result
... merged = merger.result
"""
def __init__ ( self , merger = operator.add , init = None ) :
self.__merger = merger
self.__result = init
self.__nmerged = 0
# =========================================================================
## Merge/combine task results
# @code
# merger = TaskMerger()
# jobs = pool.uimap ( .... )
# for result , stat in jobs :
# merger += result
# merged = merger.result
    # @endcode
def __iadd__ ( self , result ) :
"""Merge task resuls
>>> merger = TaskMerger()
>>> jobs = pool.uimap ( .... )
>>> for result , stat in jobs :
... merger += result
... merged = merger.result
"""
self.merge ( result )
return self
# =========================================================================
    ## Merge task results
# @code
# merger = TaskMerger()
# jobs = pool.uimap ( .... )
# for result , stat in jobs :
# merger.merge ( result )
# merged = merger.result
    # @endcode
def merge ( self , result ) :
"""Merge task results
>>> merger = TaskMerger()
>>> jobs = pool.uimap ( .... )
>>> for result , stat in jobs :
... merger.merge ( result )
... merged = merger.result
"""
if self.__result is None : self.__result = result
elif self.__merger :
self.__result = self.__merger ( self.__result , result )
elif hasattr ( self.__result , '__iadd__' ) :
self.__result += result
elif hasattr ( self.__result , '__add__' ) or hasattr ( result , '__radd__' ) :
self.__result = self.__result + result
elif hasattr ( self.__result , 'append' ) :
self.__result.append ( result )
elif hasattr ( self.__result , 'add' ) :
self.__result.add ( result )
else :
raise TypeError ( 'TaskMerger: no merge is defined for %s and %s' % ( type ( self.__result ) , type ( result ) ) )
self.__nmerged += 1
return self
@property
def result ( self ) :
"""``result'' : the merged results"""
return self.__result
@property
def nmerged ( self ) :
"""``nmerged'' : number of merged results"""
return self.__nmerged
def __nonzero__ ( self ) : return 0 < self.__nmerged
def __bool__ ( self ) : return 0 < self.__nmerged
def __len__ ( self ) : return self.__nmerged
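# =============================================================================
## Hedged, self-contained sketch (not part of the original module): the same
#  merge semantics as in the doxygen snippets above, but driven by a plain
#  iterable instead of a parallel-python/pathos pool, so it can run anywhere.
def _example_task_merger ( partial_results ) :
    """Illustrate TaskMerger on an iterable of partial results (e.g. lists)"""
    merger = TaskMerger ( merger = operator.add , init = [] )
    for result in partial_results :
        merger += result
    return merger.result , merger.nmerged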
# =============================================================================
## helper function to execute the task and collect statistic
# (unfortunately due to limitation of <code>parallel python</code> one cannot
# use decorators here :-(
# @see Task
def task_executor ( item ) :
"""Helper function to execute the task and collect job execution statistic
- unfortunately due to limitation of ``parallel python'' one cannot
use python decorators here :-(
- see Task
"""
## unpack
task = item [ 0 ]
jobid = item [ 1 ]
args = item [ 2: ]
import os, re, sys
what = r'(?<!\\)\$[A-Za-z_][A-Za-z0-9_]*'
expandvars = lambda item : re.sub ( what , '' , os.path.expandvars ( item ) )
## change the current working directory
if task.directory :
directory = expandvars ( task.directory )
if os.path.exists ( directory ) and os.path.isdir( directory ) :
logger.debug ( 'Task %s: change the current directory to %s' % ( jobid , directory ) )
os.chdir ( directory )
## modify/update environment variables, if needed
for key in task.environment :
item = expandvars ( task.environment [ key ] )
logger.debug ( 'Task %s: modify the environment variable %s : %s ' % ( jobid , key , item ) )
os.environ [ key ] = item
## 2. prepend paths
for key in task.prepend_to :
item = expandvars ( task.prepend_to [ key ] )
ncmps = item.split ( os.pathsep )
hask = os.environ.get ( key , None )
if hask is None : cmps = ncmps
        else              : cmps = hask.split ( os.pathsep ) + ncmps
#
path = os.pathsep.join ( cmps )
os.environ [ key ] = path
logger.debug ( 'Task %s: prepend path %s : %s ' % ( jobid , key , path ) )
## 3. append paths
for key in task.append_to :
item = expandvars ( task.append_to [ key ] )
ncmps = item.split ( os.pathsep )
hask = os.environ.get ( key , None )
if hask is None : cmps = ncmps
else : cmps = ncmps + hask.split ( os.pathsep )
#
path = os.pathsep.join ( cmps )
os.environ [ key ] = path
logger.debug ( 'Task %s: append path %s : %s ' % ( jobid , key , path ) )
## 4. Is current directory in the path?
if task.dot_in_path and not '.' in sys.path :
sys.path = ['.'] + sys.path
logger.debug ( "Task %s: '.' is added to sys.path" % jobid )
if task.batch_set :
from ostap.utils.utils import Batch as batch_context
else :
from ostap.utils.utils import NoContext as batch_context
if task.build_set :
from ostap.core.build_dir import UseBuildDir as build_context
else :
from ostap.utils.utils import NoContext as build_context
##
if task.cleanup :
from ostap.utils.cleanup import CleanUpPID as clean_context
else :
from ostap.utils.utils import NoContext as clean_context
## use clean, build & batch context
with clean_context (), build_context ( task.build ), batch_context ( task.batch ) :
        ## perform remote initialization (if needed)
task.initialize_remote ( jobid )
with Statistics () as stat :
result = task.process ( jobid , *args )
return jobid , result , stat
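# =============================================================================
## Hedged sketch (assumption, not from the original module): how items for
#  `task_executor` are typically packed - tuples of (task, jobid, *args) -
#  matching the unpacking at the top of the function above.
def _example_pack_items ( task , list_of_args ) :
    """Pack (task, jobid, *args) tuples suitable for task_executor"""
    return [ ( task , jobid ) + tuple ( args ) for jobid , args in enumerate ( list_of_args ) ]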
# =============================================================================
## helper function to execute the function and collect statistics
#  (unfortunately due to limitation of <code>parallel python</code> one cannot
# use decorators here :-(
def func_executor ( item ) :
"""Helper function to execute the task and collect job execution statistic
    - unfortunately due to limitation of ``parallel python'' one cannot
use python decorators here :-(
"""
## unpack
fun = item [ 0 ]
jobid = item [ 1 ]
args = item [ 2: ]
##
from ostap.utils.cleanup import CleanUpPID as clean_context
from ostap.utils.utils import batch
with | |
_general.PyNode)
return res
@_factories.addCmdDocs
def polyCut(*args, **kwargs):
res = cmds.polyCut(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyCylinder(*args, **kwargs):
res = cmds.polyCylinder(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyCylindricalProjection = _factories.getCmdFunc('polyCylindricalProjection')
@_factories.addCmdDocs
def polyDelEdge(*args, **kwargs):
res = cmds.polyDelEdge(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyDelFacet(*args, **kwargs):
res = cmds.polyDelFacet(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyDelVertex(*args, **kwargs):
res = cmds.polyDelVertex(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyDuplicateAndConnect = _factories.getCmdFunc('polyDuplicateAndConnect')
@_factories.addCmdDocs
def polyDuplicateEdge(*args, **kwargs):
res = cmds.polyDuplicateEdge(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyEditEdgeFlow(*args, **kwargs):
res = cmds.polyEditEdgeFlow(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyEditUV = _factories.getCmdFunc('polyEditUV')
polyEditUVShell = _factories.getCmdFunc('polyEditUVShell')
polyEvaluate = _factories.getCmdFunc('polyEvaluate')
@_factories.addCmdDocs
def polyExtrudeEdge(*args, **kwargs):
res = cmds.polyExtrudeEdge(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyExtrudeFacet = _factories.getCmdFunc('polyExtrudeFacet')
@_factories.addCmdDocs
def polyExtrudeVertex(*args, **kwargs):
res = cmds.polyExtrudeVertex(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyFlipEdge(*args, **kwargs):
res = cmds.polyFlipEdge(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyFlipUV(*args, **kwargs):
res = cmds.polyFlipUV(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyForceUV = _factories.getCmdFunc('polyForceUV')
polyGeoSampler = _factories.getCmdFunc('polyGeoSampler')
@_factories.addCmdDocs
def polyHelix(*args, **kwargs):
res = cmds.polyHelix(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyHole = _factories.getCmdFunc('polyHole')
polyInfo = _factories.getCmdFunc('polyInfo')
@_factories.addCmdDocs
def polyInstallAction(*args, **kwargs):
if len(args):
doPassSelf = kwargs.pop('passSelf', False)
else:
doPassSelf = False
for key in ['cn', 'commandName']:
try:
cb = kwargs[key]
if callable(cb):
kwargs[key] = _factories.makeUICallback(cb, args, doPassSelf)
except KeyError:
pass
res = cmds.polyInstallAction(*args, **kwargs)
return res
@_factories.addCmdDocs
def polyLayoutUV(*args, **kwargs):
res = cmds.polyLayoutUV(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyListComponentConversion = _factories.getCmdFunc('polyListComponentConversion')
@_factories.addCmdDocs
def polyMapCut(*args, **kwargs):
res = cmds.polyMapCut(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyMapDel(*args, **kwargs):
res = cmds.polyMapDel(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyMapSew(*args, **kwargs):
res = cmds.polyMapSew(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyMapSewMove(*args, **kwargs):
res = cmds.polyMapSewMove(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyMergeEdge(*args, **kwargs):
res = cmds.polyMergeEdge(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyMergeFacet = _factories.getCmdFunc('polyMergeFacet')
@_factories.addCmdDocs
def polyMergeUV(*args, **kwargs):
res = cmds.polyMergeUV(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyMergeVertex = _factories.getCmdFunc('polyMergeVertex')
polyMirrorFace = _factories.getCmdFunc('polyMirrorFace')
@_factories.addCmdDocs
def polyMoveEdge(*args, **kwargs):
res = cmds.polyMoveEdge(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyMoveFacet = _factories.getCmdFunc('polyMoveFacet')
@_factories.addCmdDocs
def polyMoveFacetUV(*args, **kwargs):
res = cmds.polyMoveFacetUV(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyMoveUV(*args, **kwargs):
res = cmds.polyMoveUV(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyMoveVertex(*args, **kwargs):
res = cmds.polyMoveVertex(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyMultiLayoutUV = _factories.getCmdFunc('polyMultiLayoutUV')
@_factories.addCmdDocs
def polyNormal(*args, **kwargs):
res = cmds.polyNormal(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyNormalPerVertex(*args, **kwargs):
res = cmds.polyNormalPerVertex(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyNormalizeUV(*args, **kwargs):
res = cmds.polyNormalizeUV(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyOptUvs(*args, **kwargs):
res = cmds.polyOptUvs(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyOptions = _factories.getCmdFunc('polyOptions')
polyOutput = _factories.getCmdFunc('polyOutput')
@_factories.addCmdDocs
def polyPinUV(*args, **kwargs):
res = cmds.polyPinUV(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyPipe(*args, **kwargs):
res = cmds.polyPipe(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyPlanarProjection = _factories.getCmdFunc('polyPlanarProjection')
@_factories.addCmdDocs
def polyPlane(*args, **kwargs):
res = cmds.polyPlane(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyPlatonicSolid(*args, **kwargs):
res = cmds.polyPlatonicSolid(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyPoke(*args, **kwargs):
res = cmds.polyPoke(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyPrimitive(*args, **kwargs):
res = cmds.polyPrimitive(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyPrism(*args, **kwargs):
res = cmds.polyPrism(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyProjectCurve(*args, **kwargs):
res = cmds.polyProjectCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyProjection = _factories.getCmdFunc('polyProjection')
@_factories.addCmdDocs
def polyPyramid(*args, **kwargs):
res = cmds.polyPyramid(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyQuad(*args, **kwargs):
res = cmds.polyQuad(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyQueryBlindData = _factories.getCmdFunc('polyQueryBlindData')
@_factories.addCmdDocs
def polyReduce(*args, **kwargs):
res = cmds.polyReduce(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyRemesh(*args, **kwargs):
res = cmds.polyRemesh(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyRetopo(*args, **kwargs):
res = cmds.polyRetopo(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polySelect = _factories.getCmdFunc('polySelect')
polySelectConstraint = _factories.getCmdFunc('polySelectConstraint')
@_factories.addCmdDocs
def polySelectConstraintMonitor(*args, **kwargs):
if len(args):
doPassSelf = kwargs.pop('passSelf', False)
else:
doPassSelf = False
for key in ['cc', 'changeCommand']:
try:
cb = kwargs[key]
if callable(cb):
kwargs[key] = _factories.makeUICallback(cb, args, doPassSelf)
except KeyError:
pass
res = cmds.polySelectConstraintMonitor(*args, **kwargs)
return res
@_factories.addCmdDocs
def polySeparate(*args, **kwargs):
res = cmds.polySeparate(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polySetToFaceNormal = _factories.getCmdFunc('polySetToFaceNormal')
@_factories.addCmdDocs
def polySewEdge(*args, **kwargs):
res = cmds.polySewEdge(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polySlideEdge = _factories.getCmdFunc('polySlideEdge')
@_factories.addCmdDocs
def polySmooth(*args, **kwargs):
res = cmds.polySmooth(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polySoftEdge(*args, **kwargs):
res = cmds.polySoftEdge(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polySphere(*args, **kwargs):
res = cmds.polySphere(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polySphericalProjection = _factories.getCmdFunc('polySphericalProjection')
@_factories.addCmdDocs
def polySplit(*args, **kwargs):
res = cmds.polySplit(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polySplitEdge(*args, **kwargs):
res = cmds.polySplitEdge(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polySplitRing(*args, **kwargs):
res = cmds.polySplitRing(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polySplitVertex = _factories.getCmdFunc('polySplitVertex')
@_factories.addCmdDocs
def polyStraightenUVBorder(*args, **kwargs):
res = cmds.polyStraightenUVBorder(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polySubdivideEdge = _factories.getCmdFunc('polySubdivideEdge')
polySubdivideFacet = _factories.getCmdFunc('polySubdivideFacet')
@_factories.addCmdDocs
def polyToSubdiv(*args, **kwargs):
res = cmds.polyToSubdiv(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyTorus(*args, **kwargs):
res = cmds.polyTorus(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyTransfer(*args, **kwargs):
res = cmds.polyTransfer(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyTriangulate(*args, **kwargs):
res = cmds.polyTriangulate(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyUVCoverage = _factories.getCmdFunc('polyUVCoverage')
polyUVOverlap = _factories.getCmdFunc('polyUVOverlap')
@_factories.addCmdDocs
def polyUVRectangle(*args, **kwargs):
res = cmds.polyUVRectangle(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
polyUVSet = _factories.getCmdFunc('polyUVSet')
polyUVStackSimilarShells = _factories.getCmdFunc('polyUVStackSimilarShells')
@_factories.addCmdDocs
def polyUnite(*args, **kwargs):
res = cmds.polyUnite(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def polyWedgeFace(*args, **kwargs):
res = cmds.polyWedgeFace(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def projectCurve(*args, **kwargs):
res = cmds.projectCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def projectTangent(*args, **kwargs):
res = cmds.projectTangent(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
propMove = _factories.getCmdFunc('propMove')
querySubdiv = _factories.getCmdFunc('querySubdiv')
@_factories.addCmdDocs
def rebuildCurve(*args, **kwargs):
res = cmds.rebuildCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def rebuildSurface(*args, **kwargs):
res = cmds.rebuildSurface(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
refineSubdivSelectionList = _factories.getCmdFunc('refineSubdivSelectionList')
@_factories.addCmdDocs
def reverseCurve(*args, **kwargs):
res = cmds.reverseCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def reverseSurface(*args, **kwargs):
res = cmds.reverseSurface(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def revolve(*args, **kwargs):
res = cmds.revolve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
@_factories.addCmdDocs
def roundConstantRadius(*args, **kwargs):
res = cmds.roundConstantRadius(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
setXformManip = _factories.getCmdFunc('setXformManip')
showMetadata = _factories.getCmdFunc('showMetadata')
singleProfileBirailSurface = _factories.getCmdFunc('singleProfileBirailSurface')
@_factories.addCmdDocs
def smoothCurve(*args, **kwargs):
res = cmds.smoothCurve(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
smoothTangentSurface = _factories.getCmdFunc('smoothTangentSurface')
@_factories.addCmdDocs
def sphere(*args, **kwargs):
res = cmds.sphere(*args, **kwargs)
if not kwargs.get('query', kwargs.get('q', False)):
res = _factories.maybeConvert(res, _general.PyNode)
return res
squareSurface = | |
# functions_for_AirBnB.py
# The collection of functions for the Boston AirBnB dataset
# import necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar #To check holidays in the U.S
import time
import copy
def load_bnb_files():
'''Load AirBnB files'''
df_listing = pd.read_csv('./data/listings.csv')
df_calendar = pd.read_csv('./data/calendar.csv')
return df_listing, df_calendar
# Modify df_calendar for future work
# Special event : marathon, new academic season
def modify_calendar(df_calendar):
    '''
    This function creates 'year', 'month', 'day', 'weekday', 'week_number', 'us_holiday', and 'weekend'
    columns from the 'date' column of df_calendar and removes the '$' and ',' characters from the 'price' column.
    Input : a Pandas dataframe having a date data column
    Output : a Pandas dataframe having year, month, day, weekday, week_number, us_holiday, and weekend columns
    '''
# Split date column into year, month,day, weekday columns
# The day of the week with Monday=0, Sunday=6
# Set the range of weekends from Friday to Sunday
df_calendar['year'] = pd.DatetimeIndex(df_calendar['date']).year
df_calendar['month'] = pd.DatetimeIndex(df_calendar['date']).month
df_calendar['day'] = pd.DatetimeIndex(df_calendar['date']).day
df_calendar['weekday'] = pd.DatetimeIndex(df_calendar['date']).weekday
df_calendar['week_number'] = pd.DatetimeIndex(df_calendar['date']).week
    df_calendar['price'] = df_calendar['price'].str.replace('$', '', regex=False)
    df_calendar['price'] = df_calendar['price'].str.replace(',', '', regex=False)
df_calendar['price'] = df_calendar['price'].astype(float)
# Add us_holiday column
cal = calendar()
holidays = cal.holidays(start=df_calendar.date.min(), end=df_calendar.date.max())
df_calendar['us_holiday'] = df_calendar.date.astype('datetime64').isin(holidays)
# Add weekend column #Friday, Saturday
weekend = [4,5]
df_calendar['weekend'] = df_calendar.weekday.isin(weekend)
# Replace values in weekday column
df_calendar['weekday'].replace({0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday',4:'Friday', 5:'Saturday', 6:'Sunday'}, inplace=True)
return df_calendar
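# Hedged usage sketch (not part of the original script): enrich the raw calendar
# and peek at the derived columns added above. The function name is my own.
def example_enriched_calendar():
    _, df_calendar = load_bnb_files()
    enriched = modify_calendar(df_calendar)
    return enriched[['date', 'year', 'month', 'weekday', 'week_number',
                     'us_holiday', 'weekend', 'price']].head()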
def add_availabledays_price(df_listing, df_cal_modified):
'''
    This function creates the 'unavail_days', 'avail_days_weekends', 'avail_days_weekdays',
    'price_weekend', and 'price_weekday' columns on df_listing, calculated from df_cal_modified.
Input :
- A Pandas dataframe made from 'listings.csv' : df_listing
- A pandas dataframe modified by modify_calendar() : df_cal_modified
Output :
- The modified df_listing dataframe with new 'unavail_days', 'avail_days_weekends',
'avail_days_weekdays', 'price_weekend', and 'price_weekday' columns
'''
id_list = df_listing.id[:]
unavailable_days_array = np.array([])
avail_days_weekends_array = np.array([])
avail_days_weekdays_array = np.array([])
price_weekend_array = np.array([])
price_weekday_array = np.array([])
for i in np.nditer(id_list):
tmp = df_cal_modified[(df_cal_modified.listing_id == i)] # Make a dataframe coming from df_listing with a certain id
available_dict = tmp.available.value_counts().to_dict()
if 'f' in available_dict:
unavailable_days = tmp[tmp.available == 'f'].shape[0]
else:
unavailable_days = 0
if 't' in available_dict:
available_weekends = tmp[(tmp.available == 't') & (tmp.weekend == True)].shape[0]
available_weekdays = tmp[(tmp.available == 't') & (tmp.weekend == False)].shape[0]
price_weekend = tmp[(tmp.weekend == True) & (tmp.available == 't')].price.astype(float).describe()['mean']
price_weekday = tmp[(tmp.weekend == False) & (tmp.available == 't')].price.astype(float).describe()['mean']
else:
available_weekends = 0
available_weekdays = 0
price_weekend = np.nan
price_weekday = np.nan
unavailable_days_array = np.append(unavailable_days_array, unavailable_days)
avail_days_weekends_array = np.append(avail_days_weekends_array, available_weekends)
avail_days_weekdays_array = np.append(avail_days_weekdays_array, available_weekdays)
price_weekend_array = np.append(price_weekend_array, price_weekend)
price_weekday_array = np.append(price_weekday_array, price_weekday)
df_listing['unavail_days'] = pd.Series(unavailable_days_array)
df_listing['avail_days_weekends'] = pd.Series(avail_days_weekends_array)
df_listing['avail_days_weekdays'] = pd.Series(avail_days_weekdays_array)
df_listing['price_weekend'] = pd.Series(price_weekend_array)
df_listing['price_weekday'] = pd.Series(price_weekday_array)
return df_listing
def clean_listing_df(df_listing):
'''
This function aims to make the df_listing dataframe for data analysis by
- removing irrelevant columns
- changing object type columns to numeric columns or manipulating them using one hot encoding
- filling NaN values
- creating an integrated_score_log column by the natural log of the result from 'review_scores_rating' times 'number_of_reviews' +1
Input :
- A Pandas dataframe made from 'listings.csv' : df_listing
Output :
- Cleaned df_listing
'''
    # Drop columns having more than 50% NaN values. There were two reasons I decided on 50% as the threshold for dropping columns:
    # 1. It keeps the dataframe easy to read and makes it easy to check the meaning of the columns.
    # 2. It makes it clear which columns have to be dropped.
    # The candidate columns to be dropped are 'notes', 'neighbourhood_group_cleansed', 'square_feet', 'weekly_price', 'monthly_price', 'security_deposit', 'has_availability', 'license', 'jurisdiction_names'. Most of them duplicate other columns or are irrelevant, except the 'security_deposit' column. I didn't impute it with the mean or mode of the column because that can distort the real distribution, and I didn't one-hot-encode it in order to keep the dataframe straightforward; 'security_deposit' has 55 unique values.
df_missing = df_listing.isna().mean()
df_listing_modi1 = df_listing.drop(df_missing[df_missing>0.5].index.to_list(), axis=1)
    # Drop columns related to URLs and other irrelevant columns.
    # URL and other such columns are all unique or useless.
remove_list1 = ['listing_url', 'scrape_id', 'last_scraped', 'thumbnail_url', 'medium_url', 'picture_url', 'xl_picture_url', 'host_url',
'host_thumbnail_url', 'host_picture_url', 'country_code', 'country']
df_listing_modi1.drop(remove_list1, axis=1, inplace=True)
    # Drop columns because of data overlap [city, smart_location], a single value [state],
    # and wrong data [market, calendar_last_scraped]
remove_list2 = ['smart_location', 'state', 'name', 'summary', 'space', 'description','neighborhood_overview',
'transit','access','market','calendar_last_scraped']
df_listing_modi1.drop(remove_list2, axis=1, inplace=True)
    # Modify the 'house_rules' column into 'house_rules_exist_tf': True if a rule exists, False if there is no rule.
    # House rules are different for every host, so one-hot encoding is not practical. Instead, the column is
    # changed to a binary flag (rule present -> True, otherwise False).
    # This preserves some information, which is better than just dropping the column.
df_listing_modi1['house_rules_exist_tf']= pd.notna(df_listing_modi1.house_rules)
df_listing_modi1.drop(['house_rules'], axis=1, inplace=True)
    # Remove columns having 1000 unique string values and irrelevant data
remove_list3 = ['interaction', 'host_name', 'host_since', 'host_about', 'street','first_review','experiences_offered','requires_license',
'last_review','host_location','neighbourhood_cleansed','experiences_offered','requires_license']
df_listing_modi2 = df_listing_modi1.drop(remove_list3, axis=1)
# Change the columns 'host_response_rate', 'host_acceptance_rate' to float type
columns_change_type = ['host_response_rate','host_acceptance_rate', 'price', 'cleaning_fee']
for i in columns_change_type:
        df_listing_modi2[i] = df_listing_modi2[i].str.replace('%', '', regex=False)
        df_listing_modi2[i] = df_listing_modi2[i].str.replace('$', '', regex=False)
        df_listing_modi2[i] = df_listing_modi2[i].str.replace(',', '', regex=False)
df_listing_modi2[i] = df_listing_modi2[i].astype(float)
# Modify and Split values in 'amenities' column
# Amenities can be one of reason that potential candidate might consider.
    df_listing_modi2.amenities = df_listing_modi2.amenities.str.replace("[{}]", "", regex=True)
df_amenities = df_listing_modi2.amenities.str.get_dummies(sep = ",")
df_amenities = df_amenities.add_prefix('amenities_')
df_listing_modi2 = pd.concat([df_listing_modi2, df_amenities], axis=1)
df_listing_modi2 = df_listing_modi2.drop('amenities', axis=1)
    # Use get_dummies for columns having fewer than 10 unique values.
    # It is reasonable to use one-hot encoding when the number of unique values is below 10:
    # it doesn't lose information and keeps the dataframe simple.
columns_of_object_less10 =[]
for i,j in zip(df_listing_modi2.columns.to_list(), df_listing_modi2.dtypes.to_list()):
if j == object and len(df_listing_modi2[i].value_counts()) < 10 :
columns_of_object_less10.append(i)
df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=columns_of_object_less10, prefix=columns_of_object_less10,
dummy_na=True)
    # Modify the 'extra_people' column to get the boolean 'extra_people_fee_tf' column
    # Instead of dropping, I decided to change the 'extra_people' column to a binary type to save some information
df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].astype(str)
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].str.replace('$', '', regex=False)
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].str.replace(',', '', regex=False)
df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].astype(float)
df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].replace(to_replace=0, value=np.nan)
df_listing_modi2['extra_people_fee_tf']= pd.notna(df_listing_modi2.extra_people)
df_listing_modi2 = df_listing_modi2.drop('extra_people', axis=1)
# Modify and Split values in 'host_verifications' column
df_listing_modi2.host_verifications = df_listing_modi2.host_verifications.str.replace("[", "")
df_listing_modi2.host_verifications = df_listing_modi2.host_verifications.str.replace("]", "")
df_host_verifications = df_listing_modi2.host_verifications.str.get_dummies(sep = ",")
df_host_verifications = df_host_verifications.add_prefix('host_verification_')
df_listing_modi2 = pd.concat([df_listing_modi2, df_host_verifications], axis=1)
df_listing_modi2 = df_listing_modi2.drop(['host_verifications'], axis=1)
df_listing_modi2 = df_listing_modi2.drop(['host_neighbourhood'], axis=1)
    # Modify the 'calendar_updated' column
    # Instead of dropping, I decided to change the 'calendar_updated' column to a binary type (updated within a week or not)
    # to save some information
df_listing_modi2["calendar_updated_1weekago"] = np.where(df_listing_modi2['calendar_updated'].str.contains(
"days|yesterday|today|a week ago")==True, 'yes', 'more_than_1week')
df_listing_modi2 = df_listing_modi2.drop(['calendar_updated'], axis=1)
# Use get_dummies for the columns 'neighbourhood', 'city', 'zipcode', 'property_type'
tmp = df_listing_modi2.columns.to_list()
tmp1 = df_listing_modi2.dtypes.to_list()
columns_of_object_over10 =[]
for i,j in zip(tmp,tmp1):
if j == object and len(df_listing_modi2[i].value_counts()) > 10 :
columns_of_object_over10.append(i)
df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=columns_of_object_over10,
prefix=columns_of_object_over10, dummy_na=True)
df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=['calendar_updated_1weekago','house_rules_exist_tf','extra_people_fee_tf'],
prefix=['calendar_updated_1weekago','house_rules_exist_tf','extra_people_fee_tf'], dummy_na=True)
df_listing_modi2["host_response_rate_100"] = np.where(df_listing_modi2['host_response_rate'] ==100, True, False)
df_listing_modi2["host_acceptance_rate_100"] = np.where(df_listing_modi2['host_acceptance_rate'] ==100, True, False)
df_listing_modi2 = df_listing_modi2.drop(['host_response_rate','host_acceptance_rate','reviews_per_month'], axis=1)
    # bathrooms, bedrooms, beds, cleaning_fee, review_scores_rating, review_... : fillna with the mean value
    # The empty cells are filled with the mean values of the corresponding columns. Because these are numerical,
    # I thought imputing with mean values is better than dropping or one-hot encoding.
columns1 = ['bathrooms','bedrooms','beds','cleaning_fee','review_scores_rating','review_scores_accuracy','review_scores_cleanliness','review_scores_checkin',
'review_scores_communication','review_scores_location','review_scores_value']
df_listing_modi2[columns1] = df_listing_modi2[columns1].fillna(df_listing_modi2.mean())
df_listing_modi2.price_weekend.fillna(df_listing_modi2.price, inplace=True)
df_listing_modi2.price_weekday.fillna(df_listing_modi2.price, inplace=True)
df_listing_modi2['integrated_score_log'] = np.log(df_listing_modi2['review_scores_rating']*df_listing_modi2['number_of_reviews']+1)
df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=['host_response_rate_100','host_acceptance_rate_100'],
prefix=['host_response_rate_100','host_acceptance_rate_100'])
df_listing_modi2 = df_listing_modi2.drop(['id', 'host_id', 'latitude', 'longitude','price','host_listings_count','host_total_listings_count','maximum_nights'], axis=1)
return df_listing_modi2
def conditioning_listing_df(df_listing_modi2):
    '''
    This function conditions the dataframe returned by the function 'clean_listing_df(df_listing)'.
    Input :
    - A Pandas dataframe that came from the function 'clean_listing_df(df_listing)'
    Output :
    - Cleaned df_listing_modi2 : df_listing_modi3
    '''
threshold_80 = df_listing_modi2.integrated_score_log.quantile(0.8)
condition = [df_listing_modi2['integrated_score_log'] == 0, df_listing_modi2['integrated_score_log'] >= threshold_80]
label_list = ['poor','high']
df_listing_modi2['y_label'] = np.select(condition, label_list, default='normal')
    # Drop columns related to the 'y_label' column.
    # Without dropping them, the remaining columns would leak label information into the model's prediction.
df_listing_modi3 = df_listing_modi2.drop(['integrated_score_log','number_of_reviews','review_scores_rating', 'review_scores_value',
'review_scores_communication','review_scores_accuracy','review_scores_checkin','review_scores_cleanliness',
'review_scores_location', 'availability_30','availability_60', 'availability_90','availability_365','calculated_host_listings_count'], axis=1)
return df_listing_modi3
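# Hedged end-to-end sketch (not part of the original script): chains the helpers
# above in the order their docstrings describe. It assumes './data/listings.csv'
# and './data/calendar.csv' exist, as in load_bnb_files(). The function name is my own.
def run_preprocessing_pipeline():
    df_listing, df_calendar = load_bnb_files()
    df_cal_modified = modify_calendar(df_calendar)
    df_listing = add_availabledays_price(df_listing, df_cal_modified)
    df_listing_clean = clean_listing_df(df_listing)
    df_listing_labeled = conditioning_listing_df(df_listing_clean)
    return df_listing_labeled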
def investigate(df_listing_scaled, pca, i):
'''
    This function checks which original features are strongly related to a given PCA component
of each tensor in `outputs`.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "InfeedDequeueTuple", name,
tld.op_callbacks, "dtypes", dtypes, "shapes", shapes)
return _result
except _core._FallbackException:
try:
return infeed_dequeue_tuple_eager_fallback(
dtypes=dtypes, shapes=shapes, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'infeed_dequeue_tuple' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'infeed_dequeue_tuple' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"InfeedDequeueTuple", dtypes=dtypes, shapes=shapes, name=name)
_result = _outputs[:]
if not _result:
return _op
if _execute.must_record_gradient():
_attrs = ("dtypes", _op.get_attr("dtypes"), "shapes",
_op.get_attr("shapes"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"InfeedDequeueTuple", _inputs_flat, _attrs, _result)
return _result
InfeedDequeueTuple = tf_export("raw_ops.InfeedDequeueTuple")(_ops.to_raw_op(infeed_dequeue_tuple))
def infeed_dequeue_tuple_eager_fallback(dtypes, shapes, name, ctx):
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'infeed_dequeue_tuple' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'infeed_dequeue_tuple' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
_inputs_flat = []
_attrs = ("dtypes", dtypes, "shapes", shapes)
_result = _execute.execute(b"InfeedDequeueTuple", len(dtypes),
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"InfeedDequeueTuple", _inputs_flat, _attrs, _result)
return _result
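# Hedged usage sketch (assumption, not part of the generated file): `dtypes`
# and `shapes` are parallel lists describing each element of the infeed tuple.
# Building both lists from example tensors keeps them consistent. Executing the
# op itself requires a TPU context, so only the argument construction is shown;
# the helper name and `example_tensors` (assumed tf.Tensor objects) are my own.
def _example_infeed_dequeue_args(example_tensors):
  dtypes = [t.dtype for t in example_tensors]
  shapes = [t.shape.as_list() for t in example_tensors]
  return dtypes, shapes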
def infeed_enqueue(input, shape=[], layout=[], device_ordinal=-1, name=None):
r"""An op which feeds a single Tensor value into the computation.
Args:
input: A `Tensor`.
A tensor that will be provided using the infeed mechanism.
shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[]`.
The shape of the tensor.
layout: An optional list of `ints`. Defaults to `[]`.
A vector holding the requested layout in minor-to-major sequence.
If a layout attribute is passed, but its values are all -1, the layout will
be computed by the infeed operation.
device_ordinal: An optional `int`. Defaults to `-1`.
The TPU device to use. This should be -1 when the Op
is running on a TPU device, and >= 0 when the Op is running on the CPU
device.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "InfeedEnqueue", name,
tld.op_callbacks, input, "shape", shape, "layout", layout,
"device_ordinal", device_ordinal)
return _result
except _core._FallbackException:
try:
return infeed_enqueue_eager_fallback(
input, shape=shape, layout=layout, device_ordinal=device_ordinal,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
if shape is None:
shape = []
shape = _execute.make_shape(shape, "shape")
if layout is None:
layout = []
if not isinstance(layout, (list, tuple)):
raise TypeError(
"Expected list for 'layout' argument to "
"'infeed_enqueue' Op, not %r." % layout)
layout = [_execute.make_int(_i, "layout") for _i in layout]
if device_ordinal is None:
device_ordinal = -1
device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"InfeedEnqueue", input=input, shape=shape, layout=layout,
device_ordinal=device_ordinal, name=name)
return _op
InfeedEnqueue = tf_export("raw_ops.InfeedEnqueue")(_ops.to_raw_op(infeed_enqueue))
def infeed_enqueue_eager_fallback(input, shape, layout, device_ordinal, name, ctx):
if shape is None:
shape = []
shape = _execute.make_shape(shape, "shape")
if layout is None:
layout = []
if not isinstance(layout, (list, tuple)):
raise TypeError(
"Expected list for 'layout' argument to "
"'infeed_enqueue' Op, not %r." % layout)
layout = [_execute.make_int(_i, "layout") for _i in layout]
if device_ordinal is None:
device_ordinal = -1
device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
_attr_dtype, (input,) = _execute.args_to_matching_eager([input], ctx)
_inputs_flat = [input]
_attrs = ("dtype", _attr_dtype, "shape", shape, "layout", layout,
"device_ordinal", device_ordinal)
_result = _execute.execute(b"InfeedEnqueue", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
_result = None
return _result
def infeed_enqueue_prelinearized_buffer(input, device_ordinal=-1, name=None):
r"""An op which enqueues prelinearized buffer into TPU infeed.
Args:
input: A `Tensor` of type `variant`.
A variant tensor representing linearized output.
device_ordinal: An optional `int`. Defaults to `-1`.
      The TPU device to use. This should be -1 when the Op is running on a TPU device,
      and >= 0 when the Op is running on the CPU device.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name,
"InfeedEnqueuePrelinearizedBuffer", name, tld.op_callbacks, input,
"device_ordinal", device_ordinal)
return _result
except _core._FallbackException:
try:
return infeed_enqueue_prelinearized_buffer_eager_fallback(
input, device_ordinal=device_ordinal, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
if device_ordinal is None:
device_ordinal = -1
device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"InfeedEnqueuePrelinearizedBuffer", input=input,
device_ordinal=device_ordinal,
name=name)
return _op
InfeedEnqueuePrelinearizedBuffer = tf_export("raw_ops.InfeedEnqueuePrelinearizedBuffer")(_ops.to_raw_op(infeed_enqueue_prelinearized_buffer))
def infeed_enqueue_prelinearized_buffer_eager_fallback(input, device_ordinal, name, ctx):
if device_ordinal is None:
device_ordinal = -1
device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
input = _ops.convert_to_tensor(input, _dtypes.variant)
_inputs_flat = [input]
_attrs = ("device_ordinal", device_ordinal)
_result = _execute.execute(b"InfeedEnqueuePrelinearizedBuffer", 0,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
_result = None
return _result
def infeed_enqueue_tuple(inputs, shapes, layouts=[], device_ordinal=-1, name=None):
r"""Feeds multiple Tensor values into the computation as an XLA tuple.
Args:
inputs: A list of `Tensor` objects.
A list of tensors that will be provided using the infeed mechanism.
shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
The shapes of each tensor in `inputs`.
layouts: An optional list of `ints`. Defaults to `[]`.
A vector holding the requested layout in minor-to-major sequence for
all the tuple shapes, in the order the shapes appear in the "shapes" input.
The layout elements for a sub-shape can be set to -1, in which case the
corresponding layout will be computed by the infeed operation.
device_ordinal: An optional `int`. Defaults to `-1`.
The TPU device to use. This should be -1 when the Op
is running on a TPU device, and >= 0 when the Op is running on the CPU
device.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "InfeedEnqueueTuple", name,
tld.op_callbacks, inputs, "shapes", shapes, "layouts", layouts,
"device_ordinal", device_ordinal)
return _result
except _core._FallbackException:
try:
return infeed_enqueue_tuple_eager_fallback(
inputs, shapes=shapes, layouts=layouts,
device_ordinal=device_ordinal, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'infeed_enqueue_tuple' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if layouts is None:
layouts = []
if not isinstance(layouts, (list, tuple)):
raise TypeError(
"Expected list for 'layouts' argument to "
"'infeed_enqueue_tuple' Op, not %r." % layouts)
layouts = [_execute.make_int(_i, "layouts") for _i in layouts]
if device_ordinal is None:
device_ordinal = -1
device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"InfeedEnqueueTuple", inputs=inputs, shapes=shapes, layouts=layouts,
device_ordinal=device_ordinal, name=name)
return _op
InfeedEnqueueTuple = tf_export("raw_ops.InfeedEnqueueTuple")(_ops.to_raw_op(infeed_enqueue_tuple))
def infeed_enqueue_tuple_eager_fallback(inputs, shapes, layouts, device_ordinal, name, ctx):
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'infeed_enqueue_tuple' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if layouts is None:
layouts = []
if not isinstance(layouts, (list, tuple)):
raise TypeError(
"Expected list for 'layouts' argument to "
"'infeed_enqueue_tuple' Op, not %r." % layouts)
layouts = [_execute.make_int(_i, "layouts") for _i in layouts]
if device_ordinal is None:
device_ordinal = -1
device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
_attr_dtypes, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx)
_inputs_flat = list(inputs)
_attrs = ("dtypes", _attr_dtypes, "shapes", shapes, "layouts", layouts,
"device_ordinal", device_ordinal)
_result = _execute.execute(b"InfeedEnqueueTuple", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
_result = None
return _result
def load_tpu_embedding_adam_parameters(parameters, momenta, velocities, num_shards, shard_id, table_id=-1, table_name="", config="", name=None):
r"""Load ADAM embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be
preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
embedding table configuration. For example, this | |
# fourInARowWrapper.py
import numpy as np
from gym import spaces
import gym
import fourInARow
import copy
class ActionSpace(spaces.Discrete):
def __init__(self, size):
self.high = fourInARow.width
self.low = 0
super().__init__(size)
class FourInARowWrapper(gym.Env):
def __init__(self, player):
self.player = player
self.action_space = ActionSpace(fourInARow.width)
#self.action_space = ActionSpace([0], [8])
fourInARow.init(player)
self.state = self.getHotEncodedState2d()
def ansi(self, style):
return "\033[{0}m".format(style)
def seed(self, seed=None):
if seed is not None:
np.random.seed(seed)
return np.random.random_sample()
def reset(self, player):
fourInARow.init(player)
self.player = player
return self.getHotEncodedState2d()
def step(self, action):
fourInARow.drop_disc(int(action))
reward = 0
if fourInARow.state != "Playing":
if fourInARow.winner == fourInARow.player:
reward = -1
elif fourInARow.winner == (fourInARow.player ^ 3):
reward = 1
else:
reward = 0.5
done = True
else:
done = False
stateOneHotEncoded = self.getHotEncodedState2d()
self.state = stateOneHotEncoded
return (stateOneHotEncoded, reward, done, None)
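    # Hedged usage sketch for the wrapper above (assumes the fourInARow module is
    # importable; a random action may target a full column, which the underlying
    # game module is expected to handle):
    #
    #   env = FourInARowWrapper(player=1)
    #   state = env.reset(player=1)
    #   done = False
    #   while not done:
    #       action = env.action_space.sample()            # random column index
    #       state, reward, done, _ = env.step(action)     # reward set only at game end
    #   env.render()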
def robotStep(self, level):
action = self.getRobotAction(level)
#print("Robot action:", action+1)
stateOneHotEncoded, reward, done, _ = self.step(action)
return (stateOneHotEncoded, reward, done, action)
def getRobotAction(self, level):
import random
rules = [self.ruleIsWinHyper(1), self.ruleIsLoseHyper(1), self.ruleIsWinHyper(2), self.ruleIsLoseHyper(2)]
        if level > len(rules):
            raise ValueError("Level can't be bigger than {}".format(len(rules)))
rules = rules[:level]
col_points = [0 for _ in range(fourInARow.width)]
candidates = list(range(fourInARow.width))
# for col in columns:
# #l = [x for rule in rules for x in rule]
# col_points[col] = sum(rules[x](col) << (len(rules)-x) for x in range(len(rules)))
for rule in rules:
new_candidates = []
for column in candidates:
if not self.ruleIsAvaliable(column):
col_points[column] -= 1e3
continue
col_points[column] += rule(column)
max_points = max(col_points)
min_points = min(col_points)
#print("Rule", rules.index(rule), "Column points:", col_points)
for column in candidates:
if col_points[column] == max_points:
new_candidates.append(column)
candidates = new_candidates
if len(candidates) == 1:
break
candidates = [i for i in range(len(col_points)) if col_points[i] == max_points]
ret_col = random.choice(candidates)
#print("Cadidates:", candidates)
#print("Column points:", col_points)
return ret_col
def ruleIsAvaliable(self, column):
return fourInARow.get_available_cols()[column]
def recTheoreticalPlaying(self, depth, board, player, next_player):
str = ""
for c in range(depth):
str += "-"
ret = False
if depth == 0:
return self.checkWin(board, player)
for column in range(fourInARow.width):
newBoard = copy.deepcopy(board)
col_height = sum([1 for x in newBoard[column] if x > 0])
# Check if column is full
if col_height == fourInARow.height:
continue
newBoard[column][col_height] = next_player
#self.renderBoardPlayer(newBoard, next_player)
#print(str, "column:", column, "player:", next_player, "depth:", depth, "checking win for player:", player)
if depth > 1:
ret += self.recTheoreticalPlaying(depth-1, newBoard, player, next_player ^ 3)/(depth**fourInARow.width)
else:
ret += self.checkWin(newBoard, player)
#if self.checkWin(newBoard, player):
#print(str, "win:", self.checkWin(newBoard, player), "---------------------------------------------------------------")
#print(str, "loop return:", ret)
return ret
def ruleIsWinHyper(self, drops):
def ruleIsWinN(column):
board = copy.deepcopy(fourInARow.board)
col_height = sum([1 for x in board[column] if x > 0])
if col_height == fourInARow.height:
return False
mePlayer = fourInARow.player
opponent = mePlayer ^ 3
# Drop disc
board[column][col_height] = fourInARow.player
#print("player", fourInARow.player, "tries column", column)
ret = self.recTheoreticalPlaying(depth=(drops-1)*2, board=board, player=mePlayer, next_player=opponent)
#print("points:", ret)
return ret
return ruleIsWinN
def ruleIsLoseHyper(self, drops):
def ruleIsLoseN(column):
board = copy.deepcopy(fourInARow.board)
col_height = sum([1 for x in board[column] if x > 0])
if col_height == fourInARow.height:
return False
mePlayer = fourInARow.player
opponent = mePlayer ^ 3
# Drop disc
board[column][col_height] = fourInARow.player
#print("player", fourInARow.player, "tries column", column)
ret = self.recTheoreticalPlaying(depth=drops*2-1, board=board, player=mePlayer ^ 3, next_player=opponent)
#print("Points:", ret)
return -ret
return ruleIsLoseN
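    # Illustrative note on the closures above: ruleIsWinHyper/ruleIsLoseHyper return
    # scoring functions of a single column index, which getRobotAction applies to
    # every candidate column. For example (env and the column number are assumptions):
    #
    #   win_in_one = env.ruleIsWinHyper(1)   # drops=1 lookahead -> immediate-win check
    #   points = win_in_one(3)               # truthy/positive if dropping in column 3 wins at once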
def ruleIsWin(self, column):
#print("rule1:", column)
# Check if dropping in column is a winning turn
board = copy.deepcopy(fourInARow.board)
col_height = sum([1 for x in board[column] if x > 0])
if col_height == fourInARow.height:
return False
# Drop disc
board[column][col_height] = fourInARow.player
return self.checkWin(board, fourInARow.player)
def ruleIsBlockLose(self, column):
# Check if opponent wins if I don't drop in a column
board = copy.deepcopy(fourInARow.board)
col_height = sum([1 for x in board[column] if x > 0])
if col_height == fourInARow.height:
return False
# Inverse player, drop disc
board[column][col_height] = fourInARow.player^3
return self.checkWin(board, fourInARow.player)
def ruleIsBlockLoseTwoStepsAhead(self, column):
# Check if column is already full
col_height = sum([1 for x in fourInARow.board[column] if x > 0])
if col_height == fourInARow.height:
return False
for opponentColumn in range(fourInARow.width):
board = copy.deepcopy(fourInARow.board)
def checkWin(self, board, player):
deltas = [1, 2, 3]
winner = 0
for x in range(fourInARow.width):
right = x < fourInARow.width - 3
for y in range(fourInARow.height):
upper = y < fourInARow.height - 3
lower = y > 2
if board[x][y] > 0 and \
((upper and 3 == sum(1 for d in deltas if board[x][y] == board[x][y + d])) or \
(right and 3 == sum(1 for d in deltas if board[x][y] == board[x + d][y])) or \
(right and upper and 3 == sum(1 for d in deltas if board[x][y] == board[x + d][y + d])) or \
(right and lower and 3 == sum(1 for d in deltas if board[x][y] == board[x + d][y - d]))):
winner = board[x][y]
if player == winner:
return True
else:
return False
def getAvaliableColumns(self):
return np.reshape(np.array(fourInARow.get_available_cols()).astype(np.float32), (fourInARow.width))
def render(self, mode='human'):
self.renderBoardPlayer(fourInARow.board, fourInARow.player)
# if fourInARow.player == 1:
# player = "X"
# else:
# player = "O"
#
# print("Player:", player, "\n")
# row = " "
# for n in range(fourInARow.width):
# row += str(n+1) + " "
# print(row)
#
# row = "|"
# for _ in range(fourInARow.width):
# row += "---|"
# print(row)
#
# for y in range(fourInARow.height):
# row = "|"
# for x in range(fourInARow.width):
# color = 30 + fourInARow.board[x][fourInARow.height-y-1]
# character = " "
#
# if fourInARow.board[x][fourInARow.height - y - 1] == 1:
# character = " X "
# elif fourInARow.board[x][fourInARow.height - y - 1] == 2:
# character = " O "
#
# if fourInARow.latest == (x, fourInARow.height-y-1):
# color += 10
# row += self.ansi(color) + character + self.ansi(0) + "|"
#
# print(row)
#
# row = "|"
# for _ in range(fourInARow.width):
# row += "---|"
# print(row)
#print("\n")
def renderBoardPlayer(self, board, player):
if player == 1:
player = "X"
else:
player = "O"
print("Player:", player, "\n")
row = " "
for n in range(fourInARow.width):
row += str(n+1) + " "
print(row)
row = "|"
for _ in range(fourInARow.width):
row += "---|"
print(row)
for y in range(fourInARow.height):
row = "|"
for x in range(fourInARow.width):
color = 30 + board[x][fourInARow.height-y-1]
character = " "
if board[x][fourInARow.height - y - 1] == 1:
character = " X "
elif board[x][fourInARow.height - y - 1] == 2:
character = " O "
if fourInARow.latest == (x, fourInARow.height-y-1):
color += 10
row += self.ansi(color) + character + self.ansi(0) + "|"
print(row)
row = "|"
for _ in range(fourInARow.width):
row += "---|"
print(row)
#print("\n")
def close(self):
pass
def getHotEncodedState(self):
board = np.reshape(np.array(fourInARow.board), fourInARow.height * fourInARow.width)
boardOneHotEncoded = np.zeros(fourInARow.height * fourInARow.width * 2)
player = fourInARow.player
playerOneHotEncoded = np.zeros(2)
if player == 1:
playerOneHotEncoded[0] = 1
playerOneHotEncoded[1] = 0
elif player == 2:
playerOneHotEncoded[0] = 0
playerOneHotEncoded[1] = 1
for i in range(board.size):
if board[i] == 1:
boardOneHotEncoded[2 * i] = 1
boardOneHotEncoded[2 * i + 1] = 0
elif board[i] == 2:
boardOneHotEncoded[2 * i] = 0
boardOneHotEncoded[2 * i + 1] = 1
else:
boardOneHotEncoded[2 * i] = 0
boardOneHotEncoded[2 * i + 1] = 0
return np.concatenate([playerOneHotEncoded, boardOneHotEncoded])
def getHotEncodedState2d(self):
board = np.array(fourInARow.board)
boardOneHotEncoded = np.resize(np.expand_dims(np.zeros(board.shape), axis=2), (7,6,2))
player = fourInARow.player
playerOneHotEncoded = np.zeros(2)
if player == 1:
playerOneHotEncoded[0] = 1
playerOneHotEncoded[1] = 0
elif player == 2:
playerOneHotEncoded[0] = 0
playerOneHotEncoded[1] = 1
for x in range(board.shape[0]):
for y in range(board.shape[1]):
if board[x][y] == 1:
boardOneHotEncoded[x][y][0] = 1
boardOneHotEncoded[x][y][1] = 0
elif board[x][y] == 2:
boardOneHotEncoded[x][y][0] = 0
boardOneHotEncoded[x][y][1] = 1
else:
boardOneHotEncoded[x][y][0] = 0
boardOneHotEncoded[x][y][1] = 0
return (playerOneHotEncoded, boardOneHotEncoded)
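    # Shape sketch for the 2-D encoding above, assuming the default 7x6 board implied
    # by the hard-coded resize target (env is an assumed wrapper instance):
    #
    #   player_vec, board_vec = env.getHotEncodedState2d()
    #   player_vec.shape   # (2,)        one-hot of the player to move
    #   board_vec.shape    # (7, 6, 2)   [column][row][one-hot disc owner]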
def getCurrentPlayer(self):
return fourInARow.player
def renderHotEncodedState(self, hotEncodedState):
hotEncodedPlayer = hotEncodedState[0]
hotEncodedBoard = hotEncodedState[1]
print(hotEncodedPlayer)
if hotEncodedPlayer[0] == 1:
player = "X"
elif hotEncodedPlayer[1] == 1:
player = "O"
else:
print("No player in state")
print("Player:", player, "\n")
row = " "
for n in range(fourInARow.width):
row += str(n+1) + " "
print(row)
row = "|"
for _ in range(fourInARow.width):
row += "---|"
print(row)
for y in range(fourInARow.height):
row = "|"
for x in range(fourInARow.width):
color = 30# + hotEncodedBoard[2*x + (fourInARow.height-2*y)*fourInARow.width-1]
                character = "   "
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import str
from io import open
from dynet import *
import dynet
from utils import read_conll, read_conll_predict, write_conll, load_embeddings_file
from operator import itemgetter
import utils, time, random, decoder
import numpy as np
from mnnl import FFSequencePredictor, Layer, RNNSequencePredictor, BiRNNSequencePredictor
class OldSlavDep:
def __init__(self, vocab, pos, rels, w2i, c2i, options):
self.model = ParameterCollection()
random.seed(1)
self.trainer = RMSPropTrainer(self.model)
#if options.learning_rate is not None: #Uncomment if model is used to train new parser or update OldSlavNet
# self.trainer = RMSPropTrainer(self.model, options.learning_rate)
#print("RMSPropTrainer initial learning rate:", options.learning_rate)
self.activations = {'tanh': tanh,
'sigmoid': logistic,
'relu': rectify,
'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x)))
}
self.activation = self.activations[options.activation]
self.blstmFlag = options.blstmFlag
self.labelsFlag = options.labelsFlag
self.costaugFlag = options.costaugFlag
self.bibiFlag = options.bibiFlag
self.ldims = options.lstm_dims #because it is a bi-lstm (NP)
self.wdims = options.wembedding_dims
self.cdims = options.cembedding_dims
self.layers = options.lstm_layers
self.wordsCount = vocab
self.vocab = {word: ind + 3 for word, ind in w2i.items()}
self.pos = {word: ind for ind, word in enumerate(pos)}
self.id2pos = {ind: word for ind, word in enumerate(pos)}
self.c2i = c2i
self.rels = {word: ind for ind, word in enumerate(rels)}
self.irels = rels
self.pdims = options.pembedding_dims
self.vocab['*PAD*'] = 1
self.vocab['*INITIAL*'] = 2
self.wlookup = self.model.add_lookup_parameters((len(vocab) + 3, self.wdims))
self.clookup = self.model.add_lookup_parameters((len(c2i), self.cdims))
self.plookup = self.model.add_lookup_parameters((len(pos), self.pdims))
if options.external_embedding is not None:
ext_embeddings, ext_emb_dim = load_embeddings_file(options.external_embedding, lower=True)
assert (ext_emb_dim == self.wdims)
print("Initializing word embeddings by pre-trained vectors")
count = 0
for word in self.vocab:
_word = str(word, "utf-8")
if _word in ext_embeddings:
count += 1
self.wlookup.init_row(self.vocab[word], ext_embeddings[_word])
print(("Vocab size: %d; #words having pretrained vectors: %d" % (len(self.vocab), count)))
self.pos_builders = [VanillaLSTMBuilder(1, self.wdims + self.cdims * 2, self.ldims, self.model),
VanillaLSTMBuilder(1, self.wdims + self.cdims * 2, self.ldims, self.model)]
self.pos_bbuilders = [VanillaLSTMBuilder(1, self.ldims * 2, self.ldims, self.model),
VanillaLSTMBuilder(1, self.ldims * 2, self.ldims, self.model)]
if self.bibiFlag:
self.builders = [VanillaLSTMBuilder(1, self.wdims + self.cdims * 2 + self.pdims, self.ldims, self.model),
VanillaLSTMBuilder(1, self.wdims + self.cdims * 2 + self.pdims, self.ldims, self.model)]
self.bbuilders = [VanillaLSTMBuilder(1, self.ldims * 2, self.ldims, self.model),
VanillaLSTMBuilder(1, self.ldims * 2, self.ldims, self.model)]
elif self.layers > 0:
self.builders = [VanillaLSTMBuilder(self.layers, self.wdims + self.cdims * 2 + self.pdims, self.ldims, self.model),
VanillaLSTMBuilder(self.layers, self.wdims + self.cdims * 2 + self.pdims, self.ldims, self.model)]
else:
self.builders = [SimpleRNNBuilder(1, self.wdims + self.cdims * 2, self.ldims, self.model),
SimpleRNNBuilder(1, self.wdims + self.cdims * 2, self.ldims, self.model)]
self.ffSeqPredictor = FFSequencePredictor(Layer(self.model, self.ldims * 2, len(self.pos), softmax))
self.hidden_units = options.hidden_units
self.hidBias = self.model.add_parameters((self.ldims * 8))
self.hidLayer = self.model.add_parameters((self.hidden_units, self.ldims * 8))
self.hid2Bias = self.model.add_parameters((self.hidden_units))
self.outLayer = self.model.add_parameters((1, self.hidden_units if self.hidden_units > 0 else self.ldims * 8))
if self.labelsFlag:
self.rhidBias = self.model.add_parameters((self.ldims * 8))
self.rhidLayer = self.model.add_parameters((self.hidden_units, self.ldims * 8))
self.rhid2Bias = self.model.add_parameters((self.hidden_units))
self.routLayer = self.model.add_parameters(
(len(self.irels), self.hidden_units if self.hidden_units > 0 else self.ldims * 8))
self.routBias = self.model.add_parameters((len(self.irels)))
self.ffRelPredictor = FFSequencePredictor(
Layer(self.model, self.hidden_units if self.hidden_units > 0 else self.ldims * 8, len(self.irels),
softmax))
self.char_rnn = RNNSequencePredictor(LSTMBuilder(1, self.cdims, self.cdims, self.model))
def __getExpr(self, sentence, i, j):
if sentence[i].headfov is None:
sentence[i].headfov = concatenate([sentence[i].lstms[0], sentence[i].lstms[1]])
if sentence[j].modfov is None:
sentence[j].modfov = concatenate([sentence[j].lstms[0], sentence[j].lstms[1]])
_inputVector = concatenate(
[sentence[i].headfov, sentence[j].modfov, dynet.abs(sentence[i].headfov - sentence[j].modfov),
dynet.cmult(sentence[i].headfov, sentence[j].modfov)])
if self.hidden_units > 0:
output = self.outLayer.expr() * self.activation(
self.hid2Bias.expr() + self.hidLayer.expr() * self.activation(
_inputVector + self.hidBias.expr()))
else:
output = self.outLayer.expr() * self.activation(_inputVector + self.hidBias.expr())
return output
def __evaluate(self, sentence):
exprs = [[self.__getExpr(sentence, i, j) for j in range(len(sentence))] for i in range(len(sentence))]
scores = np.array([[output.scalar_value() for output in exprsRow] for exprsRow in exprs])
return scores, exprs
def pick_neg_log(self, pred, gold):
return -dynet.log(dynet.pick(pred, gold))
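    # pick_neg_log is the per-token cross-entropy term used for both the POS tagging
    # and relation losses below. Minimal sketch with a hypothetical 3-class output:
    #
    #   probs = dynet.softmax(dynet.inputVector([0.1, 2.0, 0.3]))
    #   loss = -dynet.log(dynet.pick(probs, 1))   # equivalent to pick_neg_log(probs, 1)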
def __getRelVector(self, sentence, i, j):
if sentence[i].rheadfov is None:
sentence[i].rheadfov = concatenate([sentence[i].lstms[0], sentence[i].lstms[1]])
if sentence[j].rmodfov is None:
sentence[j].rmodfov = concatenate([sentence[j].lstms[0], sentence[j].lstms[1]])
_outputVector = concatenate(
[sentence[i].rheadfov, sentence[j].rmodfov, abs(sentence[i].rheadfov - sentence[j].rmodfov),
cmult(sentence[i].rheadfov, sentence[j].rmodfov)])
if self.hidden_units > 0:
return self.rhid2Bias.expr() + self.rhidLayer.expr() * self.activation(
_outputVector + self.rhidBias.expr())
else:
return _outputVector
def Save(self, filename):
self.model.save(filename)
def Load(self, filename):
self.model.populate(filename)
def Predict(self, conll_path):
with open(conll_path) as conllFP:
for iSentence, sentence in enumerate(read_conll_predict(conllFP, self.c2i, self.wordsCount)):
conll_sentence = [entry for entry in sentence if isinstance(entry, utils.ConllEntry)]
for entry in conll_sentence:
wordvec = self.wlookup[int(self.vocab.get(entry.norm, 0))] if self.wdims > 0 else None
last_state = self.char_rnn.predict_sequence([self.clookup[c] for c in entry.idChars])[-1]
rev_last_state = self.char_rnn.predict_sequence([self.clookup[c] for c in reversed(entry.idChars)])[
-1]
entry.vec = concatenate([_f for _f in [wordvec, last_state, rev_last_state] if _f])
entry.pos_lstms = [entry.vec, entry.vec]
entry.headfov = None
entry.modfov = None
entry.rheadfov = None
entry.rmodfov = None
#Predicted pos tags
lstm_forward = self.pos_builders[0].initial_state()
lstm_backward = self.pos_builders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
lstm_forward = lstm_forward.add_input(entry.vec)
lstm_backward = lstm_backward.add_input(rentry.vec)
entry.pos_lstms[1] = lstm_forward.output()
rentry.pos_lstms[0] = lstm_backward.output()
for entry in conll_sentence:
entry.pos_vec = concatenate(entry.pos_lstms)
blstm_forward = self.pos_bbuilders[0].initial_state()
blstm_backward = self.pos_bbuilders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
blstm_forward = blstm_forward.add_input(entry.pos_vec)
blstm_backward = blstm_backward.add_input(rentry.pos_vec)
entry.pos_lstms[1] = blstm_forward.output()
rentry.pos_lstms[0] = blstm_backward.output()
concat_layer = [concatenate(entry.pos_lstms) for entry in conll_sentence]
outputFFlayer = self.ffSeqPredictor.predict_sequence(concat_layer)
predicted_pos_indices = [np.argmax(o.value()) for o in outputFFlayer]
predicted_postags = [self.id2pos[idx] for idx in predicted_pos_indices]
# Add predicted pos tags for parsing prediction
for entry, posid in zip(conll_sentence, predicted_pos_indices):
entry.vec = concatenate([entry.vec, self.plookup[posid]])
entry.lstms = [entry.vec, entry.vec]
if self.blstmFlag:
lstm_forward = self.builders[0].initial_state()
lstm_backward = self.builders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
lstm_forward = lstm_forward.add_input(entry.vec)
lstm_backward = lstm_backward.add_input(rentry.vec)
entry.lstms[1] = lstm_forward.output()
rentry.lstms[0] = lstm_backward.output()
if self.bibiFlag:
for entry in conll_sentence:
entry.vec = concatenate(entry.lstms)
blstm_forward = self.bbuilders[0].initial_state()
blstm_backward = self.bbuilders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
blstm_forward = blstm_forward.add_input(entry.vec)
blstm_backward = blstm_backward.add_input(rentry.vec)
entry.lstms[1] = blstm_forward.output()
rentry.lstms[0] = blstm_backward.output()
scores, exprs = self.__evaluate(conll_sentence)
heads = decoder.parse_proj(scores)
                # Multiple roots: re-attach any extra root to the previously seen root
rootCount = 0
rootWid = -1
for index, head in enumerate(heads):
if head == 0:
rootCount += 1
if rootCount == 1:
rootWid = index
if rootCount > 1:
heads[index] = rootWid
rootWid = index
for entry, head, pos in zip(conll_sentence, heads, predicted_postags):
entry.pred_parent_id = head
entry.pred_relation = '_'
entry.pred_pos = pos
dump = False
if self.labelsFlag:
concat_layer = [self.__getRelVector(conll_sentence, head, modifier + 1) for modifier, head in
enumerate(heads[1:])]
outputFFlayer = self.ffRelPredictor.predict_sequence(concat_layer)
predicted_rel_indices = [np.argmax(o.value()) for o in outputFFlayer]
predicted_rels = [self.irels[idx] for idx in predicted_rel_indices]
for modifier, head in enumerate(heads[1:]):
conll_sentence[modifier + 1].pred_relation = predicted_rels[modifier]
renew_cg()
if not dump:
yield sentence
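    # Hedged usage sketch for Predict (file names are hypothetical; write_conll is
    # imported at the top of this module):
    #
    #   parser = OldSlavDep(vocab, pos, rels, w2i, c2i, options)
    #   parser.Load("oldslav.model")
    #   write_conll("parsed.conllu", parser.Predict("test.conllu"))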
def Train(self, conll_path):
eloss = 0.0
mloss = 0.0
eerrors = 0
etotal = 0
start = time.time()
with open(conll_path) as conllFP:
shuffledData = list(read_conll(conllFP, self.c2i))
random.shuffle(shuffledData)
errs = []
lerrs = []
posErrs = []
for iSentence, sentence in enumerate(shuffledData):
if iSentence % 500 == 0 and iSentence != 0:
print("Processing sentence number: %d" % iSentence, ", Loss: %.4f" % (
eloss / etotal), ", Time: %.2f" % (time.time() - start))
start = time.time()
eerrors = 0
eloss = 0.0
etotal = 0
conll_sentence = [entry for entry in sentence if isinstance(entry, utils.ConllEntry)]
for entry in conll_sentence:
c = float(self.wordsCount.get(entry.norm, 0))
dropFlag = (random.random() < (c / (0.25 + c)))
wordvec = self.wlookup[
int(self.vocab.get(entry.norm, 0)) if dropFlag else 0] if self.wdims > 0 else None
last_state = self.char_rnn.predict_sequence([self.clookup[c] for c in entry.idChars])[-1]
rev_last_state = self.char_rnn.predict_sequence([self.clookup[c] for c in reversed(entry.idChars)])[
-1]
entry.vec = dynet.dropout(concatenate([_f for _f in [wordvec, last_state, rev_last_state] if _f]), 0.33)
entry.pos_lstms = [entry.vec, entry.vec]
entry.headfov = None
entry.modfov = None
entry.rheadfov = None
entry.rmodfov = None
#POS tagging loss
lstm_forward = self.pos_builders[0].initial_state()
lstm_backward = self.pos_builders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
lstm_forward = lstm_forward.add_input(entry.vec)
lstm_backward = lstm_backward.add_input(rentry.vec)
entry.pos_lstms[1] = lstm_forward.output()
rentry.pos_lstms[0] = lstm_backward.output()
for entry in conll_sentence:
entry.pos_vec = concatenate(entry.pos_lstms)
blstm_forward = self.pos_bbuilders[0].initial_state()
blstm_backward = self.pos_bbuilders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
blstm_forward = blstm_forward.add_input(entry.pos_vec)
blstm_backward = blstm_backward.add_input(rentry.pos_vec)
entry.pos_lstms[1] = blstm_forward.output()
rentry.pos_lstms[0] = blstm_backward.output()
concat_layer = [dynet.dropout(concatenate(entry.pos_lstms), 0.33) for entry in conll_sentence]
outputFFlayer = self.ffSeqPredictor.predict_sequence(concat_layer)
posIDs = [self.pos.get(entry.pos) for entry in conll_sentence]
for pred, gold in zip(outputFFlayer, posIDs):
posErrs.append(self.pick_neg_log(pred, gold))
# Add predicted pos tags
for entry, poses in zip(conll_sentence, outputFFlayer):
entry.vec = concatenate([entry.vec, dynet.dropout(self.plookup[np.argmax(poses.value())], 0.33)])
entry.lstms = [entry.vec, entry.vec]
#Parsing losses
if self.blstmFlag:
lstm_forward = self.builders[0].initial_state()
lstm_backward = self.builders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
lstm_forward = lstm_forward.add_input(entry.vec)
lstm_backward = lstm_backward.add_input(rentry.vec)
entry.lstms[1] = lstm_forward.output()
rentry.lstms[0] = lstm_backward.output()
if self.bibiFlag:
for entry in conll_sentence:
                            entry.vec = concatenate(entry.lstms)
"object dependencies.",
req.status_code, nb_id
)
log.debug("NetBox %s status body: %s", req.status_code, req.json())
else:
raise SystemExit(
                log.critical(
                    "Well, this is unexpected. Please report this. "
"%s request received %s status with body '%s' and response "
"'%s'.",
req_type.upper(), req.status_code, data, req.json()
)
)
return result
def obj_exists(self, nb_obj_type, vc_data):
"""
Checks whether a NetBox object exists and matches the vCenter object.
If object does not exist or does not match the vCenter object it will
be created or updated.
nb_obj_type: String NetBox object type to query for and compare against
vc_data: Dictionary of vCenter object key value pairs pre-formatted for
NetBox
"""
# NetBox Device Types objects do not have names to query; we catch
# and use the model instead
query_key = self.obj_map[nb_obj_type]["key"]
# Create a query specific to the device parent/child relationship when
# working with interfaces
if nb_obj_type == "interfaces":
query = "?device={}&{}={}".format(
vc_data["device"]["name"], query_key, vc_data[query_key]
)
elif nb_obj_type == "virtual_interfaces":
query = "?virtual_machine={}&{}={}".format(
vc_data["virtual_machine"]["name"], query_key,
vc_data[query_key]
)
else:
query = "?{}={}".format(query_key, vc_data[query_key])
req = self.request(
req_type="get", nb_obj_type=nb_obj_type,
query=query
)
# A single matching object is found so we compare its values to the new
# object
if req["count"] == 1:
log.debug(
"NetBox %s object '%s' already exists. Comparing values.",
nb_obj_type, vc_data[query_key]
)
nb_data = req["results"][0]
# Objects that have been previously tagged as orphaned but then
# reappear in vCenter need to be stripped of their orphaned status
if "tags" in vc_data and "Orphaned" in nb_data["tags"]:
log.info(
"NetBox %s object '%s' is currently marked as orphaned "
"but has reappeared in vCenter. Updating NetBox.",
nb_obj_type, vc_data[query_key]
)
self.request(
req_type="put", nb_obj_type=nb_obj_type, data=vc_data,
nb_id=nb_data["id"]
)
elif compare_dicts(
vc_data, nb_data, dict1_name="vc_data",
dict2_name="nb_data"):
                log.info(
                    "NetBox %s object '%s' matches current values. Moving on.",
nb_obj_type, vc_data[query_key]
)
else:
                log.info(
                    "NetBox %s object '%s' does not match current values.",
nb_obj_type, vc_data[query_key]
)
# Issue #1: Ensure existing and new tags are merged together
# This allows users to add alternative tags or sync from
# multiple vCenter instances
if "tags" in vc_data:
log.debug("Merging tags between vCenter and NetBox object.")
vc_data["tags"] = list(
set(vc_data["tags"] + nb_data["tags"])
)
self.request(
req_type="put", nb_obj_type=nb_obj_type, data=vc_data,
nb_id=nb_data["id"]
)
elif req["count"] > 1:
log.warning(
"Search for NetBox %s object '%s' returned %s results but "
"should have only returned 1. Please manually review and "
"report this if the data is accurate. Skipping for safety.",
nb_obj_type, vc_data[query_key], req["count"]
)
else:
            log.info(
                "NetBox %s '%s' object not found. Requesting creation.",
nb_obj_type,
vc_data[query_key],
)
self.request(
req_type="post", nb_obj_type=nb_obj_type, data=vc_data
)
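    # Hedged usage sketch for obj_exists: the payload must already be formatted for
    # NetBox and keyed by the field named in self.obj_map (e.g. "address" for
    # ip_addresses). The instance name and values below are illustrative only.
    #
    #   nb.obj_exists(
    #       nb_obj_type="ip_addresses",
    #       vc_data={"address": "10.0.0.5/24", "tags": ["Synced", "vCenter"]},
    #   )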
def sync_objects(self, vc_obj_type):
"""
Collects objects from vCenter and syncs them to NetBox.
Some object types do not support tags so they will be a one-way sync
meaning orphaned objects will not be removed from NetBox.
"""
# Collect data from vCenter
log.info(
"Initiated sync of vCenter %s objects to NetBox.",
vc_obj_type[:-1]
)
vc_objects = self.vc.get_objects(vc_obj_type=vc_obj_type)
# Determine each NetBox object type collected from vCenter
nb_obj_types = list(vc_objects.keys())
for nb_obj_type in nb_obj_types:
log.info(
"Starting sync of %s vCenter %s object%s to NetBox %s "
"object%s.",
len(vc_objects[nb_obj_type]),
vc_obj_type,
"s" if len(vc_objects[nb_obj_type]) != 1 else "",
nb_obj_type,
"s" if len(vc_objects[nb_obj_type]) != 1 else "",
)
for obj in vc_objects[nb_obj_type]:
# Check to ensure IP addresses pass all checks before syncing
# to NetBox
if nb_obj_type == "ip_addresses":
ip_addr = obj["address"]
if verify_ip(ip_addr):
log.debug(
"IP %s has passed necessary pre-checks.",
ip_addr
)
                    # Update IP address to CIDR notation for comparison
# with existing NetBox objects
obj["address"] = format_ip(ip_addr)
# Search for parent prefix to assign VRF and tenancy
prefix = self.search_prefix(obj["address"])
# Update placeholder values with matched values
obj["vrf"] = prefix["vrf"]
obj["tenant"] = prefix["tenant"]
else:
log.debug(
"IP %s has failed necessary pre-checks. Skipping "
"sync to NetBox.", ip_addr,
)
continue
self.obj_exists(nb_obj_type=nb_obj_type, vc_data=obj)
log.info(
"Finished sync of %s vCenter %s object%s to NetBox %s "
"object%s.",
len(vc_objects[nb_obj_type]),
vc_obj_type,
"s" if len(vc_objects[nb_obj_type]) != 1 else "",
nb_obj_type,
"s" if len(vc_objects[nb_obj_type]) != 1 else "",
)
# Send vCenter objects to the pruner
if settings.NB_PRUNE_ENABLED:
self.prune_objects(vc_objects, vc_obj_type)
def prune_objects(self, vc_objects, vc_obj_type):
"""
Collects NetBox objects and checks if they still exist in vCenter.
If NetBox objects are not found in the supplied vc_objects data then
they will go through a pruning process.
vc_objects: Dictionary of VC object types and list of their objects
        vc_obj_type: The parent object type called during the sync. This is
used to determine whether special filtering needs to be applied.
"""
# Determine qualifying object types based on object map
nb_obj_types = [t for t in vc_objects if self.obj_map[t]["prune"]]
        # Sort qualifying NetBox object types by prune priority. This ensures
# we do not have issues with deleting due to orphaned dependencies.
nb_obj_types = sorted(
nb_obj_types, key=lambda t: self.obj_map[t]["prune_pref"],
reverse=True
)
for nb_obj_type in nb_obj_types:
log.info(
"Comparing existing NetBox %s objects to current vCenter "
"objects for pruning eligibility.", nb_obj_type
)
nb_objects = self.request(
req_type="get", nb_obj_type=nb_obj_type,
# For tags we cannot search strings containing a period as of
# NetBox 2.6.7 so we search on the slug to be safe
query="?tag={}".format(format_slug(self.vc_tag))
)["results"]
# Certain NetBox object types overlap between vCenter object types
# When pruning, we must differentiate so as not to compare against
# the wrong objects
if vc_obj_type == "hosts" and nb_obj_type == "interfaces":
nb_objects = [
obj for obj in nb_objects
if obj["device"] is not None
]
elif vc_obj_type == "hosts" and nb_obj_type == "ip_addresses":
nb_objects = [
obj for obj in nb_objects
if obj["interface"]["device"] is not None
]
elif vc_obj_type == "virtual_machines" and \
nb_obj_type == "interfaces":
nb_objects = [
obj for obj in nb_objects
if obj["virtual_machine"] is not None
]
elif vc_obj_type == "virtual_machines" and \
nb_obj_type == "ip_addresses":
nb_objects = [
obj for obj in nb_objects
if obj["interface"]["virtual_machine"] is not None
]
# From the vCenter objects provided collect only the names/models of
# each object from the current type we're comparing against
query_key = self.obj_map[nb_obj_type]["key"]
vc_obj_values = [obj[query_key] for obj in vc_objects[nb_obj_type]]
orphans = [
obj for obj in nb_objects if obj[query_key] not in vc_obj_values
]
log.info(
"Comparison completed. %s %s orphaned NetBox object%s did not "
"match.",
len(orphans), nb_obj_type, "s" if len(orphans) != 1 else ""
)
log.debug("The following objects did not match: %s", orphans)
# Pruned items are checked against the prune timer
# All pruned items are first tagged so it is clear why they were
# deleted, and then those items which are greater than the max age
# will be deleted permanently
for orphan in orphans:
log.info(
"Processing orphaned NetBox %s '%s' object.",
nb_obj_type, orphan[query_key]
)
if "Orphaned" not in orphan["tags"]:
log.info(
"No tag found. Adding 'Orphaned' tag to %s '%s' "
"object.",
nb_obj_type, orphan[query_key]
)
tags = {
"tags": ["Synced", "vCenter", self.vc_tag, "Orphaned"]
}
self.request(
req_type="patch", nb_obj_type=nb_obj_type,
nb_id=orphan["id"],
data=tags
)
# Check if the orphan has gone past the max prune timer and
# needs to be deleted
# Dates are in YY, MM, DD format
current_date = date.today()
# Some objects do not have a last_updated field so we must
# handle that gracefully and send for deletion
del_obj = False
try:
modified_date = date(
int(orphan["last_updated"][:4]), # Year
int(orphan["last_updated"][5:7]), # Month
int(orphan["last_updated"][8:10]) # Day
)
                        # Calculate the timedelta and convert it to the number of days
days_orphaned = (current_date - modified_date).days
if days_orphaned >= settings.NB_PRUNE_DELAY_DAYS:
log.info(
"The %s '%s' object has exceeded the %s day max "
"for orphaned objects. Sending it for deletion.",
nb_obj_type, orphan[query_key],
settings.NB_PRUNE_DELAY_DAYS
)
del_obj = True
else:
log.info(
"The %s '%s' object has been orphaned for %s of %s "
"max days. Proceeding to next object.",
nb_obj_type, orphan[query_key], days_orphaned,
settings.NB_PRUNE_DELAY_DAYS
)
except KeyError as err:
log.debug(
"The %s '%s' object does not have a %s "
"field. Sending it for deletion.",
nb_obj_type, orphan[query_key], err
)
del_obj = True
if del_obj:
self.request(
req_type="delete", nb_obj_type=nb_obj_type,
nb_id=orphan["id"],
)
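    # Note on the date handling above: "last_updated" is an ISO-8601 timestamp, so the
    # manual slicing could equivalently use the standard library (illustrative
    # alternative only, not the original implementation):
    #
    #   from datetime import datetime
    #   modified_date = datetime.fromisoformat(orphan["last_updated"][:10]).date()
    #   days_orphaned = (date.today() - modified_date).days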
def search_prefix(self, ip_addr):
"""
        Queries NetBox for the parent prefix of the supplied IP address.
<reponame>jacer2020/ui2
# coding: utf-8
#
from __future__ import absolute_import, print_function
import base64
import io
import logging
import re
import time
import warnings
import xml.dom.minidom
import requests
import six
from retry import retry
from uiautomator2.exceptions import (RetryError, NullPointerExceptionError,
UiObjectNotFoundError,
UiautomatorQuitError)
from uiautomator2.utils import Exists, U, check_alive, hooks_wrap, intersect, cache_return
from uiautomator2.swipe import SwipeExt
_INPUT_METHOD_RE = re.compile(r'mCurMethodId=([-_./\w]+)')
_fail_prompt_enabled = False
def set_fail_prompt(enable=True):
"""
    When an element action raises an exception, prompt the user to decide whether to retry, skip, or abort
"""
global _fail_prompt_enabled
_fail_prompt_enabled = enable
def _failprompt(fn):
def _inner(self, *args, **kwargs):
if not _fail_prompt_enabled:
return fn(self, *args, **kwargs)
from uiautomator2 import messagebox
try:
return fn(self, *args, **kwargs)
except UiObjectNotFoundError as e:
result = messagebox.retryskipabort(str(e), 30)
if result == 'retry':
return _inner(self, *args, **kwargs)
elif result == 'skip':
return True
else:
raise
return _inner
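# Hedged usage sketch for the prompt-on-failure switch above (assumes a device is
# reachable over adb and that messagebox can open a dialog on this machine):
#
#   import uiautomator2 as u2
#   d = u2.connect()
#   set_fail_prompt(True)          # ask retry/skip/abort when an element is missing
#   d(text="Settings").click()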
class Session(object):
__orientation = ( # device orientation
(0, "natural", "n", 0), (1, "left", "l", 90),
(2, "upsidedown", "u", 180), (3, "right", "r", 270))
def __init__(self, server, pkg_name=None, pid=None):
self.server = server
self._pkg_name = pkg_name
self._pid = pid
self._jsonrpc = server.jsonrpc
if pid and pkg_name:
jsonrpc_url = server.path2url('/session/%d:%s/jsonrpc/0' %
(pid, pkg_name))
self._jsonrpc = server.setup_jsonrpc(jsonrpc_url)
def __repr__(self):
if self._pid and self._pkg_name:
return "<uiautomator2.Session pid:%d pkgname:%s>" % (
self._pid, self._pkg_name)
return super(Session, self).__repr__()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _update_pid(self, pid: int):
""" Update package running pid """
self._pid = pid
jsonrpc_url = self.server.path2url('/session/%d:%s/jsonrpc/0' %
(pid, self._pkg_name))
self._jsonrpc = self.server.setup_jsonrpc(jsonrpc_url)
@property
@cache_return
def widget(self):
from uiautomator2.widget import Widget
return Widget(self)
@property
@cache_return
def swipe_ext(self):
return SwipeExt(self.server)
def _find_element(self, xpath: str, _class=None, pos=None, activity=None, package=None):
raise NotImplementedError()
def implicitly_wait(self, seconds=None):
"""set default wait timeout
Args:
seconds(float): to wait element show up
Deprecated:
recommend use: d.settings['wait_timeout'] = 10
"""
if seconds is None:
return self.server.settings['wait_timeout']
else:
self.server.settings["wait_timeout"] = seconds
# if seconds is not None:
# self.server.wait_timeout = seconds
# return self.server.wait_timeout
def close(self):
""" close app """
if self._pkg_name:
self.server.app_stop(self._pkg_name)
def restart(self, use_monkey=False):
"""
Stop app and start
Raises:
RuntimeError
"""
self.close()
self.server.app_start(self._pkg_name, use_monkey=use_monkey)
pid = self.server.app_wait(self._pkg_name, timeout=3)
if not pid:
raise RuntimeError("app start failed")
self._update_pid(pid)
def running(self):
"""
        Check if the session is running. Returns bool.
"""
if self._pid and self._pkg_name:
ping_url = self.server.path2url('/session/%d:%s/ping' %
(self._pid, self._pkg_name))
return self.server._reqsess.get(ping_url).text.strip() == 'pong'
# warnings.warn("pid and pkg_name is not set, ping will always return True", Warning, stacklevel=1)
return True
@property
def jsonrpc(self):
return self._jsonrpc
@property
def pos_rel2abs(self):
size = []
def convert(x, y):
assert x >= 0
assert y >= 0
if (x < 1 or y < 1) and not size:
size.extend(
self.server.window_size()) # size will be [width, height]
if x < 1:
x = int(size[0] * x)
if y < 1:
y = int(size[1] * y)
return x, y
return convert
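    # The converter above accepts either absolute pixels or 0..1 ratios. Illustrative
    # values, assuming a 1080x1920 window reported by window_size():
    #
    #   convert = self.pos_rel2abs
    #   convert(540, 960)    # -> (540, 960), both >= 1 so passed through
    #   convert(0.5, 0.5)    # -> (540, 960), scaled by the window size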
def make_toast(self, text, duration=1.0):
""" Show toast
Args:
text (str): text to show
duration (float): seconds of display
"""
warnings.warn("Use d.toast.show(text, duration) instead.",
DeprecationWarning,
stacklevel=2)
return self.jsonrpc.makeToast(text, duration * 1000)
@property
def toast(self):
obj = self
class Toast(object):
def get_message(self,
wait_timeout=10,
cache_timeout=10,
default=None):
"""
Args:
                    wait_timeout: max seconds to wait if the toast does not show right away
                    cache_timeout: return immediately if a toast was shown within the last $cache_timeout seconds
                    default: default message to return when no toast shows up
Returns:
None or toast message
"""
deadline = time.time() + wait_timeout
while 1:
message = obj.jsonrpc.getLastToast(cache_timeout * 1000)
if message:
return message
if time.time() > deadline:
return default
time.sleep(.5)
def reset(self):
return obj.jsonrpc.clearLastToast()
def show(self, text, duration=1.0):
return obj.jsonrpc.makeToast(text, duration * 1000)
return Toast()
@check_alive
    def set_fastinput_ime(self, enable=True):
        """ Enable or Disable FastInputIME """
fast_ime = 'com.github.uiautomator/.FastInputIME'
if enable:
self.server.shell(['ime', 'enable', fast_ime])
self.server.shell(['ime', 'set', fast_ime])
else:
self.server.shell(['ime', 'disable', fast_ime])
@check_alive
def send_keys(self, text: str, clear: bool = False):
"""
Args:
text (str): text to set
clear (bool): clear before set text
Raises:
EnvironmentError
"""
try:
self.wait_fastinput_ime()
btext = U(text).encode('utf-8')
base64text = base64.b64encode(btext).decode()
cmd = "ADB_SET_TEXT" if clear else "ADB_INPUT_TEXT"
self.server.shell(
['am', 'broadcast', '-a', cmd, '--es', 'text', base64text])
return True
except EnvironmentError:
warnings.warn(
"set FastInputIME failed. use \"d(focused=True).set_text instead\"",
Warning)
return self(focused=True).set_text(text)
# warnings.warn("set FastInputIME failed. use \"adb shell input text\" instead", Warning)
# self.server.shell(["input", "text", text.replace(" ", "%s")])
@check_alive
def send_action(self, code):
"""
        Simulate an input method editor code
Args:
code (str or int): input method editor code
Examples:
send_action("search"), send_action(3)
Refs:
https://developer.android.com/reference/android/view/inputmethod/EditorInfo
"""
self.wait_fastinput_ime()
__alias = {
"go": 2,
"search": 3,
"send": 4,
"next": 5,
"done": 6,
"previous": 7,
}
if isinstance(code, six.string_types):
code = __alias.get(code, code)
self.server.shell([
'am', 'broadcast', '-a', 'ADB_EDITOR_CODE', '--ei', 'code',
str(code)
])
@check_alive
def clear_text(self):
""" clear text
Raises:
EnvironmentError
"""
try:
self.wait_fastinput_ime()
self.server.shell(['am', 'broadcast', '-a', 'ADB_CLEAR_TEXT'])
except EnvironmentError:
# for Android simulator
self(focused=True).clear_text()
    def wait_fastinput_ime(self, timeout=5.0):
        """ wait until FastInputIME is ready
        Args:
            timeout(float): maximum wait time
Raises:
EnvironmentError
"""
        if not self.server.serial:  # probably an emulator, e.g. Genymotion or Haimawan (海马玩模拟器)
raise EnvironmentError("Android simulator is not supported.")
deadline = time.time() + timeout
while time.time() < deadline:
ime_id, shown = self.current_ime()
if ime_id != "com.github.uiautomator/.FastInputIME":
self.set_fastinput_ime(True)
time.sleep(0.5)
continue
if shown:
return True
time.sleep(0.2)
        raise EnvironmentError("FastInputIME failed to start")
def current_ime(self):
""" Current input method
Returns:
(method_id(str), shown(bool)
Example output:
("com.github.uiautomator/.FastInputIME", True)
"""
dim, _ = self.server.shell(['dumpsys', 'input_method'])
m = _INPUT_METHOD_RE.search(dim)
method_id = None if not m else m.group(1)
shown = "mInputShown=true" in dim
return (method_id, shown)
def tap(self, x, y):
"""
alias of click
"""
self.click(x, y)
@property
def touch(self):
"""
ACTION_DOWN: 0 ACTION_MOVE: 2
touch.down(x, y)
touch.move(x, y)
touch.up()
"""
ACTION_DOWN = 0
ACTION_MOVE = 2
ACTION_UP = 1
obj = self
class _Touch(object):
def down(self, x, y):
obj.jsonrpc.injectInputEvent(ACTION_DOWN, x, y, 0)
return self
def move(self, x, y):
obj.jsonrpc.injectInputEvent(ACTION_MOVE, x, y, 0)
return self
def up(self, x, y):
""" ACTION_UP x, y """
obj.jsonrpc.injectInputEvent(ACTION_UP, x, y, 0)
return self
def sleep(self, seconds: float):
time.sleep(seconds)
return self
return _Touch()
def click(self, x, y):
"""
click position
"""
x, y = self.pos_rel2abs(x, y)
self._click(x, y)
@hooks_wrap
def _click(self, x, y):
self.jsonrpc.click(x, y)
if self.server.click_post_delay: # click code delay
time.sleep(self.server.click_post_delay)
def double_click(self, x, y, duration=0.1):
"""
double click position
"""
x, y = self.pos_rel2abs(x, y)
self.touch.down(x, y)
self.touch.up(x, y)
time.sleep(duration)
self.click(x, y) # use click last is for htmlreport
def long_click(self, x, y, duration=None):
'''long click at arbitrary coordinates.
Args:
duration (float): seconds of pressed
'''
if not duration:
duration = 0.5
x, y = self.pos_rel2abs(x, y)
return self._long_click(x, y, duration)
@hooks_wrap
def _long_click(self, x, y, duration):
self.touch.down(x, y)
# self.touch.move(x, y) # maybe can fix
time.sleep(duration)
self.touch.up(x, y)
return self
def swipe(self, fx, fy, tx, ty, duration=0.1, steps=None):
"""
Args:
fx, fy: from position
tx, ty: to position
duration (float): duration
            steps: 1 step is about 5 ms; if set, duration will be ignored
        Documents:
            uiautomator uses steps instead of duration
            As the documentation says: Each step execution is throttled to 5ms per step.
Links:
https://developer.android.com/reference/android/support/test/uiautomator/UiDevice.html#swipe%28int,%20int,%20int,%20int,%20int%29
"""
rel2abs = self.pos_rel2abs
fx, fy = rel2abs(fx, fy)
tx, ty = rel2abs(tx, ty)
if not steps:
steps = int(duration * 200)
self._swipe(fx, fy, tx, ty, steps)
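    # Worked example of the duration->steps conversion above: duration=0.1 gives
    # int(0.1 * 200) = 20 steps, i.e. roughly 20 * 5 ms = 100 ms of motion
    # (d is an assumed connected device/session):
    #
    #   d.swipe(0.1, 0.5, 0.9, 0.5, duration=0.1)   # left-to-right swipe, ~20 steps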
@hooks_wrap
def _swipe(self, fx, fy, tx, ty, steps):
return self.jsonrpc.swipe(fx, fy, tx, ty, steps)
def swipe_points(self, points, duration=0.5):
"""
Args:
            points: a point array containing at least one point object, e.g. [[200, 300], [210, 320]]
duration: duration to inject between two points
Links:
https://developer.android.com/reference/android/support/test/uiautomator/UiDevice.html#swipe(android.graphics.Point[], int)
"""
ppoints = []
rel2abs = self.pos_rel2abs
for p in points:
x, y = rel2abs(p[0], p[1])
ppoints.append(x)
ppoints.append(y)
return self.jsonrpc.swipePoints(ppoints, int(duration * 200))
def drag(self, sx, sy, ex, ey, duration=0.5):
'''Swipe from one point to another point.'''
rel2abs = self.pos_rel2abs
sx, sy = rel2abs(sx, sy)
ex, ey = rel2abs(ex, ey)
return self.jsonrpc.drag(sx, sy, ex, ey, int(duration * 200))
@retry((IOError, SyntaxError), delay=.5, tries=5, jitter=0.1,
max_delay=1) # delay .5, .6, .7, .8 ...
def screenshot(self, filename=None, format='pillow'):
"""
Image format is JPEG
Args:
filename (str): saved filename
format (string): used when filename is empty. one of "pillow" or "opencv"
Raises:
IOError, SyntaxError
Examples:
screenshot("saved.jpg")
screenshot().save("saved.png")
cv2.imwrite('saved.jpg', screenshot(format='opencv'))
"""
        # Another way to take a screenshot is to use jsonrpc
# self.jsonrpc.takeScreenshot(1.0, 70) # scale, quality -> base64
r = requests.get(self.server.screenshot_uri, timeout=10)
if filename:
with open(filename, 'wb') as f:
f.write(r.content)
                return filename
(left_index and right_index):
left_on = right_on = list(same_named_columns)
return left_on, right_on
@staticmethod
def _get_left_right_indices(lhs, rhs, left_on, right_on, left_index, right_index):
"""
Calculate left and right column indices to perform shuffle on
this is based on the "join" function in cudf file:
cudf/_lib/join.pyx
"""
if left_on is None:
left_on = []
if right_on is None:
right_on = []
left_on_ind = []
right_on_ind = []
if left_index or right_index:
# If either true, we need to process both indices as columns
left_join_cols = list(lhs._index_names) + list(lhs._column_names)
right_join_cols = list(rhs._index_names) + list(rhs._column_names)
if left_index and right_index:
# Both dataframes must take index column indices
left_on_indices = right_on_indices = range(lhs._num_indices)
elif left_index:
# Joins left index columns with right 'on' columns
left_on_indices = range(lhs._num_indices)
right_on_indices = [
right_join_cols.index(on_col) for on_col in right_on
]
elif right_index:
# Joins right index columns with left 'on' columns
right_on_indices = range(rhs._num_indices)
left_on_indices = [
left_join_cols.index(on_col) for on_col in left_on
]
for i_l, i_r in zip(left_on_indices, right_on_indices):
left_on_ind.append(i_l)
right_on_ind.append(i_r)
else:
left_join_cols = list(lhs._index_names) + list(lhs._column_names)
right_join_cols = list(rhs._index_names) + list(rhs._column_names)
# If both left/right_index, joining on indices plus additional cols
# If neither, joining on just cols, not indices
# In both cases, must match up additional column indices in lhs/rhs
if left_index == right_index:
for name in left_on:
left_on_ind.append(left_join_cols.index(name))
for name in right_on:
right_on_ind.append(right_join_cols.index(name))
return left_on_ind, right_on_ind
def _get_column_indices(self) -> List[int]:
"""
Get the column indices excluding index columns
:return: list of ints
"""
lists = DataFrame._get_all_column_indices([self])
return lists[0]
@staticmethod
def _get_all_column_indices(dfs) -> List[List[int]]:
"""
Get indices of all DataFrames excluding index columns
This is to calculate indices of columns that will be used
to perform partitioning/shuffling on the dataframe
:param dfs: list of DataFrame objects
:return: list of list of column indices
"""
        all_df_indices = []
for cdf in dfs:
df_indices = [*range(cdf._cdf._num_indices, cdf._cdf._num_indices + cdf._cdf._num_columns)]
all_df_indices.append(df_indices)
return all_df_indices
@staticmethod
def _get_all_common_indices(dfs) -> List[List[int]]:
"""
Get indices of all columns common in all DataFrames
Columns might be in different indices in different DataFrames
This is to calculate indices of columns that will be used
to perform partitioning/shuffling on the dataframe
:param dfs: list of DataFrame objects
:return: list of list of column indices
"""
        # get the intersection of all column names
common_columns_names = DataFrame._get_common_column_names(dfs)
if len(common_columns_names) == 0:
            raise ValueError("There are no common column names among the provided DataFrame objects")
        all_df_indices = []
for cdf in dfs:
df_indices = []
col_names = list(cdf._cdf._index_names) + list(cdf._cdf._column_names)
for name in common_columns_names:
df_indices.append(col_names.index(name))
all_df_indices.append(df_indices)
return all_df_indices
@staticmethod
def _get_common_column_names(dfs) -> List[str]:
"""
        Get common column names in the provided DataFrames
:param dfs: list of DataFrame objects
:return: list of column names that are common to all DataFrames
"""
column_name_lists = [list(obj._cdf._column_names) for obj in dfs]
common_column_names = set(column_name_lists[0])
for column_names in column_name_lists[1:]:
common_column_names = common_column_names & set(column_names)
        return list(common_column_names)
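    # Illustrative behaviour of the helper above (column names are hypothetical):
    #
    #   df1 columns: ["id", "name", "age"], df2 columns: ["id", "city", "name"]
    #   DataFrame._get_common_column_names([df1, df2])   # -> the common names "id" and "name" (order not guaranteed)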
def drop_duplicates(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
inplace: bool = False,
ignore_index: bool = False,
env: CylonEnv = None) -> Union[DataFrame or None]:
"""
Remove duplicate rows from the DataFrame.
Considering certain columns is optional. Indexes, including time indexes
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False: Drop all duplicates.
inplace : bool, default False
Whether to drop duplicates in place or to return a copy.
            inplace is supported only in local mode;
            when there are multiple workers in the computation, inplace is disabled.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
env: CylonEnv object
Returns
-------
DataFrame or None
DataFrame with duplicates removed or
None if ``inplace=True`` and in the local mode with no distributed workers.
"""
subset = self._convert_subset(subset=subset, ignore_len_check=True)
if env is None or env.world_size == 1:
dropped_df = self._cdf.drop_duplicates(subset=subset, keep=keep, inplace=inplace, ignore_index=ignore_index)
return DataFrame.from_cudf(dropped_df) if not inplace else None
shuffle_column_indices = []
for name in subset:
shuffle_column_indices.append(self._cdf._num_indices + self._cdf._column_names.index(name))
shuffled_df = _shuffle(self._cdf, hash_columns=shuffle_column_indices, env=env)
dropped_df = shuffled_df.drop_duplicates(subset=subset, keep=keep, inplace=inplace, ignore_index=ignore_index)
return DataFrame.from_cudf(shuffled_df) if inplace else DataFrame.from_cudf(dropped_df)
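    # Hedged usage sketch (the cudf frame and the CylonEnv construction are
    # illustrative and depend on the runtime setup):
    #
    #   df = DataFrame.from_cudf(cudf.DataFrame({"a": [1, 1, 2], "b": [3, 3, 4]}))
    #   local_unique = df.drop_duplicates(subset=["a", "b"])            # local path
    #   dist_unique = df.drop_duplicates(subset=["a", "b"], env=env)    # shuffles on "a","b" first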
def set_index(
self,
keys,
drop=True,
append=False,
inplace=False,
verify_integrity=False,
) -> Union[DataFrame or None]:
"""Return a new DataFrame with a new index
Parameters
----------
keys : Index, Series-convertible, label-like, or list
Index : the new index.
Series-convertible : values for the new index.
Label-like : Label of column to be used as index.
List : List of items from above.
drop : boolean, default True
Whether to drop corresponding column for str index argument
append : boolean, default True
Whether to append columns to the existing index,
resulting in a MultiIndex.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object).
verify_integrity : boolean, default False
Check for duplicates in the new index.
Returns
-------
DataFrame or None
DataFrame with a new index or
None if ``inplace=True``
Examples
--------
>>> df = cudf.DataFrame({
... "a": [1, 2, 3, 4, 5],
... "b": ["a", "b", "c", "d","e"],
... "c": [1.0, 2.0, 3.0, 4.0, 5.0]
... })
>>> df
a b c
0 1 a 1.0
1 2 b 2.0
2 3 c 3.0
3 4 d 4.0
4 5 e 5.0
Set the index to become the ‘b’ column:
>>> df.set_index('b')
a c
b
a 1 1.0
b 2 2.0
c 3 3.0
d 4 4.0
e 5 5.0
Create a MultiIndex using columns ‘a’ and ‘b’:
>>> df.set_index(["a", "b"])
c
a b
1 a 1.0
2 b 2.0
3 c 3.0
4 d 4.0
5 e 5.0
Set new Index instance as index:
>>> df.set_index(cudf.RangeIndex(10, 15))
a b c
10 1 a 1.0
11 2 b 2.0
12 3 c 3.0
13 4 d 4.0
14 5 e 5.0
Setting `append=True` will combine current index with column `a`:
>>> df.set_index("a", append=True)
b c
a
0 1 a 1.0
1 2 b 2.0
2 3 c 3.0
3 4 d 4.0
4 5 e 5.0
`set_index` supports `inplace` parameter too:
>>> df.set_index("a", inplace=True)
>>> df
b c
a
1 a 1.0
2 b 2.0
3 c 3.0
4 d 4.0
5 e 5.0
"""
indexed_df = self._cdf.set_index(keys=keys, drop=drop, append=append, inplace=inplace,
verify_integrity=verify_integrity)
return DataFrame.from_cudf(indexed_df) if indexed_df else None
def reset_index(
self, level=None, drop=False, inplace=False, col_level=0, col_fill=""
) -> Union[DataFrame or None]:
"""
Reset the index.
Reset the index of the DataFrame, and use the default one instead.
Parameters
----------
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
Examples
--------
>>> df = cudf.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal <NA>
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal <NA>
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal <NA>
"""
indexed_df = self._cdf.reset_index(level=level, drop=drop, inplace=inplace, col_level=col_level, col_fill=col_fill)
return DataFrame.from_cudf(indexed_df) if indexed_df else None
def _convert_subset(self,
subset: Union[Hashable, Sequence[Hashable]],
ignore_len_check: bool = False) -> Iterable[Hashable]:
"""
convert the subset to Iterable[Hashable]
        if any value in subset does not exist in the column names, raise an error
based on: cudf.core.frame.Frame.drop_duplicates
Returns
-------
List/Tuple of column names
"""
if subset is None:
subset = self._cdf._column_names
elif (
not np.iterable(subset)
<filename>examples/seismic/skew_self_adjoint/wavesolver.py
from devito import Function, TimeFunction
from examples.seismic import PointSource, Receiver
from examples.seismic.skew_self_adjoint.utils import setup_w_over_q, compute_critical_dt
from examples.seismic.skew_self_adjoint.operators import IsoFwdOperator, IsoAdjOperator, \
IsoJacobianFwdOperator, IsoJacobianAdjOperator
class SsaIsoAcousticWaveSolver(object):
"""
    Solver object for a scalar isotropic variable-density visco-acoustic skew
    self-adjoint wave equation that provides operators for seismic inversion problems
and encapsulates the time and space discretization for a given problem setup.
Parameters
----------
npad : int, required
Number of points in the absorbing boundary.
Typically set to 50.
omega : float, required
Center circular frequency for dissipation only attenuation.
qmin : float, required
Minimum Q value on the exterior of the absorbing boundary.
Typically set to 0.1.
qmax : float, required
Maximum Q value in the interior of the model.
Typically set to 100.0.
b : Function, required
Physical model with buoyancy (m^3/kg).
v : Function, required
Physical model with velocity (m/msec).
src : SparseTimeFunction (PointSource)
Source position and time signature.
rec : SparseTimeFunction (PointSource)
Receiver positions and time signature.
time_axis : TimeAxis
Defines temporal sampling.
space_order: int, optional
Order of the spatial stencil discretisation. Defaults to 8.
"""
def __init__(self, npad, qmin, qmax, omega, b, v, src_coords, rec_coords,
time_axis, space_order=8, **kwargs):
self.npad = npad
self.qmin = qmin
self.qmax = qmax
self.omega = omega
self.b = b
self.v = v
self.src_coords = src_coords
self.rec_coords = rec_coords
self.time_axis = time_axis
self.space_order = space_order
# Determine temporal sampling using compute_critical_dt in utils.py
self.dt = compute_critical_dt(v)
# Cache compiler options
self._kwargs = kwargs
# Create the wOverQ Function
wOverQ = Function(name='wOverQ', grid=v.grid, space_order=v.space_order)
setup_w_over_q(wOverQ, omega, qmin, qmax, npad)
self.wOverQ = wOverQ
def forward(self, src, rec=None, b=None, v=None, wOverQ=None, u=None,
save=None, **kwargs):
"""
Forward modeling function that creates the necessary
data objects for running a forward modeling operator.
No required parameters.
Parameters
----------
src : SparseTimeFunction, required
Time series data for the injected source term.
rec : SparseTimeFunction, optional, defaults to new rec
The interpolated receiver data.
b : Function or float, optional, defaults to b at construction
The time-constant buoyancy.
v : Function or float, optional, defaults to v at construction
The time-constant velocity.
wOverQ : Function or float, optional, defaults to wOverQ at construction
The time-constant dissipation only attenuation w/Q field.
u : Function or float, optional, defaults to new TimeFunction
Stores the computed wavefield.
save : int or Buffer, optional
The entire (unrolled) wavefield.
Returns
----------
Receiver time series data, TimeFunction wavefield u, and performance summary
"""
# src is required
# Get rec: rec can change, create new if not passed
rec = rec or Receiver(name='rec', grid=self.v.grid,
time_range=self.time_axis,
coordinates=self.rec_coords)
# Get (b, v, wOverQ) from passed arguments or from (b, v, wOverQ) at construction
b = b or self.b
v = v or self.v
wOverQ = wOverQ or self.wOverQ
# ensure src, rec, b, v, wOverQ all share the same underlying grid
assert src.grid == rec.grid == b.grid == v.grid == wOverQ.grid
# Make dictionary of the physical model properties
model = {'b': b, 'v': v, 'wOverQ': wOverQ}
# Create the wavefield if not provided
u = u or TimeFunction(name='u', grid=self.v.grid,
save=self.time_axis.num if save else None,
time_order=2, space_order=self.space_order)
# Build the operator and execute
op = IsoFwdOperator(model, src, rec, self.time_axis, space_order=self.space_order,
save=self.time_axis.num if save else None, **self._kwargs)
rec.data[:] = 0
u.data[:] = 0
summary = op.apply(u=u, **kwargs)
return rec, u, summary
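    # Hedged usage sketch: the physical model fields, coordinates, time axis and the
    # PointSource src are assumed to be built elsewhere on the same grid (names below
    # are illustrative):
    #
    #   solver = SsaIsoAcousticWaveSolver(npad=50, qmin=0.1, qmax=100.0, omega=omega,
    #                                     b=b, v=v, src_coords=src_coords,
    #                                     rec_coords=rec_coords, time_axis=time_axis)
    #   rec, u, summary = solver.forward(src)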
def adjoint(self, rec, src=None, b=None, v=None, wOverQ=None, u=None,
save=None, **kwargs):
"""
Adjoint modeling function that creates the necessary
        data objects for running an adjoint modeling operator.
Required parameters: rec.
Parameters
----------
rec : SparseTimeFunction, required
The interpolated receiver data to be injected.
src : SparseTimeFunction, optional, defaults to new src
Time series data for the adjoint source term.
b : Function or float, optional, defaults to b at construction
The time-constant buoyancy.
v : Function or float, optional, defaults to v at construction
The time-constant velocity.
wOverQ : Function or float, optional, defaults to wOverQ at construction
The time-constant dissipation only attenuation w/Q field.
        u : Function or float, optional, defaults to new TimeFunction
            Stores the computed adjoint wavefield.
save : int or Buffer, optional
The entire (unrolled) wavefield.
Returns
----------
        Adjoint source time series data, wavefield TimeFunction u,
and performance summary
"""
# rec is required
# Get src: src can change, create new if not passed
src = src or PointSource(name='src', grid=self.v.grid,
time_range=self.time_axis,
coordinates=self.src_coords)
# Get (b, v, wOverQ) from passed arguments or from (b, v, wOverQ) at construction
b = b or self.b
v = v or self.v
wOverQ = wOverQ or self.wOverQ
# ensure src, rec, b, v, wOverQ all share the same underlying grid
assert src.grid == rec.grid == b.grid == v.grid == wOverQ.grid
# Make dictionary of the physical model properties
model = {'b': b, 'v': v, 'wOverQ': wOverQ}
# Create the adjoint wavefield if not provided
u = u or TimeFunction(name='u', grid=self.v.grid,
save=self.time_axis.num if save else None,
time_order=2, space_order=self.space_order)
# Build the operator and execute
op = IsoAdjOperator(model, src, rec, self.time_axis, space_order=self.space_order,
save=self.time_axis.num if save else None, **self._kwargs)
src.data[:] = 0
u.data[:] = 0
summary = op.apply(u=u, **kwargs)
return src, u, summary
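    # Hedged sanity-check sketch: forward and adjoint should pass a dot-product
    # (adjointness) test, <F s, r> ≈ <s, F' r>. All names below are illustrative:
    #
    #   rec_fwd, _, _ = solver.forward(src)          # F s
    #   src_adj, _, _ = solver.adjoint(rec_rand)     # F' r, rec_rand = random receiver data
    #   lhs = np.dot(rec_fwd.data.flatten(), rec_rand.data.flatten())
    #   rhs = np.dot(src.data.flatten(), src_adj.data.flatten())
    #   # lhs and rhs should agree to within floating-point tolerance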
def jacobian_forward(self, dm, src, rec=None, b=None, v=None, wOverQ=None,
u0=None, du=None, save=None, **kwargs):
"""
        Linearized Jacobian forward modeling function that creates the necessary
data objects for running a Jacobian forward modeling operator.
Required parameters: dm.
Parameters
----------
dm : Function or float, required
The perturbation to the velocity model.
src : SparseTimeFunction, required
Time series data for the injected source term.
rec : SparseTimeFunction, optional, defaults to new rec
The interpolated receiver data.
b : Function or float, optional, defaults to b at construction
The time-constant buoyancy.
v : Function or float, optional, defaults to v at construction
The time-constant velocity.
wOverQ : Function or float, optional, defaults to wOverQ at construction
The time-constant dissipation only attenuation w/Q field.
u0 : Function or float, optional, defaults to new TimeFunction
Stores the computed background wavefield.
du : Function or float, optional, defaults to new TimeFunction
Stores the computed perturbed wavefield.
save : int or Buffer, optional
The entire (unrolled) wavefield.
Returns
----------
Receiver time series data rec, TimeFunction background wavefield u0,
TimeFunction perturbation wavefield du, and performance summary
"""
# src is required
# Get rec: rec can change, create new if not passed
rec = rec or Receiver(name='rec', grid=self.v.grid,
time_range=self.time_axis,
coordinates=self.rec_coords)
# Get (b, v, wOverQ) from passed arguments or from (b, v, wOverQ) at construction
b = b or self.b
v = v or self.v
wOverQ = wOverQ or self.wOverQ
# ensure src, rec, b, v, wOverQ all share the same underlying grid
assert src.grid == rec.grid == b.grid == v.grid == wOverQ.grid
# Make dictionary of the physical model properties
model = {'b': b, 'v': v, 'wOverQ': wOverQ}
# Create the wavefields if not provided
u0 = u0 or TimeFunction(name='u0', grid=self.v.grid,
save=self.time_axis.num if save else None,
time_order=2, space_order=self.space_order)
du = du or TimeFunction(name='du', grid=self.v.grid,
time_order=2, space_order=self.space_order)
# Build the operator and execute
op = IsoJacobianFwdOperator(model, src, rec, self.time_axis,
space_order=self.space_order,
save=self.time_axis.num if save else None,
**self._kwargs)
rec.data[:] = 0
u0.data[:] = 0
du.data[:] = 0
summary = op.apply(dm=dm, u0=u0, du=du, **kwargs)
return rec, u0, du, summary
def jacobian_adjoint(self, rec, u0, b=None, v=None, wOverQ=None,
dm=None, du=None, save=None, **kwargs):
"""
        Linearized Jacobian adjoint modeling function that creates the necessary
        data objects for running a Jacobian adjoint modeling operator.
Required parameters: rec, u0.
Parameters
----------
rec : SparseTimeFunction, required
The interpolated receiver data to be injected.
u0 : Function or float, required, (created with save=True)
Stores the computed background wavefield.
b : Function or float, optional, defaults to b at construction
The time-constant buoyancy.
v : Function or float, optional, defaults to v at construction
The time-constant velocity.
wOverQ : Function or float, optional, defaults to wOverQ at construction
The time-constant dissipation only attenuation w/Q field.
dm : Function or float, optional, defaults to new Function
            The perturbation to the velocity model.
"""
Mostly copied from ppo.py but with some extra options added that are relevant to phasic
"""
import numpy as np
import torch as th
from queue import Queue
from mpi4py import MPI
from functools import partial
from .tree_util import tree_map, tree_multimap
from . import torch_util as tu
from .log_save_helper import LogSaveHelper
from .minibatch_optimize import minibatch_optimize
from .roller import Roller
from .reward_normalizer import RewardNormalizer
import math
from . import logger
INPUT_KEYS = {"ob", "ac", "first", "logp", "rec_logp", "vtarg", "adv", "state_in"}
def compute_gae(
*,
vpred: "(th.Tensor[1, float]) value predictions",
reward: "(th.Tensor[1, float]) rewards",
first: "(th.Tensor[1, bool]) mark beginning of episodes",
γ: "(float)",
λ: "(float)"
):
orig_device = vpred.device
assert orig_device == reward.device == first.device
vpred, reward, first = (x.cpu() for x in (vpred, reward, first))
first = first.to(dtype=th.float32)
assert first.dim() == 2
nenv, nstep = reward.shape
assert vpred.shape == first.shape == (nenv, nstep + 1)
adv = th.zeros(nenv, nstep, dtype=th.float32)
lastgaelam = 0
for t in reversed(range(nstep)):
notlast = 1.0 - first[:, t + 1]
nextvalue = vpred[:, t + 1]
# notlast: whether next timestep is from the same episode
delta = reward[:, t] + notlast * γ * nextvalue - vpred[:, t]
adv[:, t] = lastgaelam = delta + notlast * γ * λ * lastgaelam
vtarg = vpred[:, :-1] + adv
return adv.to(device=orig_device), vtarg.to(device=orig_device)
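# Illustrative cross-check, not part of the original API: the helper below computes
# the same estimate as compute_gae by direct truncated summation,
# A_t = sum_l (γλ)^l * δ_{t+l} with episode boundaries zeroing later terms, instead
# of the backward recursion above. Useful as a readable definition and a unit test.
def _gae_reference(vpred, reward, first, γ, λ):
    # expects CPU tensors with the same shapes compute_gae uses:
    # vpred/first: (nenv, nstep + 1), reward: (nenv, nstep)
    first = first.to(dtype=th.float32)
    nenv, nstep = reward.shape
    adv = th.zeros(nenv, nstep, dtype=th.float32)
    for t in range(nstep):
        coef = th.ones(nenv)
        acc = th.zeros(nenv)
        for l in range(t, nstep):
            notlast = 1.0 - first[:, l + 1]
            delta = reward[:, l] + notlast * γ * vpred[:, l + 1] - vpred[:, l]
            acc = acc + coef * delta
            coef = coef * γ * λ * notlast
        adv[:, t] = acc
    return adv, vpred[:, :-1] + adv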
def log_vf_stats(comm, **kwargs):
logger.logkv(
"VFStats/EV", tu.explained_variance(kwargs["vpred"], kwargs["vtarg"], comm)
)
for key in ["vpred", "vtarg", "adv"]:
logger.logkv_mean(f"VFStats/{key.capitalize()}Mean", kwargs[key].mean())
logger.logkv_mean(f"VFStats/{key.capitalize()}Std", kwargs[key].std())
def compute_advantage(model, seg, γ, λ, comm=None, adv_moments=None):
comm = comm or MPI.COMM_WORLD
finalob, finalfirst = seg["finalob"], seg["finalfirst"]
vpredfinal = model.v(finalob, finalfirst, seg["finalstate"])
reward = seg["reward"]
logger.logkv("Misc/FrameRewMean", reward.mean())
adv, vtarg = compute_gae(
γ=γ,
λ=λ,
reward=reward,
vpred=th.cat([seg["vpred"], vpredfinal[:, None]], dim=1),
first=th.cat([seg["first"], finalfirst[:, None]], dim=1),
)
log_vf_stats(comm, adv=adv, vtarg=vtarg, vpred=seg["vpred"])
seg["vtarg"] = vtarg
adv_mean, adv_var = tu.mpi_moments(comm, adv)
if adv_moments is not None:
adv_moments.update(adv_mean, adv_var, adv.numel() * comm.size)
adv_mean, adv_var = adv_moments.moments()
logger.logkv_mean("VFStats/AdvEwmaMean", adv_mean)
logger.logkv_mean("VFStats/AdvEwmaStd", math.sqrt(adv_var))
seg["adv"] = (adv - adv_mean) / (math.sqrt(adv_var) + 1e-8)
def tree_cat(trees):
return tree_multimap(lambda *xs: th.cat(xs, dim=0), *trees)
def recompute_logp(*, model, seg, mbsize):
b = tu.batch_len(seg)
with th.no_grad():
logps = []
for inds in th.arange(b).split(mbsize):
mb = tu.tree_slice(seg, inds)
pd, _, _, _ = model(mb["ob"], mb["first"], mb["state_in"])
logp = tu.sum_nonbatch(pd.log_prob(mb["ac"]))
logps.append(logp)
seg["rec_logp"] = tree_cat(logps)
def compute_losses(
model,
model_ewma,
ob,
ac,
first,
logp,
rec_logp,
vtarg,
adv,
state_in,
clip_param,
vfcoef,
entcoef,
kl_penalty,
imp_samp_max,
):
losses = {}
diags = {}
pd, vpred, aux, _state_out = model(ob=ob, first=first, state_in=state_in)
newlogp = tu.sum_nonbatch(pd.log_prob(ac))
if model_ewma is not None:
pd_ewma, _vpred_ewma, _, _state_out_ewma = model_ewma(
ob=ob, first=first, state_in=state_in
)
rec_logp = tu.sum_nonbatch(pd_ewma.log_prob(ac))
# prob ratio for KL / clipping based on a (possibly) recomputed logp
logratio = newlogp - rec_logp
# stale data can give rise to very large importance sampling ratios,
# especially when using the wrong behavior policy,
# so we need to clip them for numerical stability.
# this can introduce bias, but by default we only clip extreme ratios
# to minimize this effect
logp_adj = logp
if imp_samp_max > 0:
logp_adj = th.max(logp, newlogp.detach() - math.log(imp_samp_max))
# because of the large importance sampling ratios again,
# we need to handle the ratios in log space for numerical stability
pg_losses = -adv * th.exp(newlogp - logp_adj)
if clip_param > 0:
clipped_logratio = th.clamp(logratio, math.log(1.0 - clip_param), math.log(1.0 + clip_param))
pg_losses2 = -adv * th.exp(clipped_logratio + rec_logp - logp_adj)
pg_losses = th.max(pg_losses, pg_losses2)
diags["entropy"] = entropy = tu.sum_nonbatch(pd.entropy()).mean()
diags["negent"] = -entropy * entcoef
diags["pg"] = pg_losses.mean()
diags["pi_kl"] = kl_penalty * 0.5 * (logratio ** 2).mean()
losses["pi"] = diags["negent"] + diags["pg"] + diags["pi_kl"]
losses["vf"] = vfcoef * ((vpred - vtarg) ** 2).mean()
with th.no_grad():
if clip_param > 0:
diags["clipfrac"] = th.logical_or(
logratio < math.log(1.0 - clip_param),
logratio > math.log(1.0 + clip_param),
).float().mean()
diags["approxkl"] = 0.5 * (logratio ** 2).mean()
if imp_samp_max > 0:
diags["imp_samp_clipfrac"] = (newlogp - logp > math.log(imp_samp_max)).float().mean()
return losses, diags
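# Worked example of the log-space clipping above (illustrative numbers only): with
# clip_param = 0.2 the logratio is clamped to [log(0.8), log(1.2)], which is the
# usual PPO clamp of the probability ratio to [0.8, 1.2]; doing it in log space just
# avoids exponentiating a potentially huge ratio first. E.g. logratio = 5.0
# (ratio ≈ 148) clamps to log(1.2) ≈ 0.182, so
# pg_losses2 = -adv * exp(0.182 + rec_logp - logp_adj) stays finite.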
class EwmaMoments:
"""
Calculate rolling moments using EWMAs.
"""
def __init__(self, ewma_decay):
self.ewma_decay = ewma_decay
self.w = 0.0
self.ww = 0.0 # sum of squared weights
self.wsum = 0.0
self.wsumsq = 0.0
def update(self, mean, var, count, *, ddof=0):
self.w *= self.ewma_decay
self.ww *= self.ewma_decay ** 2
self.wsum *= self.ewma_decay
self.wsumsq *= self.ewma_decay
self.w += count
self.ww += count
self.wsum += mean * count
self.wsumsq += (count - ddof) * var + count * mean ** 2
def moments(self, *, ddof=0):
mean = self.wsum / self.w
# unbiased weighted sample variance:
# https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights
var = (self.wsumsq - self.wsum ** 2 / self.w) / (self.w - ddof * self.ww / self.w)
return mean, var
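# Minimal usage sketch for EwmaMoments (illustrative values only):
#
#   moments = EwmaMoments(ewma_decay=0.99)
#   moments.update(mean=0.1, var=1.5, count=4096)    # advantage stats, iteration k
#   moments.update(mean=-0.2, var=1.1, count=4096)   # iteration k+1; older stats decay
#   mean, var = moments.moments()
#   # mean/var blend both updates, weighting the newer one more heavily.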
def learn(
*,
venv: "(VecEnv) vectorized environment",
model: "(ppo.PpoModel)",
model_ewma: "(ppg.EwmaModel) alternate model used for clipping or the KL penalty",
interacts_total: "(float) total timesteps of interaction" = float("inf"),
nstep: "(int) number of serial timesteps" = 256,
γ: "(float) discount" = 0.99,
λ: "(float) GAE parameter" = 0.95,
clip_param: "(float) PPO parameter for clipping prob ratio" = 0.2,
vfcoef: "(float) value function coefficient" = 0.5,
entcoef: "(float) entropy coefficient" = 0.01,
nminibatch: "(int) number of minibatches to break epoch of data into" = 4,
n_epoch_vf: "(int) number of epochs to use when training the value function" = 1,
n_epoch_pi: "(int) number of epochs to use when training the policy" = 1,
lr: "(float) Adam learning rate" = 5e-4,
beta1: "(float) Adam beta1" = 0.9,
beta2: "(float) Adam beta2" = 0.999,
default_loss_weights: "(dict) default_loss_weights" = {},
store_segs: "(bool) whether or not to store segments in a buffer" = True,
verbose: "(bool) print per-epoch loss stats" = True,
log_save_opts: "(dict) passed into LogSaveHelper" = {},
rnorm: "(bool) reward normalization" = True,
kl_penalty: "(int) weight of the KL penalty, which can be used in place of clipping" = 0,
adv_ewma_decay: "(float) EWMA decay for advantage normalization" = 0.0,
grad_weight: "(float) relative weight of this worker's gradients" = 1,
comm: "(MPI.Comm) MPI communicator" = None,
callbacks: "(seq of function(dict)->bool) to run each update" = (),
learn_state: "dict with optional keys {'opts', 'roller', 'lsh', 'reward_normalizer', 'curr_interact_count', 'seg_buf', 'segs_delayed', 'adv_moments'}" = None,
staleness: "(int) number of iterations by which to make data artificially stale, for experimentation" = 0,
staleness_loss: "(str) one of 'decoupled', 'behavior' or 'proximal', only used if staleness > 0" = "decoupled",
imp_samp_max: "(float) value at which to clip importance sampling ratio" = 100.0,
):
if comm is None:
comm = MPI.COMM_WORLD
learn_state = learn_state or {}
ic_per_step = venv.num * comm.size * nstep
opt_keys = (
["pi", "vf"] if (n_epoch_pi != n_epoch_vf) else ["pi"]
) # use separate optimizers when n_epoch_pi != n_epoch_vf
params = list(model.parameters())
opts = learn_state.get("opts") or {
k: th.optim.Adam(params, lr=lr, betas=(beta1, beta2))
for k in opt_keys
}
tu.sync_params(params)
if rnorm:
reward_normalizer = learn_state.get("reward_normalizer") or RewardNormalizer(venv.num)
else:
reward_normalizer = None
def get_weight(k):
return default_loss_weights[k] if k in default_loss_weights else 1.0
def train_with_losses_and_opt(loss_keys, opt, **arrays):
losses, diags = compute_losses(
model,
model_ewma=model_ewma,
entcoef=entcoef,
kl_penalty=kl_penalty,
clip_param=clip_param,
vfcoef=vfcoef,
imp_samp_max=imp_samp_max,
**arrays,
)
loss = sum([losses[k] * get_weight(k) for k in loss_keys])
opt.zero_grad()
loss.backward()
tu.warn_no_gradient(model, "PPO")
tu.sync_grads(params, grad_weight=grad_weight)
diags = {k: v.detach() for (k, v) in diags.items()}
opt.step()
if "pi" in loss_keys and model_ewma is not None:
model_ewma.update()
diags.update({f"loss_{k}": v.detach() for (k, v) in losses.items()})
return diags
def train_pi(**arrays):
return train_with_losses_and_opt(["pi"], opts["pi"], **arrays)
def train_vf(**arrays):
return train_with_losses_and_opt(["vf"], opts["vf"], **arrays)
def train_pi_and_vf(**arrays):
return train_with_losses_and_opt(["pi", "vf"], opts["pi"], **arrays)
roller = learn_state.get("roller") or Roller(
act_fn=model.act,
venv=venv,
initial_state=model.initial_state(venv.num),
keep_buf=100,
keep_non_rolling=log_save_opts.get("log_new_eps", False),
)
lsh = learn_state.get("lsh") or LogSaveHelper(
ic_per_step=ic_per_step, model=model, comm=comm, **log_save_opts
)
callback_exit = False # Does callback say to exit loop?
curr_interact_count = learn_state.get("curr_interact_count") or 0
curr_iteration = 0
seg_buf = learn_state.get("seg_buf") or []
segs_delayed = learn_state.get("segs_delayed") or Queue(maxsize=staleness + 1)
adv_moments = learn_state.get("adv_moments") or EwmaMoments(adv_ewma_decay)
while curr_interact_count < interacts_total and not callback_exit:
seg = roller.multi_step(nstep)
lsh.gather_roller_stats(roller)
if staleness > 0:
segs_delayed.put(seg)
if not segs_delayed.full():
continue
seg = segs_delayed.get()
if staleness_loss == "behavior":
seg["rec_logp"] = seg["logp"]
else:
recompute_logp(model=model, seg=seg, mbsize=4)
if staleness_loss == "proximal":
seg["logp"] = seg["rec_logp"]
else:
seg["rec_logp"] = seg["logp"]
if rnorm:
seg["reward"] = reward_normalizer(seg["reward"], seg["first"])
compute_advantage(model, seg, γ, λ, comm=comm, adv_moments=adv_moments)
if store_segs:
seg_buf.append(tree_map(lambda x: x.cpu(), seg))
with logger.profile_kv("optimization"):
            # when n_epoch_pi != n_epoch_vf, we run separate policy and value-function epochs
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This combines configurable build-time constants (documented on REPO_CFG
# below), and non-configurable constants that are currently not namespaced.
#
# Note that there's no deep reason for this struct / non-struct split, so we
# could easily move everything into the struct.
#
load("//antlir/bzl:oss_shim.bzl", "config", "do_not_use_repo_cfg")
load("//antlir/bzl:sha256.bzl", "sha256_b64")
load("//antlir/bzl:shape.bzl", "shape")
load(":snapshot_install_dir.bzl", "RPM_DEFAULT_SNAPSHOT_FOR_INSTALLER_DIR", "snapshot_install_dir")
load(":target_helpers.bzl", "normalize_target")
# A label for non image feature targets to mirror the behaviour of `type = "image_feature"` in
# `private_do_not_use_feature_json_genrule` which allows the target to be queried.
ANTLIR_INTERNAL_TREAT_AS_IMAGE_FEATURE_LABEL = "antlir_internal__treat_as_image_feature_for_deps"
DO_NOT_USE_BUILD_APPLIANCE = "__DO_NOT_USE_BUILD_APPLIANCE__"
CONFIG_KEY = "antlir"
BZL_CONST = shape.new(
shape.shape(
layer_feature_suffix = str,
PRIVATE_feature_suffix = str,
version_set_allow_all_versions = str,
),
layer_feature_suffix = "__layer-feature",
# Do NOT use this outside of Antlir internals. See "Why are `feature`s
# forbidden as dependencies?" in `bzl/image/feature/new.bzl` for a
# detailed explanation.
PRIVATE_feature_suffix = "_IF_YOU_REFER_TO_THIS_RULE_YOUR_DEPENDENCIES_WILL_BE_BROKEN",
version_set_allow_all_versions = "__VERSION_SET_ALLOW_ALL_VERSIONS__",
)
def version_set_override_name(current_target):
return "vset-override-" + sha256_b64(current_target)
# Use `_get_str_cfg` or `_get_str_list_cfg` instead.
def _do_not_use_directly_get_cfg(name, default = None):
# Allow `buck -c` overrides from the command-line
val = native.read_config(CONFIG_KEY, name)
if val != None:
return val
val = do_not_use_repo_cfg.get(name)
if val != None:
return val
return default
# We don't have "globally required" configs because code that requires a
# config will generally loudly fail on a config value that is None.
def _get_str_cfg(name, default = None, allow_none = False):
ret = _do_not_use_directly_get_cfg(name, default = default)
if not allow_none and ret == None:
fail("Repo config must set key {}".format(name))
return ret
# Defaults to the empty list if the config is not set.
#
# We use space to separate plurals because spaces are not allowed in target
# paths, and also because that's what `.buckconfig` is supposed to support
# for list configs (but does not, due to bugs).
def _get_str_list_cfg(name, separator = " ", default = None):
s = _do_not_use_directly_get_cfg(name)
return s.split(separator) if s else (default or [])
# Defaults to the empty list if the config is not set
def _get_version_set_to_path():
lst = _get_str_list_cfg("version_set_to_path")
vs_to_path = dict(zip(lst[::2], lst[1::2]))
if 2 * len(vs_to_path) != len(lst):
fail("antlir.version_set_to_path is a space-separated dict: k1 v1 k2 v2")
# A layer can turn off version locking
# via `version_set = BZL_CONST.version_set_allow_all_versions`.
vs_to_path[BZL_CONST.version_set_allow_all_versions] = "TROLLING TROLLING TROLLING"
return vs_to_path
# Defaults to the empty list if the config is not set
def _get_artifact_key_to_path():
lst = _get_str_list_cfg("artifact_key_to_path")
key_to_path = dict(zip(lst[::2], lst[1::2]))
if 2 * len(key_to_path) != len(lst):
fail("antlir.artifact_key_to_path is a space-separated dict: k1 v1 k2 v2")
return key_to_path
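# Example of the space-separated dict convention parsed above (illustrative values):
#   buck build -c antlir.artifact_key_to_path='key1 //path:one key2 //path:two' ...
# yields {"key1": "//path:one", "key2": "//path:two"}; an odd number of tokens
# triggers the fail() above.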
_nevra_t = shape.shape(
name = shape.field(str),
# TODO: Codemod all callsites and update this to be `int`.
epoch = shape.field(str),
version = shape.field(str),
release = shape.field(str),
arch = shape.field(str),
)
def new_nevra(**kwargs):
return shape.new(_nevra_t, **kwargs)
# These are configuration keys that can be grouped under a specific common
# name called flavor. This way, during run-time, we can choose default
# values for set of configuration keys based on selected flavor name.
_flavor_config_t = shape.shape(
name = shape.field(str),
# FIXME: Ideally, remove `optional = True`. This field is not optional,
# per `new_flavor_config` below, but expressing that requires changing
# the wire format for `DO_NOT_USE_BUILD_APPLIANCE` to be a string
# instead of `None` -- see `new_flavor_config`. This needs a Python fix.
build_appliance = shape.field(str, optional = True),
rpm_installer = shape.field(str, optional = True),
rpm_repo_snapshot = shape.field(str, optional = True),
version_set_path = shape.field(str, optional = True),
rpm_version_set_overrides = shape.list(_nevra_t, optional = True),
unsafe_bypass_flavor_check = shape.field(bool, optional = True),
)
# This keeps the type private, so one cannot instantiate unvalidated flavors.
def flavor_config_t_shape_loader():
shape.loader(
name = "flavor_config_t",
shape = _flavor_config_t,
classname = "flavor_config_t",
visibility = ["//antlir/...", "//tupperware/cm/antlir/..."],
)
def new_flavor_config(
name,
build_appliance,
rpm_installer,
rpm_repo_snapshot = None,
rpm_version_set_overrides = None,
version_set_path = BZL_CONST.version_set_allow_all_versions,
unsafe_bypass_flavor_check = False):
"""
Arguments
- `name`: The name of the flavor
- `build_appliance`: Path to a layer target of a build appliance,
containing an installed `rpm_repo_snapshot()`, plus an OS image
with other image build tools like `btrfs`, `dnf`, `yum`, `tar`, `ln`, ...
- `rpm_installer`: The build appliance currently does not set
a default package manager -- in non-default settings, this
has to be chosen per image, since a BA can support multiple
package managers. In the future, if specifying a non-default
installer per image proves onerous when using non-default BAs, we
could support a `default` symlink under `RPM_DEFAULT_SNAPSHOT_FOR_INSTALLER_DIR`.
- `rpm_repo_snapshot`: List of target or `/__antlir__` paths,
see `snapshot_install_dir` doc. `None` uses the default determined
by looking up `rpm_installer` in `RPM_DEFAULT_SNAPSHOT_FOR_INSTALLER_DIR`.
- `rpm_version_set_overrides`: List of `nevra` objects
(see antlir/bzl/constants.bzl for definition). If rpm with given name to
be installed, the `nevra` defines its version.
- `unsafe_bypass_flavor_check`: Do NOT use.
"""
if build_appliance == None:
fail(
"Must be a target path, or a value from `constants.bzl`",
"build_appliance",
)
if rpm_installer != "yum" and rpm_installer != "dnf":
fail("Unsupported rpm_installer supplied in build_opts")
# When building the BA itself, we need this constant to avoid a circular
# dependency.
#
# This feature is exposed a non-`None` magic constant so that callers
# cannot get confused whether `None` refers to "no BA" or "default BA".
if build_appliance == DO_NOT_USE_BUILD_APPLIANCE:
build_appliance = None
if build_appliance:
build_appliance = normalize_target(build_appliance)
return shape.new(
_flavor_config_t,
name = name,
build_appliance = build_appliance,
rpm_installer = rpm_installer,
rpm_repo_snapshot = (
snapshot_install_dir(rpm_repo_snapshot) if rpm_repo_snapshot else "{}/{}".format(
RPM_DEFAULT_SNAPSHOT_FOR_INSTALLER_DIR,
rpm_installer,
)
),
rpm_version_set_overrides = rpm_version_set_overrides,
version_set_path = version_set_path,
unsafe_bypass_flavor_check = unsafe_bypass_flavor_check,
)
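# Hypothetical example of building a flavor config (target paths are made up):
#
#   new_flavor_config(
#       name = "centos8",
#       build_appliance = "//images/appliance:stable-build-appliance",
#       rpm_installer = "dnf",
#   )
#
# This keeps the default rpm_repo_snapshot derived from
# RPM_DEFAULT_SNAPSHOT_FOR_INSTALLER_DIR/dnf and, via the default
# version_set_path, allows all RPM versions.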
def _get_flavor_to_config():
flavor_to_config = {}
for flavor, orig_flavor_config in do_not_use_repo_cfg.get("flavor_to_config", {}).items():
flavor_config = {"name": flavor}
flavor_config.update(orig_flavor_config) # we'll mutate a copy
# Apply `buck -c` overrides.
#
# Buck has a notion of flavors that is separate from Antlir's but
# similar in spirit. It uses # as the delimiter for per-flavor
# config options, so we follow that pattern.
config_key = CONFIG_KEY + "#" + flavor
for key, v in flavor_config.items():
val = native.read_config(config_key, key, None)
if val != None:
flavor_config[key] = val
flavor_to_config[flavor] = new_flavor_config(**flavor_config)
return flavor_to_config
#
# These are repo-specific configuration keys, which can be overridden via
# the Buck CLI for debugging / development purposes.
#
# We do not want to simply use `.buckconfig` for these, because in FBCode,
# the CI cost to updating `.buckconfig` is quite high (every project
# potentially needs to be tested and rebuilt).
#
# Instead, we keep the per-repo configuration in `oss_shim_impl.bzl`, and
# the global defaults here, in `constants.bzl`.
#
# Our underlying configs use the simple type signature of `Mapping[str,
# str]` because we want to support overrides via `buck -c`. So, some very
# simple parsing of structured configuration keys happens in this file.
#
# Configuration sources have the following precedence order:
# - `buck -c antlir.CONFIG_NAME='foo bar'` -- note that our lists are
# generally space-separated, so you'll want to bash quote those.
# - `.buckconfig` -- DO NOT PUT OUR CONFIGS THERE!
# - `do_not_use_repo_cfg` loaded via `oss_shim.bzl`
# - the defaults below -- these have to be reasonable since this is what a
# clean open-source install will use
#
# A note on naming: please put the "topic" of the constant before the
# details, so that buildifier-required lexicographic ordering of dictionary
# keys results in related keys being grouped together.
#
#
# DANGER! ACHTUNG! PELIGRO! PERICOLO!
# Modifications to this shape's attributes or the values in the instance
# of it below (`REPO_CFG`) could (and likely will) cause excessive
# rebuilding and incur significant build cost. These attributes and values
# are effectively global and should be treated with extreme caution.
# Don't be careless.
repo_config_t = shape.shape(
artifacts_require_repo = bool,
artifact = shape.dict(str, str),
host_mounts_allowed_in_targets = shape.list(shape.path()),
host_mounts_for_repo_artifacts = shape.list(shape.path()),
# This holds the default flavors that a feature should cover.
# Compared to `flavor_to_config`, it does not contain the
# `antlir_test` flavor, which shouldn't be always defined.
flavor_available = shape.list(str),
flavor_default = str,
flavor_to_config = shape.dict(str, _flavor_config_t),
antlir_linux_flavor = str,
antlir_cell_name = str,
)
REPO_CFG = shape.new(
repo_config_t,
# This one is not using the access methods to provide the precedence order
# because the way this is determined is *always* based on the build mode
# provided, ie `@mode/opt` vs `@mode/dev`. And the build mode provided
# Author: <NAME>
# E-mail: <EMAIL>
# Author: <NAME>
# E-mail: <EMAIL>
# Author: <NAME>
# E-mail: <EMAIL>
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import accuracy_score
from tqdm import tqdm
import numpy as np
import torch
from torch import nn
class MLPModel(torch.nn.Module):
@staticmethod
def actFunct(af_type: str):
"""
Returns the specified activation type from torch.nn
______________________________________________________________
Parameters:
af_type: str
The Activation function to return
______________________________________________________________
Returns:
af: torch.nn.function
The specified activation function
"""
if af_type == "relu":
return torch.nn.ReLU()
        elif af_type == "sigmoid":
return torch.nn.Sigmoid()
elif af_type == "tanh":
return torch.nn.Tanh()
elif af_type == "softmax":
return torch.nn.Softmax(dim=1)
@staticmethod
def lossFunct(lf_type: str):
"""
Returns the specified loss function from torch.nn
______________________________________________________________
Parameters:
lf_type: str
The loss function to return
______________________________________________________________
Returns:
lf: torch.nn.function
The specified loss function
"""
if lf_type == "cross-entropy":
# needs squeezed target
return torch.nn.CrossEntropyLoss() # I:(N,C) O:(N)
elif lf_type == "hinge-embedding":
# needs plain target (no squeeze)
return torch.nn.HingeEmbeddingLoss()
elif lf_type == "bce" or \
lf_type == 'binary-cross-entropy':
# needs squeezed target
return torch.nn.BCELoss() # I:(N,C) O:(N, C)
elif lf_type == "bce-logit":
# needs squeezed target
return torch.nn.BCEWithLogitsLoss() # I:(N,C) O:(N, C)
elif lf_type == "soft-margin": # target .1 and 1
# check target structure
return torch.nn.SoftMarginLoss()
@staticmethod
def transform_target_for_loss(target, loss_funct_str):
"""fix target to match what loss needs."""
need_encode = ['cross-entropy']
if loss_funct_str in need_encode:
target = target.squeeze(1)
return target
def __init__(self, emb, n_hl: int = 1, num_features: int = 10,
n_classes: int = 2, dropout: float = 0.2,
epochs: int = 5, units: int = 25, bias: float = 0.1,
lr: float = 0.01, momentum: float = 0.9,
device: torch.device = torch.device("cpu"),
weights_init: str = "xavier_normal",
hl_actfunct: str = "tanh",
out_actfunct: str = "relu",
loss_funct: str = "cross-entropy",
random_state: int = None,
verbose: bool = False,
embedding_type: str = "mean",
freeze: bool = False) -> None:
"""
Creates a multilayer perceptron object with the specified
parameters using the pytorch framework.
______________________________________________________________
Parameters:
n_hl: int = 1
Number of hidden layers, defaults to 1
num_features: int = 10
Number of features, defaults to 10
        n_classes: int = 2
            Number of classes, defaults to 2
dropout: float = 0.2
Dropout value, defaults to 0.2
        epochs: int = 5
            Number of epochs to run, defaults to 5
units: int = 25
Number of units per hidden layer, defaults to 25
bias: float = 0.1
Bias value, defaults to 0.1
lr: float = 0.01
Learning rate, defaults to 0.01
momentum: float = 0.9
Momentum value, defaults to 0.9
device: torch.device = torch.device("cpu")
Specifies how the model is run, defaults to "cpu"
weights_init: str = "xavier_normal"
Specifies how the weights are initialized, defaults to
"xavier_normal"
hl_actfunct: str = "tanh"
            Hidden layer activation function, defaults to "tanh"
out_actfunct: str = "relu"
Output activation function, defaults to "relu"
loss_funct: str = "cross-entropy"
Loss function, defaults to "cross-entropy"
random_state: int = None
The seed for the random state
verbose: bool = False
If True: prints out progressive output, defaults to False
______________________________________________________________
Returns:
None
"""
super().__init__()
# seeding
if random_state is not None:
print(f'Setting torch random_state to {random_state}...') \
if verbose else None
torch.manual_seed(random_state)
np.random.seed(random_state)
# parameters
self.device = device
self.epochs = epochs
self.verbose = verbose
self.hl_actfunct = self.actFunct(af_type=hl_actfunct)
self.out_actfunct = self.actFunct(af_type=out_actfunct)
self.out_actfunct_str = out_actfunct
self.loss_funct_str = loss_funct
self.loss_funct = self.lossFunct(lf_type=loss_funct)
self.freeze = freeze
if n_hl == 0:
self.model = torch.nn.Sequential(
torch.nn.Dropout(dropout),
torch.nn.Linear(num_features, n_classes),
self.out_actfunct,
)
elif n_hl == 1:
self.model = torch.nn.Sequential(
torch.nn.Dropout(dropout),
torch.nn.Linear(num_features, units),
self.hl_actfunct,
torch.nn.Dropout(dropout),
torch.nn.Linear(units, n_classes),
self.out_actfunct,
)
elif n_hl >= 2:
self.model = torch.nn.Sequential(
torch.nn.Dropout(dropout),
torch.nn.Linear(num_features, units),
self.hl_actfunct
)
for i in range(1, n_hl):
self.model.add_module(
name=f"HL{i + 1}-Dropout",
module=torch.nn.Dropout(dropout)
)
self.model.add_module(
name=f"HL{i + 1}-Linear",
module=torch.nn.Linear(units, units),
)
self.model.add_module(
name=f"HL{i + 1}-ActFunction",
module=self.hl_actfunct,
)
self.model.add_module(
name="Output-Linear",
module=torch.nn.Linear(units, n_classes),
)
self.model.add_module(
name="Output-ActFunction",
module=self.out_actfunct,
)
for m in self.model:
if isinstance(m, torch.nn.Linear):
# initializing bias
torch.nn.init.constant_(m.bias, val=bias)
# initializing weights
if weights_init == "xavier_normal":
torch.nn.init.xavier_normal_(m.weight, gain=1.0)
self.model.to(device)
self.opt = torch.optim.SGD(
params=self.model.parameters(),
lr=lr,
momentum=momentum
)
self.losses = None
vectors = torch.FloatTensor(emb.vectors)
if device == torch.device("cuda"):
vectors = vectors.to(device)
self.word_embeddings = torch.nn.Embedding.from_pretrained(
vectors, freeze=self.freeze)
self.embedding_type = embedding_type
def forward(self, batch):
"""
Performs a forward step on the model.
______________________________________________________________
Parameters:
batch: torch.nn.tensor
The mini-batch input tensor to update
______________________________________________________________
Returns:
self.model: MLPModel
The updated model
"""
x = None
if self.embedding_type == "mean":
x = torch.mean(self.word_embeddings(batch), dim=1)
elif self.embedding_type == "sum":
x = torch.sum(self.word_embeddings(batch), dim=1)
elif self.embedding_type == "max":
            # torch.max with dim returns (values, indices); keep only the pooled values
            x = torch.max(self.word_embeddings(batch), dim=1).values
# TODO: Combine to tensor 3x as long for input # NOTE: I think this can really help us
return self.model(x)
def predict_classes(self, input_tensor):
"""
Makes predictions from a test tensor using the model.
______________________________________________________________
Parameters:
input_tensor: torch.nn.tensor
The tensor to make predictions on.
______________________________________________________________
Returns:
y_pred: np.array
An array containing the predicted classes of the input
tensors.
"""
x = None
# TODO: Why not just send this through forward and call detatch?
if self.embedding_type == "mean": # NOTE: If this is the same as above, this should be a method
x = torch.mean(self.word_embeddings(input_tensor), dim=1)
elif self.embedding_type == "sum":
x = torch.sum(self.word_embeddings(input_tensor), dim=1)
elif self.embedding_type == "max":
            x = torch.max(self.word_embeddings(input_tensor), dim=1).values  # values only
y_pred = self.model(x)
return y_pred.max(dim=1)[1]
def backward(self, output, target):
"""
Performs a backpropogation step computing the loss.
______________________________________________________________
Parameters:
output:
The output after forward with shape (batch_size, num_classes).
target:
The target it is optimizing towards
______________________________________________________________
Returns:
loss: float
How close the estimate was to the gold standard.
"""
target = self.transform_target_for_loss(target, self.loss_funct_str)
if self.loss_funct_str == "bce" or \
self.loss_funct_str == 'binary-cross-entropy':
            encoder = OneHotEncoder(sparse=False)  # NOTE: Shouldn't this encoding be outside of a function that's called for every data point?
target = encoder.fit_transform(target)
target = torch.FloatTensor(target)
# normalizing between 0 and 1
min_ = output.min(dim=1, keepdim=True)[0]
max_ = output.max(dim=1, keepdim=True)[0]
output = (output - min_) / (max_ - min_)
# output = sigmoid(output) # -> the same as bce-logit
elif self.loss_funct_str == "bce-logit":
encoder = OneHotEncoder(sparse=False)
target = encoder.fit_transform(target)
target = torch.FloatTensor(target)
# BUG output is never generated when bce-logit is called
# calculating the loss
loss = self.loss_funct(output, target)
# resetting the gradients from the optimizer
# more info: https://pytorch.org/docs/stable/optim.html
self.opt.zero_grad()
# calculating gradients
loss.backward()
# updating weights from the model by calling optimizer.step()
self.opt.step()
return loss
def fit(self, loader=None, verbose=False) -> None:
"""
Fits the model to the training data using the models
initialized values. Runs for the models number of epochs.
______________________________________________________________
Parameters:
        loader: torch.utils.data.DataLoader = None
            DataLoader object that yields the training batches, defaults to None
verbose: bool=False
If True: prints out progressive output, defaults to False
______________________________________________________________
Returns:
None
"""
self.losses = np.empty(shape=self.epochs, dtype=float)
iterator = tqdm(range(self.epochs)) if verbose else range(self.epochs)
for i in iterator:
_loss = []
for n, batch in enumerate(loader):
text, source = batch
text = text.to(self.device)
source = source.to(self.device)
output = self.forward(text)
loss = self.backward(output, source)
_loss.append(loss.item())
self.losses[i] = np.mean(_loss)
            if verbose:
                print(f'Epoch: {i} loss:', self.losses[i])
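# Hedged usage sketch; every name below is an assumption, not part of this module.
# `emb` is expected to expose a `.vectors` array (e.g. gensim KeyedVectors) and
# `loader` to yield (token_id_batch, label_batch) pairs as consumed by fit() above.
# Defining this helper has no side effects; it is illustrative only.
def _example_train_mlp(emb, loader, num_classes):
    model = MLPModel(
        emb,
        n_hl=1,
        num_features=emb.vectors.shape[1],  # embedding dimensionality
        n_classes=num_classes,
        epochs=5,
        loss_funct="cross-entropy",
        verbose=True,
    )
    model.fit(loader=loader, verbose=True)
    return model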
class RNNModel(torch.nn.Module):
@staticmethod
def lossFunct(lf_type: str):
"""
Returns the specified loss function from torch.nn
______________________________________________________________
Parameters:
lf_type: str
The loss function to return
______________________________________________________________
Returns:
lf: torch.nn.function
The specified loss function
"""
if lf_type == "cross-entropy":
return torch.nn.CrossEntropyLoss() # I:(N,C) O:(N)
        # NOTE we should definitely still check everything, right? (will studies have to be changed?)
elif lf_type == "hinge-embedding":
# needs plain target (no squeeze)
return torch.nn.HingeEmbeddingLoss()
elif lf_type == "bce" or \
lf_type == 'binary-cross-entropy':
# needs squeezed target
return torch.nn.BCELoss() # I:(N,C) O:(N, C)
elif lf_type == "bce-logit":
# needs squeezed target
return torch.nn.BCEWithLogitsLoss() # I:(N,C) O:(N, C)
elif lf_type == "soft-margin": # target .1 and 1
# check target structure
return torch.nn.SoftMarginLoss()
@staticmethod
def model_constructor(n_hl, units, dropout, num_features, rnn_type,
nonlinearity, bidirectional):
model = None
if rnn_type == "rnn":
model = torch.nn.RNN(
input_size=num_features,
hidden_size=units,
num_layers=n_hl,
nonlinearity=nonlinearity, # -> 'tanh' or 'relu'
batch_first=True, # -> (batch, seq, feature)
dropout=dropout,
bidirectional=bidirectional
)
elif rnn_type == "lstm":
model = torch.nn.LSTM(
input_size=num_features,
hidden_size=units,
num_layers=n_hl,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional
)
elif rnn_type == "gru":
|